repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
Kiiv/CouchPotatoServer
libs/xmpp/__init__.py
212
1795
# $Id: __init__.py,v 1.9 2005/03/07 09:34:51 snakeru Exp $ """ All features of xmpppy library contained within separate modules. At present there are modules: simplexml - XML handling routines protocol - jabber-objects (I.e. JID and different stanzas and sub-stanzas) handling routines. debug - Jacob Lundquist's debugging module. Very handy if you like colored debug. auth - Non-SASL and SASL stuff. You will need it to auth as a client or transport. transports - low level connection handling. TCP and TLS currently. HTTP support planned. roster - simple roster for use in clients. dispatcher - decision-making logic. Handles all hooks. The first who takes control over fresh stanzas. features - different stuff that didn't worths separating into modules browser - DISCO server framework. Allows to build dynamic disco tree. filetransfer - Currently contains only IBB stuff. Can be used for bot-to-bot transfers. Most of the classes that is defined in all these modules is an ancestors of class PlugIn so they share a single set of methods allowing you to compile a featured XMPP client. For every instance of PlugIn class the 'owner' is the class in what the plug was plugged. While plugging in such instance usually sets some methods of owner to it's own ones for easy access. All session specific info stored either in instance of PlugIn or in owner's instance. This is considered unhandy and there are plans to port 'Session' class from xmppd.py project for storing all session-related info. Though if you are not accessing instances variables directly and use only methods for access all values you should not have any problems. """ import simplexml,protocol,debug,auth,transports,roster,dispatcher,features,browser,filetransfer,commands from client import * from protocol import *
gpl-3.0
fake-name/IntraArchiveDeduplicator
Tests/Test_BKTree_Issue_2.py
1
3225
# from cbktree import BkHammingTree, explicitSignCast import pyximport print("Have Cython") pyximport.install() import deduplicator.cyHamDb as hamDb int_bits = lambda b: hamDb.explicitSignCast(int(b, 2)) TEST_DATA = { # Format: id -> bitstring 1: int_bits('1011010010010110110111111000001000001000100011110001010110111011'), 2: int_bits('1011010010010110110111111000001000000001100011110001010110111011'), 3: int_bits('1101011110100100001011001101001110010011100010011101001000110101'), } SEARCH_DIST = 2 # 2 out of 64 bits import unittest import scanner.logSetup as logSetup from bitstring import Bits import pyximport print("Have Cython") pyximport.install() import deduplicator.cyHamDb as hamDb def hamming(a, b): tot = 0 x = (a ^ b) while x > 0: tot += x & 1 x >>= 1 return tot def b2i(binaryStringIn): if len(binaryStringIn) != 64: print("ERROR: Passed string not 64 characters. String length = %s" % len(binaryStringIn)) print("ERROR: String value '%s'" % binaryStringIn) raise ValueError("Input strings must be 64 chars long!") val = Bits(bin=binaryStringIn) return val.int class TestSequenceFunctions(unittest.TestCase): def __init__(self, *args, **kwargs): logSetup.initLogging() super().__init__(*args, **kwargs) def setUp(self): self.buildTestTree() def buildTestTree(self): self.tree = hamDb.BkHammingTree() for nodeId, nodeHash in TEST_DATA.items(): self.tree.insert(nodeHash, nodeId) def test_2(self): # Find near matches for each node that was inserted. for node_id, ib in TEST_DATA.items(): res = self.tree.getWithinDistance(ib, SEARCH_DIST) print("{}: {}".format(node_id, res)) # Find near matches for items that were not inserted. 
new = '1101011110100100001011001101001110010011100010011101001000110101' self.assertEqual(self.tree.getWithinDistance(int_bits(new), SEARCH_DIST), {3}) print("new: {}".format(self.tree.getWithinDistance(int_bits(new), SEARCH_DIST))) ones = '1' * 64 print("111..: {}".format(self.tree.getWithinDistance(int_bits(ones), SEARCH_DIST))) # XXX Should return empty, returns [0] instead. zeroes = '0' * 64 print("000..: {}".format(self.tree.getWithinDistance(int_bits(zeroes), SEARCH_DIST))) self.assertEqual(self.tree.getWithinDistance(int_bits(ones), SEARCH_DIST), set()) self.assertEqual(self.tree.getWithinDistance(int_bits(zeroes), SEARCH_DIST), set()) # def test_1(self): # tgtHash = -6076574518398440533 # ret = self.tree.getWithinDistance(tgtHash, 2) # self.assertEqual(ret, set([item[0] for item in TEST_DATA])) # def test_signModification_1(self): # x = hamDb.explicitUnsignCast(5) # x = hamDb.explicitSignCast(x) # self.assertEqual(x, 5) # tgtHash = -6076574518398440533 # for hashVal in [data[1] for data in TEST_DATA]: # x = hamDb.explicitUnsignCast(hashVal) # x = hamDb.explicitSignCast(x) # self.assertEqual(hashVal, x) # # pr = hamDb.explicitUnsignCast(tgtHash) ^ hamDb.explicitUnsignCast(hashVal) # # print("{0:b}".format(hamDb.explicitUnsignCast(tgtHash)).zfill(64)) # # print("{0:b}".format(hamDb.explicitUnsignCast(hashVal)).zfill(64)) # # print("{0:b}".format(pr).zfill(64).replace("0", " ")) # # print()
bsd-3-clause
JCBarahona/edX
common/test/acceptance/tests/studio/test_studio_with_ora_component.py
87
4074
""" Acceptance tests for Studio related to edit/save peer grading interface. """ from ...fixtures.course import XBlockFixtureDesc from ...pages.studio.import_export import ExportCoursePage from ...pages.studio.component_editor import ComponentEditorView from ...pages.studio.overview import CourseOutlinePage from base_studio_test import StudioCourseTest from ..helpers import load_data_str class ORAComponentTest(StudioCourseTest): """ Tests tht edit/save is working correctly when link_to_location is given in peer grading interface settings. """ def setUp(self): super(ORAComponentTest, self).setUp() self.course_outline_page = CourseOutlinePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self.export_page = ExportCoursePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) def populate_course_fixture(self, course_fixture): """ Return a test course fixture containing a discussion component. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc( 'combinedopenended', "Peer Problem", data=load_data_str('ora_peer_problem.xml'), metadata={ 'graded': True, }, ), XBlockFixtureDesc('peergrading', 'Peer Module'), ) ) ) ) def _go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'): self.course_outline_page.visit() subsection = self.course_outline_page.section(section_name).subsection(subsection_name) return subsection.expand_subsection().unit(unit_name).go_to() def test_edit_save_and_export(self): """ Ensure that edit/save is working correctly with link_to_location in peer interface settings. """ self.course_outline_page.visit() unit = self._go_to_unit_page() peer_problem_location = unit.xblocks[1].locator # Problem location should contain "combinedopeneneded". 
self.assertIn("combinedopenended", peer_problem_location) component = unit.xblocks[2] # Interface component name should be "Peer Module". self.assertEqual(component.name, "Peer Module") component.edit() component_editor = ComponentEditorView(self.browser, component.locator) component_editor.set_field_value_and_save('Link to Problem Location', peer_problem_location) # Verify that we can edit component again after saving and link_to_location is present. component.edit() location_input_element = component_editor.get_setting_element("Link to Problem Location") self.assertEqual( location_input_element.get_attribute('value'), peer_problem_location ) def test_verify_ora1_deprecation_message(self): """ Scenario: Verifies the ora1 deprecation message on ora components. Given I have a course with ora 1 components When I go to the unit page Then I see a deprecation error message in ora 1 components. """ self.course_outline_page.visit() unit = self._go_to_unit_page() for xblock in unit.xblocks: self.assertTrue(xblock.has_validation_error) self.assertEqual( xblock.validation_error_text, "ORA1 is no longer supported. To use this assessment, " "replace this ORA1 component with an ORA2 component." )
agpl-3.0
cloud-fan/spark
examples/src/main/python/ml/one_vs_rest_example.py
27
2197
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ An example of Multiclass to Binary Reduction with One Vs Rest, using Logistic Regression as the base classifier. Run with: bin/spark-submit examples/src/main/python/ml/one_vs_rest_example.py """ # $example on$ from pyspark.ml.classification import LogisticRegression, OneVsRest from pyspark.ml.evaluation import MulticlassClassificationEvaluator # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession \ .builder \ .appName("OneVsRestExample") \ .getOrCreate() # $example on$ # load data file. inputData = spark.read.format("libsvm") \ .load("data/mllib/sample_multiclass_classification_data.txt") # generate the train/test split. (train, test) = inputData.randomSplit([0.8, 0.2]) # instantiate the base classifier. lr = LogisticRegression(maxIter=10, tol=1E-6, fitIntercept=True) # instantiate the One Vs Rest Classifier. ovr = OneVsRest(classifier=lr) # train the multiclass model. ovrModel = ovr.fit(train) # score the model on test data. predictions = ovrModel.transform(test) # obtain evaluator. evaluator = MulticlassClassificationEvaluator(metricName="accuracy") # compute the classification error on test data. 
accuracy = evaluator.evaluate(predictions) print("Test Error = %g" % (1.0 - accuracy)) # $example off$ spark.stop()
apache-2.0
fxia22/ASM_xf
PythonD/lib/python2.4/test/test_urllib.py
5
22204
"""Regresssion tests for urllib""" import urllib import httplib import unittest from test import test_support import os import mimetools import tempfile import StringIO def hexescape(char): """Escape char as RFC 2396 specifies""" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = "0%s" % hex_repr return "%" + hex_repr class urlopen_FileTests(unittest.TestCase): """Test urlopen() opening a temporary file. Try to test as much functionality as possible so as to cut down on reliance on connecting to the Net for testing. """ def setUp(self): """Setup of a temp file to use for testing""" self.text = "test_urllib: %s\n" % self.__class__.__name__ FILE = file(test_support.TESTFN, 'wb') try: FILE.write(self.text) finally: FILE.close() self.pathname = test_support.TESTFN self.returned_obj = urllib.urlopen("file:%s" % self.pathname) def tearDown(self): """Shut down the open object""" self.returned_obj.close() os.remove(test_support.TESTFN) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl", "__iter__"): self.assert_(hasattr(self.returned_obj, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual('', self.returned_obj.readline(), "calling readline() after exhausting the file did not" " return an empty string") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, "readlines() returned the wrong number of lines") self.assertEqual(lines_list[0], self.text, "readlines() returned improper text") def test_fileno(self): file_num = self.returned_obj.fileno() self.assert_(isinstance(file_num, int), "fileno() did not return an int") self.assertEqual(os.read(file_num, len(self.text)), self.text, "Reading on the 
file descriptor returned by fileno() " "did not return the expected text") def test_close(self): # Test close() by calling it hear and then having it be called again # by the tearDown() method for the test self.returned_obj.close() def test_info(self): self.assert_(isinstance(self.returned_obj.info(), mimetools.Message)) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_iter(self): # Test iterator # Don't need to count number of iterations since test would fail the # instant it returned anything beyond the first line from the # comparison for line in self.returned_obj.__iter__(): self.assertEqual(line, self.text) class urlopen_HttpTests(unittest.TestCase): """Test urlopen() opening a fake http connection.""" def fakehttp(self, fakedata): class FakeSocket(StringIO.StringIO): def sendall(self, str): pass def makefile(self, mode, name): return self def read(self, amt=None): if self.closed: return '' return StringIO.StringIO.read(self, amt) def readline(self, length=None): if self.closed: return '' return StringIO.StringIO.readline(self, length) class FakeHTTPConnection(httplib.HTTPConnection): def connect(self): self.sock = FakeSocket(fakedata) assert httplib.HTTP._connection_class == httplib.HTTPConnection httplib.HTTP._connection_class = FakeHTTPConnection def unfakehttp(self): httplib.HTTP._connection_class = httplib.HTTPConnection def test_read(self): self.fakehttp('Hello!') try: fp = urllib.urlopen("http://python.org/") self.assertEqual(fp.readline(), 'Hello!') self.assertEqual(fp.readline(), '') finally: self.unfakehttp() class urlretrieve_FileTests(unittest.TestCase): """Test urllib.urlretrieve() on local files""" def setUp(self): # Create a list of temporary files. Each item in the list is a file # name (absolute path or relative to the current working directory). # All files in this list will be deleted in the tearDown method. 
Note, # this only helps to makes sure temporary files get deleted, but it # does nothing about trying to close files that may still be open. It # is the responsibility of the developer to properly close files even # when exceptional conditions occur. self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(test_support.TESTFN) self.text = 'testing urllib.urlretrieve' try: FILE = file(test_support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete the temporary files. for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): return "file://%s" % urllib.pathname2url(os.path.abspath(filePath)) def createNewTempFile(self, data=""): """Creates a new temporary file containing the specified data, registers the file for deletion during the test fixture tear down, and returns the absolute path of the file.""" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, "wb") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file just gets its own location returned and # a headers value is returned. result = urllib.urlretrieve("file:%s" % test_support.TESTFN) self.assertEqual(result[0], test_support.TESTFN) self.assert_(isinstance(result[1], mimetools.Message), "did not get a mimetools.Message instance as second " "returned value") def test_copy(self): # Test that setting the filename argument works. 
second_temp = "%s.2" % test_support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.urlretrieve(self.constructLocalFileUrl( test_support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assert_(os.path.exists(second_temp), "copy of the file was not " "made") FILE = file(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook works. def hooktester(count, block_size, total_size, count_holder=[0]): self.assert_(isinstance(count, int)) self.assert_(isinstance(block_size, int)) self.assert_(isinstance(total_size, int)) self.assertEqual(count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = "%s.2" % test_support.TESTFN self.registerFileForCleanUp(second_temp) urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook only 1 time. report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile() urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should call reporthook only 2 times (once when # the "network connection" is established and once when the block is # read). Since the block size is 8192 bytes, only one block read is # required to read the entire file. 
report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile("x" * 5) urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][1], 8192) self.assertEqual(report[0][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook only 3 times (once # when the "network connection" is established, once for the next 8192 # bytes, and once for the last byte). report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile("x" * 8193) urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][1], 8192) self.assertEqual(report[0][2], 8193) class QuotingTests(unittest.TestCase): """Tests for urllib.quote() and urllib.quote_plus() According to RFC 2396 ("Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter on the hex letters. 
The various character sets specified are: Reserved characters : ";/?:@&=+$," Have special meaning in URIs and must be escaped if not being used for their special meaning Data characters : letters, digits, and "-_.!~*'()" Unreserved and do not need to be escaped; can be, though, if desired Control characters : 0x00 - 0x1F, 0x7F Have no use in URIs so must be escaped space : 0x20 Must be escaped Delimiters : '<>#%"' Must be escaped Unwise : "{}|\^[]`" Must be escaped """ def test_never_quote(self): # Make sure quote() does not quote letters, digits, and "_,.-" do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "0123456789", "_.-"]) result = urllib.quote(do_not_quote) self.assertEqual(do_not_quote, result, "using quote(): %s != %s" % (do_not_quote, result)) result = urllib.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, "using quote_plus(): %s != %s" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is default value for 'safe' parameter self.assertEqual(urllib.quote.func_defaults[0], '/') def test_safe(self): # Test setting 'safe' parameter does what it should do quote_by_default = "<>" result = urllib.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote(): %s != %s" % (quote_by_default, result)) result = urllib.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote_plus(): %s != %s" % (quote_by_default, result)) def test_default_quoting(self): # Make sure all characters that should be quoted are by default sans # space (separate test for that). 
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F should_quote.append('<>#%"{}|\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result = urllib.quote(char) self.assertEqual(hexescape(char), result, "using quote(): %s should be escaped to %s, not %s" % (char, hexescape(char), result)) result = urllib.quote_plus(char) self.assertEqual(hexescape(char), result, "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result)) del should_quote partial_quote = "ab[]cd" expected = "ab%5B%5Dcd" result = urllib.quote(partial_quote) self.assertEqual(expected, result, "using quote(): %s != %s" % (expected, result)) self.assertEqual(expected, result, "using quote_plus(): %s != %s" % (expected, result)) def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as specified in # their unique way result = urllib.quote(' ') self.assertEqual(result, hexescape(' '), "using quote(): %s != %s" % (result, hexescape(' '))) result = urllib.quote_plus(' ') self.assertEqual(result, '+', "using quote_plus(): %s != +" % result) given = "a b cd e f" expect = given.replace(' ', hexescape(' ')) result = urllib.quote(given) self.assertEqual(expect, result, "using quote(): %s != %s" % (expect, result)) expect = given.replace(' ', '+') result = urllib.quote_plus(given) self.assertEqual(expect, result, "using quote_plus(): %s != %s" % (expect, result)) class UnquotingTests(unittest.TestCase): """Tests for unquote() and unquote_plus() See the doc string for quoting_Tests for details on quoting and such. 
""" def test_unquoting(self): # Make sure unquoting of all ASCII values works escape_list = [] for num in range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %s != %s" % (expect, result)) result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using quote(): not all characters escaped; %s" % result) result = urllib.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using unquote(): not all characters escaped: " "%s" % result) def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect = "abcd" result = urllib.unquote(given) self.assertEqual(expect, result, "using quote(): %s != %s" % (expect, result)) result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given = "are+there+spaces..." expect = given result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %s != %s" % (expect, result)) expect = given.replace('+', ' ') result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) class urlencode_Tests(unittest.TestCase): """Tests for urlencode()""" def help_inputtype(self, given, test_type): """Helper method for testing different input types. 'given' must lead to only the pairs: * 1st, 1 * 2nd, 2 * 3rd, 3 Test cannot assume anything about order. Docs make no guarantee and have possible dictionary input. 
""" expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] result = urllib.urlencode(given) for expected in expect_somewhere: self.assert_(expected in result, "testing %s: %s not found in %s" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assert_(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps "testing %s: " "unexpected number of characters: %s != %s" % (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test passing in a mapping object as an argument. self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, "using dict as input type") def test_using_sequence(self): # Test passing in a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], "using sequence of two-item tuples as input") def test_quoting(self): # Make sure keys and values are quoted using quote_plus() given = {"&":"="} expect = "%s=%s" % (hexescape('&'), hexescape('=')) result = urllib.urlencode(given) self.assertEqual(expect, result) given = {"key name":"A bunch of pluses"} expect = "key+name=A+bunch+of+pluses" result = urllib.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3'])) result = urllib.urlencode(given) self.assertEqual(expect, result) result = urllib.urlencode(given, True) for value in given["sequence"]: expect = "sequence=%s" % value self.assert_(expect in result, "%s not found in %s" % (expect, result)) self.assertEqual(result.count('&'), 2, "Expected 2 '&'s, got %s" % result.count('&')) class Pathname_Tests(unittest.TestCase): """Test pathname2url() and url2pathname()""" def test_basic(self): # Make sure simple tests pass expected_path = os.path.join("parts", "of", "a", "path") expected_url = "parts/of/a/path" result = urllib.pathname2url(expected_path) self.assertEqual(expected_url, result, "pathname2url() failed; %s != %s" % (result, expected_url)) result = urllib.url2pathname(expected_url) self.assertEqual(expected_path, result, "url2pathame() failed; %s != %s" % (result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url() and # url2pathname() respectively given = os.path.join("needs", "quot=ing", "here") expect = "needs/%s/here" % urllib.quote("quot=ing") result = urllib.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) expect = given result = urllib.url2pathname(result) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) given = 
os.path.join("make sure", "using_quote") expect = "%s/using_quote" % urllib.quote("make sure") result = urllib.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) given = "make+sure/using_unquote" expect = os.path.join("make+sure", "using_unquote") result = urllib.url2pathname(given) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) def test_main(): test_support.run_unittest( urlopen_FileTests, urlopen_HttpTests, urlretrieve_FileTests, QuotingTests, UnquotingTests, urlencode_Tests, Pathname_Tests ) if __name__ == '__main__': test_main()
gpl-2.0
TheMOOCAgency/edx-platform
common/djangoapps/third_party_auth/tests/test_pipeline.py
77
1807
"""Unit tests for third_party_auth/pipeline.py.""" import random from third_party_auth import pipeline from third_party_auth.tests import testutil import unittest # Allow tests access to protected methods (or module-protected methods) under test. # pylint: disable=protected-access class MakeRandomPasswordTest(testutil.TestCase): """Tests formation of random placeholder passwords.""" def setUp(self): super(MakeRandomPasswordTest, self).setUp() self.seed = 1 def test_default_args(self): self.assertEqual(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH, len(pipeline.make_random_password())) def test_probably_only_uses_charset(self): # This is ultimately probablistic since we could randomly select a good character 100000 consecutive times. for char in pipeline.make_random_password(length=100000): self.assertIn(char, pipeline._PASSWORD_CHARSET) def test_pseudorandomly_picks_chars_from_charset(self): random_instance = random.Random(self.seed) expected = ''.join( random_instance.choice(pipeline._PASSWORD_CHARSET) for _ in xrange(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH)) random_instance.seed(self.seed) self.assertEqual(expected, pipeline.make_random_password(choice_fn=random_instance.choice)) @unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled') class ProviderUserStateTestCase(testutil.TestCase): """Tests ProviderUserState behavior.""" def test_get_unlink_form_name(self): google_provider = self.configure_google_provider(enabled=True) state = pipeline.ProviderUserState(google_provider, object(), None) self.assertEqual(google_provider.provider_id + '_unlink_form', state.get_unlink_form_name())
agpl-3.0
achang97/YouTunes
lib/python2.7/site-packages/googleapiclient/channel.py
32
10067
"""Channel notifications support. Classes and functions to support channel subscriptions and notifications on those channels. Notes: - This code is based on experimental APIs and is subject to change. - Notification does not do deduplication of notification ids, that's up to the receiver. - Storing the Channel between calls is up to the caller. Example setting up a channel: # Create a new channel that gets notifications via webhook. channel = new_webhook_channel("https://example.com/my_web_hook") # Store the channel, keyed by 'channel.id'. Store it before calling the # watch method because notifications may start arriving before the watch # method returns. ... resp = service.objects().watchAll( bucket="some_bucket_id", body=channel.body()).execute() channel.update(resp) # Store the channel, keyed by 'channel.id'. Store it after being updated # since the resource_id value will now be correct, and that's needed to # stop a subscription. ... An example Webhook implementation using webapp2. Note that webapp2 puts headers in a case insensitive dictionary, as headers aren't guaranteed to always be upper case. id = self.request.headers[X_GOOG_CHANNEL_ID] # Retrieve the channel by id. channel = ... # Parse notification from the headers, including validating the id. n = notification_from_headers(channel, self.request.headers) # Do app specific stuff with the notification here. if n.resource_state == 'sync': # Code to handle sync state. elif n.resource_state == 'exists': # Code to handle the exists state. elif n.resource_state == 'not_exists': # Code to handle the not exists state. Example of unsubscribing. service.channels().stop(channel.body()) """ from __future__ import absolute_import import datetime import uuid from googleapiclient import errors import six # Oauth2client < 3 has the positional helper in 'util', >= 3 has it # in '_helpers'. 
try: from oauth2client import util except ImportError: from oauth2client import _helpers as util # The unix time epoch starts at midnight 1970. EPOCH = datetime.datetime.utcfromtimestamp(0) # Map the names of the parameters in the JSON channel description to # the parameter names we use in the Channel class. CHANNEL_PARAMS = { 'address': 'address', 'id': 'id', 'expiration': 'expiration', 'params': 'params', 'resourceId': 'resource_id', 'resourceUri': 'resource_uri', 'type': 'type', 'token': 'token', } X_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID' X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER' X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE' X_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI' X_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID' def _upper_header_keys(headers): new_headers = {} for k, v in six.iteritems(headers): new_headers[k.upper()] = v return new_headers class Notification(object): """A Notification from a Channel. Notifications are not usually constructed directly, but are returned from functions like notification_from_headers(). Attributes: message_number: int, The unique id number of this notification. state: str, The state of the resource being monitored. uri: str, The address of the resource being monitored. resource_id: str, The unique identifier of the version of the resource at this event. """ @util.positional(5) def __init__(self, message_number, state, resource_uri, resource_id): """Notification constructor. Args: message_number: int, The unique id number of this notification. state: str, The state of the resource being monitored. Can be one of "exists", "not_exists", or "sync". resource_uri: str, The address of the resource being monitored. resource_id: str, The identifier of the watched resource. """ self.message_number = message_number self.state = state self.resource_uri = resource_uri self.resource_id = resource_id class Channel(object): """A Channel for notifications. 
Usually not constructed directly, instead it is returned from helper functions like new_webhook_channel(). Attributes: type: str, The type of delivery mechanism used by this channel. For example, 'web_hook'. id: str, A UUID for the channel. token: str, An arbitrary string associated with the channel that is delivered to the target address with each event delivered over this channel. address: str, The address of the receiving entity where events are delivered. Specific to the channel type. expiration: int, The time, in milliseconds from the epoch, when this channel will expire. params: dict, A dictionary of string to string, with additional parameters controlling delivery channel behavior. resource_id: str, An opaque id that identifies the resource that is being watched. Stable across different API versions. resource_uri: str, The canonicalized ID of the watched resource. """ @util.positional(5) def __init__(self, type, id, token, address, expiration=None, params=None, resource_id="", resource_uri=""): """Create a new Channel. In user code, this Channel constructor will not typically be called manually since there are functions for creating channels for each specific type with a more customized set of arguments to pass. Args: type: str, The type of delivery mechanism used by this channel. For example, 'web_hook'. id: str, A UUID for the channel. token: str, An arbitrary string associated with the channel that is delivered to the target address with each event delivered over this channel. address: str, The address of the receiving entity where events are delivered. Specific to the channel type. expiration: int, The time, in milliseconds from the epoch, when this channel will expire. params: dict, A dictionary of string to string, with additional parameters controlling delivery channel behavior. resource_id: str, An opaque id that identifies the resource that is being watched. Stable across different API versions. 
resource_uri: str, The canonicalized ID of the watched resource. """ self.type = type self.id = id self.token = token self.address = address self.expiration = expiration self.params = params self.resource_id = resource_id self.resource_uri = resource_uri def body(self): """Build a body from the Channel. Constructs a dictionary that's appropriate for passing into watch() methods as the value of body argument. Returns: A dictionary representation of the channel. """ result = { 'id': self.id, 'token': self.token, 'type': self.type, 'address': self.address } if self.params: result['params'] = self.params if self.resource_id: result['resourceId'] = self.resource_id if self.resource_uri: result['resourceUri'] = self.resource_uri if self.expiration: result['expiration'] = self.expiration return result def update(self, resp): """Update a channel with information from the response of watch(). When a request is sent to watch() a resource, the response returned from the watch() request is a dictionary with updated channel information, such as the resource_id, which is needed when stopping a subscription. Args: resp: dict, The response from a watch() method. """ for json_name, param_name in six.iteritems(CHANNEL_PARAMS): value = resp.get(json_name) if value is not None: setattr(self, param_name, value) def notification_from_headers(channel, headers): """Parse a notification from the webhook request headers, validate the notification, and return a Notification object. Args: channel: Channel, The channel that the notification is associated with. headers: dict, A dictionary like object that contains the request headers from the webhook HTTP request. Returns: A Notification object. Raises: errors.InvalidNotificationError if the notification is invalid. ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int. 
""" headers = _upper_header_keys(headers) channel_id = headers[X_GOOG_CHANNEL_ID] if channel.id != channel_id: raise errors.InvalidNotificationError( 'Channel id mismatch: %s != %s' % (channel.id, channel_id)) else: message_number = int(headers[X_GOOG_MESSAGE_NUMBER]) state = headers[X_GOOG_RESOURCE_STATE] resource_uri = headers[X_GOOG_RESOURCE_URI] resource_id = headers[X_GOOG_RESOURCE_ID] return Notification(message_number, state, resource_uri, resource_id) @util.positional(2) def new_webhook_channel(url, token=None, expiration=None, params=None): """Create a new webhook Channel. Args: url: str, URL to post notifications to. token: str, An arbitrary string associated with the channel that is delivered to the target address with each notification delivered over this channel. expiration: datetime.datetime, A time in the future when the channel should expire. Can also be None if the subscription should use the default expiration. Note that different services may have different limits on how long a subscription lasts. Check the response from the watch() method to see the value the service has set for an expiration time. params: dict, Extra parameters to pass on channel creation. Currently not used for webhook channels. """ expiration_ms = 0 if expiration: delta = expiration - EPOCH expiration_ms = delta.microseconds/1000 + ( delta.seconds + delta.days*24*3600)*1000 if expiration_ms < 0: expiration_ms = 0 return Channel('web_hook', str(uuid.uuid4()), token, url, expiration=expiration_ms, params=params)
mit
mbauskar/tele-erpnext
erpnext/accounts/doctype/journal_entry/journal_entry.py
3
24285
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cstr, flt, fmt_money, formatdate, getdate from frappe import msgprint, _, scrub from erpnext.setup.utils import get_company_currency from erpnext.controllers.accounts_controller import AccountsController from erpnext.accounts.utils import get_balance_on class JournalEntry(AccountsController): def __init__(self, arg1, arg2=None): super(JournalEntry, self).__init__(arg1, arg2) def get_feed(self): return self.voucher_type def validate(self): if not self.is_opening: self.is_opening='No' self.clearance_date = None super(JournalEntry, self).validate_date_with_fiscal_year() self.validate_party() self.validate_cheque_info() self.validate_entries_for_advance() self.validate_debit_and_credit() self.validate_against_jv() self.validate_against_sales_invoice() self.validate_against_purchase_invoice() self.set_against_account() self.create_remarks() self.set_print_format_fields() self.validate_against_sales_order() self.validate_against_purchase_order() self.check_credit_days() self.validate_expense_claim() self.validate_credit_debit_note() self.validate_empty_accounts_table() self.set_title() def on_submit(self): self.check_credit_limit() self.make_gl_entries() self.update_advance_paid() self.update_expense_claim() def set_title(self): self.title = self.pay_to_recd_from or self.accounts[0].account def update_advance_paid(self): advance_paid = frappe._dict() for d in self.get("accounts"): if d.is_advance: if d.against_sales_order: advance_paid.setdefault("Sales Order", []).append(d.against_sales_order) elif d.against_purchase_order: advance_paid.setdefault("Purchase Order", []).append(d.against_purchase_order) for voucher_type, order_list in advance_paid.items(): for voucher_no in list(set(order_list)): frappe.get_doc(voucher_type, voucher_no).set_total_advance_paid() def 
on_cancel(self): from erpnext.accounts.utils import remove_against_link_from_jv remove_against_link_from_jv(self.doctype, self.name, "against_jv") self.make_gl_entries(1) self.update_advance_paid() self.update_expense_claim() def validate_party(self): for d in self.get("accounts"): account_type = frappe.db.get_value("Account", d.account, "account_type") if account_type in ["Receivable", "Payable"]: if not (d.party_type and d.party): frappe.throw(_("Row {0}: Party Type and Party is required for Receivable / Payable account {1}").format(d.idx, d.account)) elif d.party_type and d.party: frappe.throw(_("Row {0}: Party Type and Party is only applicable against Receivable / Payable account").format(d.idx)) def check_credit_limit(self): customers = list(set([d.party for d in self.get("accounts") if d.party_type=="Customer" and d.party and flt(d.debit) > 0])) if customers: from erpnext.selling.doctype.customer.customer import check_credit_limit for customer in customers: check_credit_limit(customer, self.company) def check_credit_days(self): from erpnext.accounts.party import get_credit_days posting_date = None if self.cheque_date: for d in self.get("accounts"): if d.party_type and d.party and d.get("credit" if d.party_type=="Customer" else "debit") > 0: if d.against_invoice: posting_date = frappe.db.get_value("Sales Invoice", d.against_invoice, "posting_date") elif d.against_voucher: posting_date = frappe.db.get_value("Purchase Invoice", d.against_voucher, "posting_date") credit_days = get_credit_days(d.party_type, d.party, self.company) if posting_date and credit_days: date_diff = (getdate(self.cheque_date) - getdate(posting_date)).days if date_diff > flt(credit_days): msgprint(_("Note: Reference Date exceeds allowed credit days by {0} days for {1} {2}") .format(date_diff - flt(credit_days), d.party_type, d.party)) def validate_cheque_info(self): if self.voucher_type in ['Bank Entry']: if not self.cheque_no or not self.cheque_date: msgprint(_("Reference No & Reference 
Date is required for {0}").format(self.voucher_type), raise_exception=1) if self.cheque_date and not self.cheque_no: msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1) def validate_entries_for_advance(self): for d in self.get('accounts'): if not (d.against_voucher and d.against_invoice and d.against_jv): if (d.party_type == 'Customer' and flt(d.credit) > 0) or \ (d.party_type == 'Supplier' and flt(d.debit) > 0): if not d.is_advance: msgprint(_("Row {0}: Please check 'Is Advance' against Account {1} if this is an advance entry.").format(d.idx, d.account)) elif (d.against_sales_order or d.against_purchase_order) and d.is_advance != "Yes": frappe.throw(_("Row {0}: Payment against Sales/Purchase Order should always be marked as advance").format(d.idx)) def validate_against_jv(self): for d in self.get('accounts'): if d.against_jv: account_root_type = frappe.db.get_value("Account", d.account, "root_type") if account_root_type == "Asset" and flt(d.debit) > 0: frappe.throw(_("For {0}, only credit accounts can be linked against another debit entry") .format(d.account)) elif account_root_type == "Liability" and flt(d.credit) > 0: frappe.throw(_("For {0}, only debit accounts can be linked against another credit entry") .format(d.account)) if d.against_jv == self.name: frappe.throw(_("You can not enter current voucher in 'Against Journal Entry' column")) against_entries = frappe.db.sql("""select * from `tabJournal Entry Account` where account = %s and docstatus = 1 and parent = %s and ifnull(against_jv, '') = '' and ifnull(against_invoice, '') = '' and ifnull(against_voucher, '') = ''""", (d.account, d.against_jv), as_dict=True) if not against_entries: frappe.throw(_("Journal Entry {0} does not have account {1} or already matched against other voucher") .format(d.against_jv, d.account)) else: dr_or_cr = "debit" if d.credit > 0 else "credit" valid = False for jvd in against_entries: if flt(jvd[dr_or_cr]) > 0: valid = True if not valid: 
frappe.throw(_("Against Journal Entry {0} does not have any unmatched {1} entry") .format(d.against_jv, dr_or_cr)) def validate_against_sales_invoice(self): payment_against_voucher = self.validate_account_in_against_voucher("against_invoice", "Sales Invoice") self.validate_against_invoice_fields("Sales Invoice", payment_against_voucher) def validate_against_purchase_invoice(self): payment_against_voucher = self.validate_account_in_against_voucher("against_voucher", "Purchase Invoice") self.validate_against_invoice_fields("Purchase Invoice", payment_against_voucher) def validate_against_sales_order(self): payment_against_voucher = self.validate_account_in_against_voucher("against_sales_order", "Sales Order") self.validate_against_order_fields("Sales Order", payment_against_voucher) def validate_against_purchase_order(self): payment_against_voucher = self.validate_account_in_against_voucher("against_purchase_order", "Purchase Order") self.validate_against_order_fields("Purchase Order", payment_against_voucher) def validate_account_in_against_voucher(self, against_field, doctype): payment_against_voucher = frappe._dict() field_dict = {'Sales Invoice': ["Customer", "Debit To"], 'Purchase Invoice': ["Supplier", "Credit To"], 'Sales Order': ["Customer"], 'Purchase Order': ["Supplier"] } for d in self.get("accounts"): if d.get(against_field): dr_or_cr = "credit" if against_field in ["against_invoice", "against_sales_order"] \ else "debit" if against_field in ["against_invoice", "against_sales_order"] and flt(d.debit) > 0: frappe.throw(_("Row {0}: Debit entry can not be linked with a {1}").format(d.idx, doctype)) if against_field in ["against_voucher", "against_purchase_order"] and flt(d.credit) > 0: frappe.throw(_("Row {0}: Credit entry can not be linked with a {1}").format(d.idx, doctype)) against_voucher = frappe.db.get_value(doctype, d.get(against_field), [scrub(dt) for dt in field_dict.get(doctype)]) if against_field in ["against_invoice", "against_voucher"]: if 
(against_voucher[0] !=d.party or against_voucher[1] != d.account): frappe.throw(_("Row {0}: Party / Account does not match with \ Customer / Debit To in {1}").format(d.idx, doctype)) else: payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr))) if against_field in ["against_sales_order", "against_purchase_order"]: if against_voucher != d.party: frappe.throw(_("Row {0}: {1} {2} does not match with {3}") \ .format(d.idx, d.party_type, d.party, doctype)) elif d.is_advance == "Yes": payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr))) return payment_against_voucher def validate_against_invoice_fields(self, doctype, payment_against_voucher): for voucher_no, payment_list in payment_against_voucher.items(): voucher_properties = frappe.db.get_value(doctype, voucher_no, ["docstatus", "outstanding_amount"]) if voucher_properties[0] != 1: frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no)) if flt(voucher_properties[1]) < flt(sum(payment_list)): frappe.throw(_("Payment against {0} {1} cannot be greater \ than Outstanding Amount {2}").format(doctype, voucher_no, voucher_properties[1])) def validate_against_order_fields(self, doctype, payment_against_voucher): for voucher_no, payment_list in payment_against_voucher.items(): voucher_properties = frappe.db.get_value(doctype, voucher_no, ["docstatus", "per_billed", "status", "advance_paid", "base_grand_total"]) if voucher_properties[0] != 1: frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no)) if flt(voucher_properties[1]) >= 100: frappe.throw(_("{0} {1} is fully billed").format(doctype, voucher_no)) if cstr(voucher_properties[2]) == "Stopped": frappe.throw(_("{0} {1} is stopped").format(doctype, voucher_no)) if flt(voucher_properties[4]) < flt(voucher_properties[3]) + flt(sum(payment_list)): frappe.throw(_("Advance paid against {0} {1} cannot be greater \ than Grand Total {2}").format(doctype, voucher_no, 
voucher_properties[3])) def set_against_account(self): accounts_debited, accounts_credited = [], [] for d in self.get("accounts"): if flt(d.debit > 0): accounts_debited.append(d.account) if flt(d.credit) > 0: accounts_credited.append(d.account) for d in self.get("accounts"): if flt(d.debit > 0): d.against_account = ", ".join(list(set(accounts_credited))) if flt(d.credit > 0): d.against_account = ", ".join(list(set(accounts_debited))) def validate_debit_and_credit(self): self.total_debit, self.total_credit, self.difference = 0, 0, 0 for d in self.get("accounts"): if d.debit and d.credit: frappe.throw(_("You cannot credit and debit same account at the same time")) self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "accounts")) self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "accounts")) self.difference = flt(self.total_debit, self.precision("total_debit")) - \ flt(self.total_credit, self.precision("total_credit")) if self.difference: frappe.throw(_("Total Debit must be equal to Total Credit. 
The difference is {0}") .format(self.difference)) def create_remarks(self): r = [] if self.cheque_no: if self.cheque_date: r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date))) else: msgprint(_("Please enter Reference date"), raise_exception=frappe.MandatoryError) company_currency = get_company_currency(self.company) for d in self.get('accounts'): if d.against_invoice and d.credit: r.append(_("{0} against Sales Invoice {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_invoice)) if d.against_sales_order and d.credit: r.append(_("{0} against Sales Order {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_sales_order)) if d.against_voucher and d.debit: bill_no = frappe.db.sql("""select bill_no, bill_date from `tabPurchase Invoice` where name=%s""", d.against_voucher) if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \ not in ['na', 'not applicable', 'none']: r.append(_('{0} against Bill {1} dated {2}').format(fmt_money(flt(d.debit), currency=company_currency), bill_no[0][0], bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d')))) if d.against_purchase_order and d.debit: r.append(_("{0} against Purchase Order {1}").format(fmt_money(flt(d.credit), currency = company_currency), \ d.against_purchase_order)) if self.user_remark: r.append(_("Note: {0}").format(self.user_remark)) if r: self.remark = ("\n").join(r) #User Remarks is not mandatory def set_print_format_fields(self): for d in self.get('accounts'): if d.party_type and d.party: if not self.pay_to_recd_from: self.pay_to_recd_from = frappe.db.get_value(d.party_type, d.party, "customer_name" if d.party_type=="Customer" else "supplier_name") self.set_total_amount(d.debit or d.credit) elif frappe.db.get_value("Account", d.account, "account_type") in ["Bank", "Cash"]: self.set_total_amount(d.debit or d.credit) def set_total_amount(self, amt): company_currency = get_company_currency(self.company) 
self.total_amount = amt from frappe.utils import money_in_words self.total_amount_in_words = money_in_words(amt, company_currency) def make_gl_entries(self, cancel=0, adv_adj=0): from erpnext.accounts.general_ledger import make_gl_entries gl_map = [] for d in self.get("accounts"): if d.debit or d.credit: gl_map.append( self.get_gl_dict({ "account": d.account, "party_type": d.party_type, "party": d.party, "against": d.against_account, "debit": flt(d.debit, self.precision("debit", "accounts")), "credit": flt(d.credit, self.precision("credit", "accounts")), "against_voucher_type": (("Purchase Invoice" if d.against_voucher else None) or ("Sales Invoice" if d.against_invoice else None) or ("Journal Entry" if d.against_jv else None) or ("Sales Order" if d.against_sales_order else None) or ("Purchase Order" if d.against_purchase_order else None)), "against_voucher": d.against_voucher or d.against_invoice or d.against_jv or d.against_sales_order or d.against_purchase_order, "remarks": self.remark, "cost_center": d.cost_center }) ) if gl_map: make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj) def get_balance(self): if not self.get('accounts'): msgprint(_("'Entries' cannot be empty"), raise_exception=True) else: flag, self.total_debit, self.total_credit = 0, 0, 0 diff = flt(self.difference, self.precision("difference")) # If any row without amount, set the diff on that row for d in self.get('accounts'): if not d.credit and not d.debit and diff != 0: if diff>0: d.credit = diff elif diff<0: d.debit = diff flag = 1 # Set the diff in a new row if flag == 0 and diff != 0: jd = self.append('accounts', {}) if diff>0: jd.credit = abs(diff) elif diff<0: jd.debit = abs(diff) self.validate_debit_and_credit() def get_outstanding_invoices(self): self.set('accounts', []) total = 0 for d in self.get_values(): total += flt(d.outstanding_amount, self.precision("credit", "accounts")) jd1 = self.append('accounts', {}) jd1.account = d.account jd1.party = d.party if self.write_off_based_on 
== 'Accounts Receivable': jd1.party_type = "Customer" jd1.credit = flt(d.outstanding_amount, self.precision("credit", "accounts")) jd1.against_invoice = cstr(d.name) elif self.write_off_based_on == 'Accounts Payable': jd1.party_type = "Supplier" jd1.debit = flt(d.outstanding_amount, self.precision("debit", "accounts")) jd1.against_voucher = cstr(d.name) jd2 = self.append('accounts', {}) if self.write_off_based_on == 'Accounts Receivable': jd2.debit = total elif self.write_off_based_on == 'Accounts Payable': jd2.credit = total self.validate_debit_and_credit() def get_values(self): cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \ if flt(self.write_off_amount) > 0 else "" if self.write_off_based_on == 'Accounts Receivable': return frappe.db.sql("""select name, debit_to as account, customer as party, outstanding_amount from `tabSales Invoice` where docstatus = 1 and company = %s and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True) elif self.write_off_based_on == 'Accounts Payable': return frappe.db.sql("""select name, credit_to as account, supplier as party, outstanding_amount from `tabPurchase Invoice` where docstatus = 1 and company = %s and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True) def update_expense_claim(self): for d in self.accounts: if d.against_expense_claim: amt = frappe.db.sql("""select sum(debit) as amt from `tabJournal Entry Account` where against_expense_claim = %s and docstatus = 1""", d.against_expense_claim ,as_dict=1)[0].amt frappe.db.set_value("Expense Claim", d.against_expense_claim , "total_amount_reimbursed", amt) def validate_expense_claim(self): for d in self.accounts: if d.against_expense_claim: sanctioned_amount, reimbursed_amount = frappe.db.get_value("Expense Claim", d.against_expense_claim, ("total_sanctioned_amount", "total_amount_reimbursed")) pending_amount = flt(sanctioned_amount) - flt(reimbursed_amount) if d.debit > pending_amount: frappe.throw(_("Row No {0}: 
Amount cannot be greater than Pending Amount against Expense Claim {1}. Pending Amount is {2}".format(d.idx, d.against_expense_claim, pending_amount))) def validate_credit_debit_note(self): if self.stock_entry: if frappe.db.get_value("Stock Entry", self.stock_entry, "docstatus") != 1: frappe.throw(_("Stock Entry {0} is not submitted").format(self.stock_entry)) if frappe.db.exists({"doctype": "Journal Entry", "stock_entry": self.stock_entry, "docstatus":1}): frappe.msgprint(_("Warning: Another {0} # {1} exists against stock entry {2}".format(self.voucher_type, self.name, self.stock_entry))) def validate_empty_accounts_table(self): if not self.get('accounts'): frappe.throw("Accounts table cannot be blank.") @frappe.whitelist() def get_default_bank_cash_account(company, voucher_type, mode_of_payment=None): from erpnext.accounts.doctype.sales_invoice.sales_invoice import get_bank_cash_account if mode_of_payment: account = get_bank_cash_account(mode_of_payment, company) if account.get("account"): account.update({"balance": get_balance_on(account.get("account"))}) return account if voucher_type=="Bank Entry": account = frappe.db.get_value("Company", company, "default_bank_account") if not account: account = frappe.db.get_value("Account", {"company": company, "account_type": "Bank", "is_group": 0}) elif voucher_type=="Cash Entry": account = frappe.db.get_value("Company", company, "default_cash_account") if not account: account = frappe.db.get_value("Account", {"company": company, "account_type": "Cash", "is_group": 0}) if account: return { "account": account, "balance": get_balance_on(account) } @frappe.whitelist() def get_payment_entry_from_sales_invoice(sales_invoice): from erpnext.accounts.utils import get_balance_on si = frappe.get_doc("Sales Invoice", sales_invoice) jv = get_payment_entry(si) jv.remark = 'Payment received against Sales Invoice {0}. 
{1}'.format(si.name, si.remarks) # credit customer jv.get("accounts")[0].account = si.debit_to jv.get("accounts")[0].party_type = "Customer" jv.get("accounts")[0].party = si.customer jv.get("accounts")[0].balance = get_balance_on(si.debit_to) jv.get("accounts")[0].party_balance = get_balance_on(party=si.customer, party_type="Customer") jv.get("accounts")[0].credit = si.outstanding_amount jv.get("accounts")[0].against_invoice = si.name # debit bank jv.get("accounts")[1].debit = si.outstanding_amount return jv.as_dict() @frappe.whitelist() def get_payment_entry_from_purchase_invoice(purchase_invoice): pi = frappe.get_doc("Purchase Invoice", purchase_invoice) jv = get_payment_entry(pi) jv.remark = 'Payment against Purchase Invoice {0}. {1}'.format(pi.name, pi.remarks) # credit supplier jv.get("accounts")[0].account = pi.credit_to jv.get("accounts")[0].party_type = "Supplier" jv.get("accounts")[0].party = pi.supplier jv.get("accounts")[0].balance = get_balance_on(pi.credit_to) jv.get("accounts")[0].party_balance = get_balance_on(party=pi.supplier, party_type="Supplier") jv.get("accounts")[0].debit = pi.outstanding_amount jv.get("accounts")[0].against_voucher = pi.name # credit bank jv.get("accounts")[1].credit = pi.outstanding_amount return jv.as_dict() def get_payment_entry(doc): bank_account = get_default_bank_cash_account(doc.company, "Bank Entry") jv = frappe.new_doc('Journal Entry') jv.voucher_type = 'Bank Entry' jv.company = doc.company jv.fiscal_year = doc.fiscal_year jv.append("accounts") d2 = jv.append("accounts") if bank_account: d2.account = bank_account["account"] d2.balance = bank_account["balance"] return jv @frappe.whitelist() def get_opening_accounts(company): """get all balance sheet accounts for opening entry""" accounts = frappe.db.sql_list("""select name from tabAccount where is_group=0 and report_type='Balance Sheet' and company=%s""", company) return [{"account": a, "balance": get_balance_on(a)} for a in accounts] def get_against_jv(doctype, txt, 
searchfield, start, page_len, filters): return frappe.db.sql("""select jv.name, jv.posting_date, jv.user_remark from `tabJournal Entry` jv, `tabJournal Entry Account` jv_detail where jv_detail.parent = jv.name and jv_detail.account = %s and ifnull(jv_detail.party, '') = %s and (ifnull(jv_detail.against_invoice, '') = '' and ifnull(jv_detail.against_voucher, '') = '' and ifnull(jv_detail.against_jv, '') = '' ) and jv.docstatus = 1 and jv.{0} like %s order by jv.name desc limit %s, %s""".format(searchfield), (filters.get("account"), cstr(filters.get("party")), "%{0}%".format(txt), start, page_len)) @frappe.whitelist() def get_outstanding(args): args = eval(args) if args.get("doctype") == "Journal Entry" and args.get("party"): against_jv_amount = frappe.db.sql(""" select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0)) from `tabJournal Entry Account` where parent=%s and party=%s and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')='' and ifnull(against_jv, '')=''""", (args['docname'], args['party'])) against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0 if against_jv_amount > 0: return {"credit": against_jv_amount} else: return {"debit": -1* against_jv_amount} elif args.get("doctype") == "Sales Invoice": return { "credit": flt(frappe.db.get_value("Sales Invoice", args["docname"], "outstanding_amount")) } elif args.get("doctype") == "Purchase Invoice": return { "debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"], "outstanding_amount")) } @frappe.whitelist() def get_party_account_and_balance(company, party_type, party): from erpnext.accounts.party import get_party_account account = get_party_account(company, party, party_type) account_balance = get_balance_on(account=account) party_balance = get_balance_on(party_type=party_type, party=party) return { "account": account, "balance": account_balance, "party_balance": party_balance }
agpl-3.0
ubc/compair
compair/api/comparison_example.py
1
7395
import dateutil.parser
from bouncer.constants import READ, EDIT, CREATE, DELETE, MANAGE
from flask import Blueprint
from flask_login import login_required, current_user
from flask_restful import Resource, marshal
from flask_restful.reqparse import RequestParser
from sqlalchemy import desc, or_, func, and_
from sqlalchemy.orm import joinedload, undefer_group, load_only

from . import dataformat
from compair.core import db, event, abort
from compair.authorization import allow, require
from compair.models import Assignment, Course, Answer, ComparisonExample
from .util import new_restful_api, get_model_changes

comparison_example_api = Blueprint('comparison_example_api', __name__)
api = new_restful_api(comparison_example_api)

new_comparison_example_parser = RequestParser()
new_comparison_example_parser.add_argument('answer1_id', required=True, nullable=False)
new_comparison_example_parser.add_argument('answer2_id', required=True, nullable=False)

existing_comparison_example_parser = new_comparison_example_parser.copy()
existing_comparison_example_parser.add_argument('id', required=True, nullable=False)

# events
on_comparison_example_modified = event.signal('COMPARISON_EXAMPLE_MODIFIED')
on_comparison_example_list_get = event.signal('COMPARISON_EXAMPLE_LIST_GET')
on_comparison_example_create = event.signal('COMPARISON_EXAMPLE_CREATE')
on_comparison_example_delete = event.signal('COMPARISON_EXAMPLE_DELETE')


def _practice_answer_or_400(answer_uuid):
    """Resolve *answer_uuid* to an active Answer and flag it as a practice answer.

    Aborts with 400 when the uuid is missing and 404 when no active answer
    matches.  Shared by the create and update endpoints below, which
    previously duplicated this logic four times.
    """
    if not answer_uuid:
        abort(400, title="Comparison Example Not Saved",
            message="Please add two answers with content to the practice answers and try again.")
    answer = Answer.get_active_by_uuid_or_404(answer_uuid)
    answer.practice = True
    return answer


# /id
class ComparisonExampleIdAPI(Resource):
    @login_required
    def post(self, course_uuid, assignment_uuid, comparison_example_uuid):
        """Update the pair of practice answers of an existing comparison example."""
        course = Course.get_active_by_uuid_or_404(course_uuid)
        assignment = Assignment.get_active_by_uuid_or_404(assignment_uuid)
        comparison_example = ComparisonExample.get_active_by_uuid_or_404(comparison_example_uuid)
        require(EDIT, comparison_example,
            title="Comparison Example Not Saved",
            message="Sorry, your role in this course does not allow you to save practice answers.")

        params = existing_comparison_example_parser.parse_args()
        comparison_example.answer1 = _practice_answer_or_400(params.get("answer1_id"))
        comparison_example.answer2 = _practice_answer_or_400(params.get("answer2_id"))

        # capture the pending changes before the commit clears them
        model_changes = get_model_changes(comparison_example)

        db.session.add(comparison_example)
        db.session.commit()

        on_comparison_example_modified.send(
            self,
            event_name=on_comparison_example_modified.name,
            user=current_user,
            course_id=course.id,
            data=model_changes)

        return marshal(comparison_example, dataformat.get_comparison_example())

    @login_required
    def delete(self, course_uuid, assignment_uuid, comparison_example_uuid):
        """Soft-delete a comparison example (marks it inactive)."""
        course = Course.get_active_by_uuid_or_404(course_uuid)
        assignment = Assignment.get_active_by_uuid_or_404(assignment_uuid)
        comparison_example = ComparisonExample.get_active_by_uuid_or_404(comparison_example_uuid)
        require(DELETE, comparison_example,
            title="Comparison Example Not Deleted",
            message="Sorry, your role in this course does not allow you to delete practice answers.")

        formatted_comparison_example = marshal(comparison_example,
            dataformat.get_comparison_example(with_answers=False))

        comparison_example.active = False
        db.session.add(comparison_example)
        db.session.commit()

        on_comparison_example_delete.send(
            self,
            event_name=on_comparison_example_delete.name,
            user=current_user,
            course_id=course.id,
            data=formatted_comparison_example)

        return {'id': comparison_example.uuid}


api.add_resource(ComparisonExampleIdAPI, '/<comparison_example_uuid>')


# /
class ComparisonExampleRootAPI(Resource):
    @login_required
    def get(self, course_uuid, assignment_uuid):
        """List all active comparison examples for an assignment."""
        course = Course.get_active_by_uuid_or_404(course_uuid)
        assignment = Assignment.get_active_by_uuid_or_404(assignment_uuid)
        require(READ, ComparisonExample(course_id=course.id),
            title="Comparison Example Unavailable",
            message="Sorry, your role in this course does not allow you to view practice answers.")

        # Get all comparison examples for this assignment
        comparison_examples = ComparisonExample.query \
            .filter_by(
                active=True,
                assignment_id=assignment.id
            ) \
            .all()

        on_comparison_example_list_get.send(
            self,
            event_name=on_comparison_example_list_get.name,
            user=current_user,
            course_id=course.id,
            data={'assignment_id': assignment.id})

        return {
            "objects": marshal(comparison_examples, dataformat.get_comparison_example())
        }

    @login_required
    def post(self, course_uuid, assignment_uuid):
        """Create a comparison example from two practice answers."""
        course = Course.get_active_by_uuid_or_404(course_uuid)
        assignment = Assignment.get_active_by_uuid_or_404(assignment_uuid)
        require(CREATE, ComparisonExample(assignment=Assignment(course_id=course.id)),
            title="Comparison Example Not Saved",
            message="Sorry, your role in this course does not allow you to save practice answers.")

        new_comparison_example = ComparisonExample(assignment_id=assignment.id)

        params = new_comparison_example_parser.parse_args()
        new_comparison_example.answer1 = _practice_answer_or_400(params.get("answer1_id"))
        new_comparison_example.answer2 = _practice_answer_or_400(params.get("answer2_id"))

        # the original emits the create event before committing; keep that order
        on_comparison_example_create.send(
            self,
            event_name=on_comparison_example_create.name,
            user=current_user,
            course_id=course.id,
            data=marshal(new_comparison_example, dataformat.get_comparison_example(with_answers=False)))

        db.session.add(new_comparison_example)
        db.session.commit()

        return marshal(new_comparison_example, dataformat.get_comparison_example())


api.add_resource(ComparisonExampleRootAPI, '')
gpl-3.0
voidException/zulip
zerver/test_external.py
115
8407
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import TestCase
from unittest import skip

from zerver.forms import not_mit_mailing_list
from zerver.lib.rate_limiter import (
    add_ratelimit_rule,
    clear_user_history,
    remove_ratelimit_rule,
)
from zerver.lib.actions import compute_mit_user_fullname
from zerver.lib.test_helpers import AuthedTestCase
from zerver.models import get_user_profile_by_email
from zerver.lib.test_runner import slow

import time
import ujson
import urllib
import urllib2

from boto.s3.connection import S3Connection
from boto.s3.key import Key
from StringIO import StringIO


class MITNameTest(TestCase):
    """Hesiod name resolution and MIT mailing-list detection."""

    def test_valid_hesiod(self):
        self.assertEquals(compute_mit_user_fullname("starnine@mit.edu"), "Athena Consulting Exchange User")
        self.assertEquals(compute_mit_user_fullname("sipbexch@mit.edu"), "Exch Sipb")

    def test_invalid_hesiod(self):
        self.assertEquals(compute_mit_user_fullname("1234567890@mit.edu"), "1234567890@mit.edu")
        self.assertEquals(compute_mit_user_fullname("ec-discuss@mit.edu"), "ec-discuss@mit.edu")

    def test_mailinglist(self):
        self.assertRaises(ValidationError, not_mit_mailing_list, "1234567890@mit.edu")
        self.assertRaises(ValidationError, not_mit_mailing_list, "ec-discuss@mit.edu")

    def test_notmailinglist(self):
        self.assertTrue(not_mit_mailing_list("sipbexch@mit.edu"))


class S3Test(AuthedTestCase):
    test_uris = []  # full URIs in public bucket
    test_keys = []  # keys in authed bucket

    @slow(2.6, "has to contact external S3 service")
    @skip("Need S3 mock")
    def test_file_upload_authed(self):
        """
        A call to /json/upload_file should return a uri and actually create an object.
        """
        self.login("hamlet@zulip.com")
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"

        result = self.client.post("/json/upload_file", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEquals(base, uri[:len(base)])
        self.test_keys.append(uri[len(base):])

        response = self.client.get(uri)
        redirect_url = response['Location']

        self.assertEquals("zulip!", urllib2.urlopen(redirect_url).read().strip())

    def test_multiple_upload_failure(self):
        """
        Attempting to upload two files should fail.
        """
        self.login("hamlet@zulip.com")
        fp = StringIO("bah!")
        fp.name = "a.txt"
        fp2 = StringIO("pshaw!")
        fp2.name = "b.txt"

        result = self.client.post("/json/upload_file", {'f1': fp, 'f2': fp2})
        self.assert_json_error(result, "You may only upload one file at a time")

    def test_no_file_upload_failure(self):
        """
        Calling this endpoint with no files should fail.
        """
        self.login("hamlet@zulip.com")

        result = self.client.post("/json/upload_file")
        self.assert_json_error(result, "You must specify a file to upload")

    def tearDown(self):
        # clean up
        return
        # TODO: un-deadden this code when we have proper S3 mocking.
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        for uri in self.test_uris:
            key = Key(conn.get_bucket(settings.S3_BUCKET))
            key.name = urllib2.urlparse.urlparse(uri).path[1:]
            key.delete()
            self.test_uris.remove(uri)

        for path in self.test_keys:
            key = Key(conn.get_bucket(settings.S3_AUTH_UPLOADS_BUCKET))
            key.name = path
            key.delete()
            self.test_keys.remove(path)


class RateLimitTests(AuthedTestCase):
    def setUp(self):
        settings.RATE_LIMITING = True
        add_ratelimit_rule(1, 5)  # 5 requests per 1 second

    def tearDown(self):
        settings.RATE_LIMITING = False
        remove_ratelimit_rule(1, 5)

    def send_api_message(self, email, api_key, content):
        return self.client.post("/api/v1/send_message", {"type": "stream",
                                                         "to": "Verona",
                                                         "client": "test suite",
                                                         "content": content,
                                                         "subject": "Test subject",
                                                         "email": email,
                                                         "api-key": api_key})

    def test_headers(self):
        email = "hamlet@zulip.com"
        user = get_user_profile_by_email(email)
        clear_user_history(user)
        api_key = self.get_api_key(email)

        result = self.send_api_message(email, api_key, "some stuff")

        self.assertTrue('X-RateLimit-Remaining' in result)
        self.assertTrue('X-RateLimit-Limit' in result)
        self.assertTrue('X-RateLimit-Reset' in result)

    def test_ratelimit_decrease(self):
        email = "hamlet@zulip.com"
        user = get_user_profile_by_email(email)
        clear_user_history(user)
        api_key = self.get_api_key(email)

        result = self.send_api_message(email, api_key, "some stuff")
        limit = int(result['X-RateLimit-Remaining'])

        result = self.send_api_message(email, api_key, "some stuff 2")
        newlimit = int(result['X-RateLimit-Remaining'])
        self.assertEqual(limit, newlimit + 1)

    @slow(1.1, 'has to sleep to work')
    def test_hit_ratelimits(self):
        email = "cordelia@zulip.com"
        user = get_user_profile_by_email(email)
        clear_user_history(user)

        api_key = self.get_api_key(email)
        # rule is 5 per second; the 6th request (the one inspected below)
        # must be rejected
        for i in range(6):
            result = self.send_api_message(email, api_key, "some stuff %s" % (i,))

        self.assertEqual(result.status_code, 429)
        json = ujson.loads(result.content)
        self.assertEqual(json.get("result"), "error")
        self.assertIn("API usage exceeded rate limit, try again in", json.get("msg"))
        self.assertTrue('Retry-After' in result)
        self.assertIn(result['Retry-After'], json.get("msg"))

        # We actually wait a second here, rather than force-clearing our history,
        # to make sure the rate-limiting code automatically forgives a user
        # after some time has passed.
        time.sleep(1)

        result = self.send_api_message(email, api_key, "Good message")
        self.assert_json_success(result)


class APNSTokenTests(AuthedTestCase):
    def test_add_token(self):
        email = "cordelia@zulip.com"
        self.login(email)

        result = self.client.post('/json/users/me/apns_device_token', {'token': "test_token"})
        self.assert_json_success(result)

    def test_delete_token(self):
        email = "cordelia@zulip.com"
        self.login(email)

        token = "test_token"
        result = self.client.post('/json/users/me/apns_device_token', {'token': token})
        self.assert_json_success(result)

        result = self.client_delete('/json/users/me/apns_device_token', {'token': token})
        self.assert_json_success(result)


class GCMTokenTests(AuthedTestCase):
    def test_add_token(self):
        email = "cordelia@zulip.com"
        self.login(email)

        # BUGFIX: this previously posted to '/json/users/me/apns_device_token'
        # (copy-paste from APNSTokenTests); this class exercises the GCM
        # registration endpoint.
        result = self.client.post('/json/users/me/android_gcm_reg_id', {'token': "test_token"})
        self.assert_json_success(result)

    def test_delete_token(self):
        email = "cordelia@zulip.com"
        self.login(email)

        token = "test_token"
        result = self.client.post('/json/users/me/android_gcm_reg_id', {'token': token})
        self.assert_json_success(result)

        result = self.client.delete('/json/users/me/android_gcm_reg_id',
                                    urllib.urlencode({'token': token}))
        self.assert_json_success(result)

    def test_change_user(self):
        token = "test_token"

        self.login("cordelia@zulip.com")
        result = self.client.post('/json/users/me/android_gcm_reg_id', {'token': token})
        self.assert_json_success(result)

        self.login("hamlet@zulip.com")
        result = self.client.post('/json/users/me/android_gcm_reg_id', {'token': token})
        self.assert_json_success(result)
apache-2.0
Beercow/viper
viper/modules/rats/albertino.py
12
1613
# Originally written by Kevin Breen (@KevTheHermit): # https://github.com/kevthehermit/RATDecoders/blob/master/Albertino.py import re import string from Crypto.Cipher import DES from base64 import b64decode def string_print(line): return filter(lambda x: x in string.printable, line) def get_config(data): m = re.search('\x01\x96\x01(.*)@@', data) raw_config = m.group(0).replace('@','')[3:] return raw_config def decrypt_des(data): key = '&%#@?,:*' iv = '\x12\x34\x56\x78\x90\xab\xcd\xef' cipher = DES.new(key, DES.MODE_CBC, iv) return cipher.decrypt(data) def parsed_config(clean_config): sections = clean_config.split('*') config_dict = {} if len(sections) == 7: config_dict['Version'] = '4.x' config_dict['Domain1'] = sections[0] config_dict['Domain2'] = sections[1] config_dict['RegKey1'] = sections[2] config_dict['RegKey2'] = sections[3] config_dict['Port1'] = sections[4] config_dict['Port2'] = sections[5] config_dict['Mutex'] = sections[6] if len(sections) == 5: config_dict['Version'] = '2.x' config_dict['Domain1'] = sections[0] config_dict['Domain2'] = sections[1] config_dict['Port1'] = sections[2] config_dict['Port2'] = sections[3] config_dict['AntiDebug'] = sections[4] return config_dict def config(data): coded_config = get_config(data) decoded_config = b64decode(coded_config) raw_config = decrypt_des(decoded_config) clean_config = string_print(raw_config) return parsed_config(clean_config)
bsd-3-clause
cmdunkers/DeeperMind
PythonEnv/lib/python2.7/site-packages/scipy/weave/tests/test_scxx_sequence.py
91
13199
""" Test refcounting and behavior of SCXX. """ from __future__ import absolute_import, print_function import time import sys from numpy.testing import (TestCase, assert_, assert_raises, run_module_suite) from scipy.weave import inline_tools from weave_test_utils import debug_print, dec class _TestSequenceBase(TestCase): seq_type = None @dec.slow def test_conversion(self): a = self.seq_type([1]) before = sys.getrefcount(a) inline_tools.inline(" ",['a']) # first call is goofing up refcount. before = sys.getrefcount(a) inline_tools.inline(" ",['a']) after = sys.getrefcount(a) assert_(after == before) @dec.slow def test_in(self): # Test the "in" method for lists. We'll assume it works for # sequences if it works here. a = self.seq_type([1,2,'alpha',3.1416]) item = 1 code = "return_val = a.in(item);" res = inline_tools.inline(code,['a','item']) assert_(res == 1) item = 0 res = inline_tools.inline(code,['a','item']) assert_(res == 0) # check overloaded in(int val) method code = "return_val = a.in(1);" res = inline_tools.inline(code,['a']) assert_(res == 1) code = "return_val = a.in(0);" res = inline_tools.inline(code,['a']) assert_(res == 0) # check overloaded in(double val) method code = "return_val = a.in(3.1416);" res = inline_tools.inline(code,['a']) assert_(res == 1) code = "return_val = a.in(3.1417);" res = inline_tools.inline(code,['a']) assert_(res == 0) # check overloaded in(char* val) method code = 'return_val = a.in("alpha");' res = inline_tools.inline(code,['a']) assert_(res == 1) code = 'return_val = a.in("beta");' res = inline_tools.inline(code,['a']) assert_(res == 0) # check overloaded in(std::string val) method code = """ std::string val = std::string("alpha"); return_val = a.in(val); """ res = inline_tools.inline(code,['a']) assert_(res == 1) code = """ std::string val = std::string("beta"); return_val = a.in(val); """ res = inline_tools.inline(code,['a']) assert_(res == 0) @dec.slow def test_count(self): # Test the "count" method for lists. 
We'll assume it works for # sequences if it works here. a = self.seq_type([1,2,'alpha',3.1416]) item = 1 code = "return_val = a.count(item);" res = inline_tools.inline(code,['a','item']) assert_(res == 1) # check overloaded count(int val) method code = "return_val = a.count(1);" res = inline_tools.inline(code,['a']) assert_(res == 1) # check overloaded count(double val) method code = "return_val = a.count(3.1416);" res = inline_tools.inline(code,['a']) assert_(res == 1) # check overloaded count(char* val) method code = 'return_val = a.count("alpha");' res = inline_tools.inline(code,['a']) assert_(res == 1) # check overloaded count(std::string val) method code = """ std::string alpha = std::string("alpha"); return_val = a.count(alpha); """ res = inline_tools.inline(code,['a']) assert_(res == 1) @dec.slow def test_access_speed(self): N = 1000000 debug_print('%s access -- val = a[i] for N =', (self.seq_type, N)) a = self.seq_type([0]) * N val = 0 t1 = time.time() for i in xrange(N): val = a[i] t2 = time.time() debug_print('python1:', t2 - t1) t1 = time.time() for i in a: val = i t2 = time.time() debug_print('python2:', t2 - t1) code = """ const int N = a.length(); py::object val; for(int i=0; i < N; i++) val = a[i]; """ # compile not included in timing inline_tools.inline(code,['a']) t1 = time.time() inline_tools.inline(code,['a']) t2 = time.time() debug_print('weave:', t2 - t1) @dec.slow def test_access_set_speed(self): N = 1000000 debug_print('%s access/set -- b[i] = a[i] for N =', (self.seq_type,N)) a = self.seq_type([0]) * N # b is always a list so we can assign to it. 
b = [1] * N t1 = time.time() for i in xrange(N): b[i] = a[i] t2 = time.time() debug_print('python:', t2 - t1) a = self.seq_type([0]) * N b = [1] * N code = """ const int N = a.length(); for(int i=0; i < N; i++) b[i] = a[i]; """ # compile not included in timing inline_tools.inline(code,['a','b']) t1 = time.time() inline_tools.inline(code,['a','b']) t2 = time.time() debug_print('weave:', t2 - t1) assert_(list(b) == list(a)) class TestTuple(_TestSequenceBase): seq_type = tuple @dec.slow def test_set_item_operator_equal_fail(self): # Tuples should only allow setting of variables # immediately after creation. a = (1,2,3) assert_raises(TypeError, inline_tools.inline, "a[1] = 1234;",['a']) @dec.slow def test_set_item_operator_equal(self): code = """ py::tuple a(3); a[0] = 1; a[1] = 2; a[2] = 3; return_val = a; """ a = inline_tools.inline(code) assert_(a == (1,2,3)) # returned value should only have a single refcount assert_(sys.getrefcount(a) == 2) @dec.slow def test_set_item_index_error(self): code = """ py::tuple a(3); a[4] = 1; return_val = a; """ assert_raises(IndexError, inline_tools.inline, code) @dec.slow def test_get_item_operator_index_error(self): code = """ py::tuple a(3); py::object b = a[4]; // should fail. """ assert_raises(IndexError, inline_tools.inline, code) class TestList(_TestSequenceBase): seq_type = list @dec.slow def test_append_passed_item(self): a = [] item = 1 # temporary refcount fix until I understand why it incs by one. inline_tools.inline("a.append(item);",['a','item']) del a[0] before1 = sys.getrefcount(a) before2 = sys.getrefcount(item) inline_tools.inline("a.append(item);",['a','item']) assert_(a[0] is item) del a[0] after1 = sys.getrefcount(a) after2 = sys.getrefcount(item) assert_(after1 == before1) assert_(after2 == before2) @dec.slow def test_append(self): a = [] # temporary refcount fix until I understand why it incs by one. 
inline_tools.inline("a.append(1);",['a']) del a[0] before1 = sys.getrefcount(a) # check overloaded append(int val) method inline_tools.inline("a.append(1234);",['a']) assert_(sys.getrefcount(a[0]) == 2) assert_(a[0] == 1234) del a[0] # check overloaded append(double val) method inline_tools.inline("a.append(123.0);",['a']) assert_(sys.getrefcount(a[0]) == 2) assert_(a[0] == 123.0) del a[0] # check overloaded append(char* val) method inline_tools.inline('a.append("bubba");',['a']) assert_(sys.getrefcount(a[0]) == 2) assert_(a[0] == 'bubba') del a[0] # check overloaded append(std::string val) method inline_tools.inline('a.append(std::string("sissy"));',['a']) assert_(sys.getrefcount(a[0]) == 2) assert_(a[0] == 'sissy') del a[0] after1 = sys.getrefcount(a) assert_(after1 == before1) @dec.slow def test_insert(self): a = [1,2,3] a.insert(1,234) del a[1] # temporary refcount fix until I understand why it incs by one. inline_tools.inline("a.insert(1,1234);",['a']) del a[1] before1 = sys.getrefcount(a) # check overloaded insert(int ndx, int val) method inline_tools.inline("a.insert(1,1234);",['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 1234) del a[1] # check overloaded insert(int ndx, double val) method inline_tools.inline("a.insert(1,123.0);",['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 123.0) del a[1] # check overloaded insert(int ndx, char* val) method inline_tools.inline('a.insert(1,"bubba");',['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 'bubba') del a[1] # check overloaded insert(int ndx, std::string val) method inline_tools.inline('a.insert(1,std::string("sissy"));',['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 'sissy') del a[0] after1 = sys.getrefcount(a) assert_(after1 == before1) @dec.slow def test_set_item_operator_equal(self): a = self.seq_type([1,2,3]) # temporary refcount fix until I understand why it incs by one. 
inline_tools.inline("a[1] = 1234;",['a']) before1 = sys.getrefcount(a) # check overloaded insert(int ndx, int val) method inline_tools.inline("a[1] = 1234;",['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 1234) # check overloaded insert(int ndx, double val) method inline_tools.inline("a[1] = 123.0;",['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 123.0) # check overloaded insert(int ndx, char* val) method inline_tools.inline('a[1] = "bubba";',['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 'bubba') # check overloaded insert(int ndx, std::string val) method code = """ std::string val = std::string("sissy"); a[1] = val; """ inline_tools.inline(code,['a']) assert_(sys.getrefcount(a[1]) == 2) assert_(a[1] == 'sissy') after1 = sys.getrefcount(a) assert_(after1 == before1) @dec.slow def test_set_item_operator_equal_created(self): code = """ py::list a(3); a[0] = 1; a[1] = 2; a[2] = 3; return_val = a; """ a = inline_tools.inline(code) assert_(a == [1,2,3]) # returned value should only have a single refcount assert_(sys.getrefcount(a) == 2) @dec.slow def test_set_item_index_error(self): code = """ py::list a(3); a[4] = 1; """ assert_raises(IndexError, inline_tools.inline, code) @dec.slow def test_get_item_index_error(self): code = """ py::list a(3); py::object o = a[4]; """ assert_raises(IndexError, inline_tools.inline, code) @dec.slow def test_string_add_speed(self): N = 1000000 debug_print('string add -- b[i] = a[i] + "blah" for N =', N) a = ["blah"] * N desired = [1] * N t1 = time.time() for i in xrange(N): desired[i] = a[i] + 'blah' t2 = time.time() debug_print('python:', t2 - t1) a = ["blah"] * N b = [1] * N code = """ const int N = a.length(); std::string blah = std::string("blah"); for(int i=0; i < N; i++) b[i] = convert_to_string(a[i],"a") + blah; """ # compile not included in timing inline_tools.inline(code,['a','b']) t1 = time.time() inline_tools.inline(code,['a','b']) t2 = time.time() debug_print('weave:', t2 - t1) assert_(b == 
desired) @dec.slow def test_int_add_speed(self): N = 1000000 debug_print('int add -- b[i] = a[i] + 1 for N =', N) a = [0] * N desired = [1] * N t1 = time.time() for i in xrange(N): desired[i] = a[i] + 1 t2 = time.time() debug_print('python:', t2 - t1) a = [0] * N b = [0] * N code = """ const int N = a.length(); for(int i=0; i < N; i++) b[i] = (int)a[i] + 1; """ # compile not included in timing inline_tools.inline(code,['a','b']) t1 = time.time() inline_tools.inline(code,['a','b']) t2 = time.time() debug_print('weave:', t2 - t1) assert_(b == desired) if __name__ == "__main__": run_module_suite()
bsd-3-clause
TheIoTLearningInitiative/CodeLabs
Tulum/device/iot101inc.py
3
2148
#!/usr/bin/python import paho.mqtt.client as paho import psutil import pywapi import signal import sys import time from threading import Thread def functionApiWeather(): data = pywapi.get_weather_from_weather_com('MXJO0043', 'metric') message = data['location']['name'] message = message + ", Temperature " + data['current_conditions']['temperature'] + " C" message = message + ", Atmospheric Pressure " + data['current_conditions']['barometer']['reading'][:-3] + " mbar" return message def functionDataActuator(status): print "Data Actuator Status %s" % status def functionDataActuatorMqttOnMessage(mosq, obj, msg): print "Data Sensor Mqtt Subscribe Message!" functionDataActuator(msg.payload) def functionDataActuatorMqttSubscribe(): mqttclient = paho.Client() mqttclient.on_message = functionDataActuatorMqttOnMessage mqttclient.connect("test.mosquitto.org", 1883, 60) mqttclient.subscribe("IoT101/DataActuator", 0) while mqttclient.loop() == 0: pass def functionDataSensor(): netdata = psutil.net_io_counters() data = netdata.packets_sent + netdata.packets_recv return data def functionDataSensorMqttOnPublish(mosq, obj, msg): print "Data Sensor Mqtt Published!" def functionDataSensorMqttPublish(): mqttclient = paho.Client() mqttclient.on_publish = functionDataSensorMqttOnPublish mqttclient.connect("test.mosquitto.org", 1883, 60) while True: data = functionDataSensor() topic = "IoT101/DataSensor" mqttclient.publish(topic, data) time.sleep(1) def functionSignalHandler(signal, frame): sys.exit(0) if __name__ == '__main__': signal.signal(signal.SIGINT, functionSignalHandler) threadmqttpublish = Thread(target=functionDataSensorMqttPublish) threadmqttpublish.start() threadmqttsubscribe = Thread(target=functionDataActuatorMqttSubscribe) threadmqttsubscribe.start() while True: print "Hello Internet of Things 101" print "Data Sensor: %s " % functionDataSensor() print "API Weather: %s " % functionApiWeather() time.sleep(5) # End of File
apache-2.0
Bitcoin-ABC/bitcoin-abc
test/functional/test_framework/key.py
1
13283
#!/usr/bin/env python3
# Copyright (c) 2019 Pieter Wuille
# Copyright (c) 2019-2020 The Bitcoin developers
"""Test-only secp256k1 elliptic curve implementation

WARNING: This code is slow, uses bad randomness, does not properly protect
keys, and is trivially vulnerable to side channel attacks. Do not use for
anything but tests.
"""
import hashlib
import random

from .address import byte_to_base58


def modinv(a, n):
    """Compute the modular inverse of a modulo n

    See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers
    """
    # Iterative extended Euclid; returns None when gcd(a, n) != 1.
    t1, t2 = 0, 1
    r1, r2 = n, a
    while r2 != 0:
        q = r1 // r2
        t1, t2 = t2, t1 - q * t2
        r1, r2 = r2, r1 - q * r2
    if r1 > 1:
        return None
    if t1 < 0:
        t1 += n
    return t1


def jacobi_symbol(n, k):
    """Compute the Jacobi symbol of n modulo k

    See http://en.wikipedia.org/wiki/Jacobi_symbol
    """
    assert k > 0 and k & 1
    n %= k
    t = 0
    while n != 0:
        while n & 1 == 0:
            n >>= 1
            r = k & 7
            t ^= (r == 3 or r == 5)
        n, k = k, n
        t ^= (n & k & 3 == 3)
        n = n % k
    if k == 1:
        return -1 if t else 1
    return 0


def modsqrt(a, p):
    """Compute the square root of a modulo p

    For p = 3 mod 4, if a square root exists, it is equal to
    a**((p+1)/4) mod p.
    """
    assert(p % 4 == 3)  # Only p = 3 mod 4 is implemented
    sqrt = pow(a, (p + 1) // 4, p)
    if pow(sqrt, 2, p) == a % p:
        return sqrt
    return None


class EllipticCurve:
    def __init__(self, p, a, b):
        """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p)."""
        self.p = p
        self.a = a % p
        self.b = b % p

    def affine(self, p1):
        """Convert a Jacobian point tuple p1 to affine form, or None if at infinity."""
        x1, y1, z1 = p1
        if z1 == 0:
            return None
        inv = modinv(z1, self.p)
        inv_2 = (inv**2) % self.p
        inv_3 = (inv_2 * inv) % self.p
        return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1)

    def negate(self, p1):
        """Negate a Jacobian point tuple p1."""
        x1, y1, z1 = p1
        return (x1, (self.p - y1) % self.p, z1)

    def on_curve(self, p1):
        """Determine whether a Jacobian tuple p is on the curve (and not infinity)"""
        x1, y1, z1 = p1
        z2 = pow(z1, 2, self.p)
        z4 = pow(z2, 2, self.p)
        return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 *
                            z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0

    def is_x_coord(self, x):
        """Test whether x is a valid X coordinate on the curve."""
        x_3 = pow(x, 3, self.p)
        return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1

    def lift_x(self, x):
        """Given an X coordinate on the curve, return a corresponding affine point."""
        x_3 = pow(x, 3, self.p)
        v = x_3 + self.a * x + self.b
        y = modsqrt(v, self.p)
        if y is None:
            return None
        return (x, y, 1)

    def double(self, p1):
        """Double a Jacobian tuple p1"""
        x1, y1, z1 = p1
        if z1 == 0:
            return (0, 1, 0)
        y1_2 = (y1**2) % self.p
        y1_4 = (y1_2**2) % self.p
        x1_2 = (x1**2) % self.p
        s = (4 * x1 * y1_2) % self.p
        m = 3 * x1_2
        if self.a:
            m += self.a * pow(z1, 4, self.p)
        m = m % self.p
        x2 = (m**2 - 2 * s) % self.p
        y2 = (m * (s - x2) - 8 * y1_4) % self.p
        z2 = (2 * y1 * z1) % self.p
        return (x2, y2, z2)

    def add_mixed(self, p1, p2):
        """Add a Jacobian tuple p1 and an affine tuple p2"""
        x1, y1, z1 = p1
        x2, y2, z2 = p2
        assert(z2 == 1)
        if z1 == 0:
            return p2
        z1_2 = (z1**2) % self.p
        z1_3 = (z1_2 * z1) % self.p
        u2 = (x2 * z1_2) % self.p
        s2 = (y2 * z1_3) % self.p
        if x1 == u2:
            if (y1 != s2):
                # p1 and p2 are inverses: result is the point at infinity
                return (0, 1, 0)
            return self.double(p1)
        h = u2 - x1
        r = s2 - y1
        h_2 = (h**2) % self.p
        h_3 = (h_2 * h) % self.p
        u1_h_2 = (x1 * h_2) % self.p
        x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p
        y3 = (r * (u1_h_2 - x3) - y1 * h_3) % self.p
        z3 = (h * z1) % self.p
        return (x3, y3, z3)

    def add(self, p1, p2):
        """Add two Jacobian tuples p1 and p2"""
        x1, y1, z1 = p1
        x2, y2, z2 = p2
        # Shortcuts for infinity and for the cheaper mixed addition.
        if z1 == 0:
            return p2
        if z2 == 0:
            return p1
        if z1 == 1:
            return self.add_mixed(p2, p1)
        if z2 == 1:
            return self.add_mixed(p1, p2)
        z1_2 = (z1**2) % self.p
        z1_3 = (z1_2 * z1) % self.p
        z2_2 = (z2**2) % self.p
        z2_3 = (z2_2 * z2) % self.p
        u1 = (x1 * z2_2) % self.p
        u2 = (x2 * z1_2) % self.p
        s1 = (y1 * z2_3) % self.p
        s2 = (y2 * z1_3) % self.p
        if u1 == u2:
            if (s1 != s2):
                return (0, 1, 0)
            return self.double(p1)
        h = u2 - u1
        r = s2 - s1
        h_2 = (h**2) % self.p
        h_3 = (h_2 * h) % self.p
        u1_h_2 = (u1 * h_2) % self.p
        x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p
        y3 = (r * (u1_h_2 - x3) - s1 * h_3) % self.p
        z3 = (h * z1 * z2) % self.p
        return (x3, y3, z3)

    def mul(self, ps):
        """Compute a (multi) point multiplication

        ps is a list of (Jacobian tuple, scalar) pairs.
        """
        r = (0, 1, 0)
        for i in range(255, -1, -1):
            r = self.double(r)
            for (p, n) in ps:
                if ((n >> i) & 1):
                    r = self.add(r, p)
        return r


SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7)
SECP256K1_G = (
    0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
    0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8,
    1)
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2


class ECPubKey():
    """A secp256k1 public key"""

    def __init__(self):
        """Construct an uninitialized public key"""
        self.valid = False

    def set(self, data):
        """Construct a public key from a serialization in compressed or uncompressed format"""
        if (len(data) == 65 and data[0] == 0x04):
            p = (int.from_bytes(data[1:33], 'big'),
                 int.from_bytes(data[33:65], 'big'), 1)
            self.valid = SECP256K1.on_curve(p)
            if self.valid:
                self.p = p
                self.compressed = False
        elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)):
            x = int.from_bytes(data[1:33], 'big')
            if SECP256K1.is_x_coord(x):
                p = SECP256K1.lift_x(x)
                # Make the Y coordinate's parity match the prefix byte.
                if (p[1] & 1) != (data[0] & 1):
                    p = SECP256K1.negate(p)
                self.p = p
                self.valid = True
                self.compressed = True
            else:
                self.valid = False
        else:
            self.valid = False

    @property
    def is_compressed(self):
        return self.compressed

    @property
    def is_valid(self):
        return self.valid

    def get_bytes(self):
        assert(self.valid)
        p = SECP256K1.affine(self.p)
        if p is None:
            return None
        if self.compressed:
            return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big')
        else:
            return bytes([0x04]) + p[0].to_bytes(32, 'big') + \
                p[1].to_bytes(32, 'big')

    def verify_ecdsa(self, sig, msg, low_s=True):
        """Verify a strictly DER-encoded ECDSA signature against this pubkey."""
        assert(self.valid)
        # --- strict DER structure checks ---
        if (sig[1] + 2 != len(sig)):
            return False
        if (len(sig) < 4):
            return False
        if (sig[0] != 0x30):
            return False
        if (sig[2] != 0x02):
            return False
        rlen = sig[3]
        if (len(sig) < 6 + rlen):
            return False
        if rlen < 1 or rlen > 33:
            return False
        if sig[4] >= 0x80:
            return False
        if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):
            return False
        r = int.from_bytes(sig[4:4 + rlen], 'big')
        if (sig[4 + rlen] != 0x02):
            return False
        slen = sig[5 + rlen]
        if slen < 1 or slen > 33:
            return False
        if (len(sig) != 6 + rlen + slen):
            return False
        if sig[6 + rlen] >= 0x80:
            return False
        if (slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80)):
            return False
        s = int.from_bytes(sig[6 + rlen:6 + rlen + slen], 'big')
        # --- range and low-S checks ---
        if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER:
            return False
        if low_s and s >= SECP256K1_ORDER_HALF:
            return False
        # --- the actual ECDSA verification equation ---
        z = int.from_bytes(msg, 'big')
        w = modinv(s, SECP256K1_ORDER)
        u1 = z * w % SECP256K1_ORDER
        u2 = r * w % SECP256K1_ORDER
        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)]))
        if R is None or R[0] != r:
            return False
        return True

    def verify_schnorr(self, sig, msg32):
        assert self.is_valid
        assert len(sig) == 64
        assert len(msg32) == 32

        Rx = sig[:32]
        s = int.from_bytes(sig[32:], 'big')
        e = int.from_bytes(
            hashlib.sha256(
                Rx + self.get_bytes() + msg32).digest(), 'big')
        nege = SECP256K1_ORDER - e

        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, s), (self.p, nege)]))
        if R is None:
            return False
        if jacobi_symbol(R[1], SECP256K1.p) == -1:
            return False

        return R[0] == int.from_bytes(Rx, 'big')


class ECKey():
    """A secp256k1 private key"""

    def __init__(self):
        self.valid = False

    def set(self, secret, compressed):
        """Construct a private key object with given 32-byte secret and compressed flag."""
        assert(len(secret) == 32)
        secret = int.from_bytes(secret, 'big')
        self.valid = (secret > 0 and secret < SECP256K1_ORDER)
        if self.valid:
            self.secret = secret
            self.compressed = compressed

    def generate(self, compressed=True):
        """Generate a random private key (compressed or uncompressed)."""
        self.set(
            random.randrange(
                1, SECP256K1_ORDER).to_bytes(
                32, 'big'), compressed)

    def get_bytes(self):
        """Retrieve the 32-byte representation of this key."""
        assert(self.valid)
        return self.secret.to_bytes(32, 'big')

    @property
    def is_valid(self):
        return self.valid

    @property
    def is_compressed(self):
        return self.compressed

    def get_pubkey(self):
        """Compute an ECPubKey object for this secret key."""
        assert(self.valid)
        ret = ECPubKey()
        p = SECP256K1.mul([(SECP256K1_G, self.secret)])
        ret.p = p
        ret.valid = True
        ret.compressed = self.compressed
        return ret

    def sign_ecdsa(self, msg, low_s=True):
        """Construct a DER-encoded ECDSA signature with this key."""
        assert(self.valid)
        z = int.from_bytes(msg, 'big')
        # Note: no RFC6979, but a simple random nonce (some tests rely on
        # distinct transactions for the same operation)
        k = random.randrange(1, SECP256K1_ORDER)
        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)]))
        r = R[0] % SECP256K1_ORDER
        s = (modinv(k, SECP256K1_ORDER) *
             (z + self.secret * r)) % SECP256K1_ORDER
        if low_s and s > SECP256K1_ORDER_HALF:
            s = SECP256K1_ORDER - s
        rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')
        sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')
        return b'\x30' + \
            bytes([4 + len(rb) + len(sb), 2, len(rb)]) + \
            rb + bytes([2, len(sb)]) + sb

    def sign_schnorr(self, msg32):
        """Create Schnorr signature (BIP-Schnorr convention)."""
        assert self.valid
        assert len(msg32) == 32

        pubkey = self.get_pubkey()
        assert pubkey.is_valid

        k = random.randrange(1, SECP256K1_ORDER)
        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)]))

        # Negate the nonce if R's Y coordinate is not a quadratic residue.
        if jacobi_symbol(R[1], SECP256K1.p) == -1:
            k = SECP256K1_ORDER - k

        Rx = R[0].to_bytes(32, 'big')
        e = int.from_bytes(
            hashlib.sha256(
                Rx + pubkey.get_bytes() + msg32).digest(), 'big')
        s = (k + e * int.from_bytes(self.get_bytes(), 'big')) % SECP256K1_ORDER
        sig = Rx + s.to_bytes(32, 'big')

        assert pubkey.verify_schnorr(sig, msg32)
        return sig


def bytes_to_wif(b, compressed=True):
    if compressed:
        b += b'\x01'
    return byte_to_base58(b, 239)


def generate_wif_key():
    # Makes a WIF privkey for imports
    k = ECKey()
    k.generate()
    return bytes_to_wif(k.get_bytes(), k.is_compressed)
mit
shahankhatch/scikit-learn
examples/text/document_clustering.py
230
8356
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn can be used to cluster documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features instead of standard numpy arrays. Two feature extraction methods can be used in this example: - TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most frequent words to features indices and hence compute a word occurrence frequency (sparse) matrix. The word frequencies are then reweighted using the Inverse Document Frequency (IDF) vector collected feature-wise over the corpus. - HashingVectorizer hashes word occurrences to a fixed dimensional space, possibly with collisions. The word count vectors are then normalized to each have l2-norm equal to one (projected to the euclidean unit-ball) which seems to be important for k-means to work in high dimensional space. HashingVectorizer does not provide IDF weighting as this is a stateless model (the fit method does nothing). When IDF weighting is needed it can be added by pipelining its output to a TfidfTransformer instance. Two algorithms are demoed: ordinary k-means and its more scalable cousin minibatch k-means. Additionally, latent sematic analysis can also be used to reduce dimensionality and discover latent patterns in the data. It can be noted that k-means (and minibatch k-means) are very sensitive to feature scaling and that in this case the IDF weighting helps improve the quality of the clustering by quite a lot as measured against the "ground truth" provided by the class label assignments of the 20 newsgroups dataset. This improvement is not visible in the Silhouette Coefficient which is small for both as this measure seem to suffer from the phenomenon called "Concentration of Measure" or "Curse of Dimensionality" for high dimensional datasets such as text data. 
Other measures such as V-measure and Adjusted Rand Index are information theoretic based evaluation scores: as they are only based on cluster assignments rather than distances, hence not affected by the curse of dimensionality. Note: as k-means is optimizing a non-convex objective function, it will likely end up in a local optimum. Several runs with independent random init might be necessary to get a good convergence. """ # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer from sklearn import metrics from sklearn.cluster import KMeans, MiniBatchKMeans import logging from optparse import OptionParser import sys from time import time import numpy as np # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--lsa", dest="n_components", type="int", help="Preprocess documents with latent semantic analysis.") op.add_option("--no-minibatch", action="store_false", dest="minibatch", default=True, help="Use ordinary k-means algorithm (in batch mode).") op.add_option("--no-idf", action="store_false", dest="use_idf", default=True, help="Disable Inverse Document Frequency feature weighting.") op.add_option("--use-hashing", action="store_true", default=False, help="Use a hashing feature vectorizer") op.add_option("--n-features", type=int, default=10000, help="Maximum number of features (dimensions)" " to extract from text.") op.add_option("--verbose", action="store_true", 
dest="verbose", default=False, help="Print progress reports inside k-means algorithm.") print(__doc__) op.print_help() (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) dataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=42) print("%d documents" % len(dataset.data)) print("%d categories" % len(dataset.target_names)) print() labels = dataset.target true_k = np.unique(labels).shape[0] print("Extracting features from the training dataset using a sparse vectorizer") t0 = time() if opts.use_hashing: if opts.use_idf: # Perform an IDF normalization on the output of HashingVectorizer hasher = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=True, norm=None, binary=False) vectorizer = make_pipeline(hasher, TfidfTransformer()) else: vectorizer = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=False, norm='l2', binary=False) else: vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features, min_df=2, stop_words='english', use_idf=opts.use_idf) X = vectorizer.fit_transform(dataset.data) print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X.shape) print() if opts.n_components: print("Performing dimensionality reduction using LSA") t0 = time() # Vectorizer results are normalized, which makes KMeans behave as # spherical k-means for better results. Since LSA/SVD results are # not normalized, we have to redo the normalization. 
svd = TruncatedSVD(opts.n_components) normalizer = Normalizer(copy=False) lsa = make_pipeline(svd, normalizer) X = lsa.fit_transform(X) print("done in %fs" % (time() - t0)) explained_variance = svd.explained_variance_ratio_.sum() print("Explained variance of the SVD step: {}%".format( int(explained_variance * 100))) print() ############################################################################### # Do the actual clustering if opts.minibatch: km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=opts.verbose) else: km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=opts.verbose) print("Clustering sparse data with %s" % km) t0 = time() km.fit(X) print("done in %0.3fs" % (time() - t0)) print() print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, km.labels_, sample_size=1000)) print() if not opts.use_hashing: print("Top terms per cluster:") if opts.n_components: original_space_centroids = svd.inverse_transform(km.cluster_centers_) order_centroids = original_space_centroids.argsort()[:, ::-1] else: order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(true_k): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print()
bsd-3-clause
TeamExodus/external_chromium_org
chrome/test/ispy/common/image_tools.py
124
10124
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utilities for performing pixel-by-pixel image comparision.""" import itertools import StringIO from PIL import Image def _AreTheSameSize(images): """Returns whether a set of images are the size size. Args: images: a list of images to compare. Returns: boolean. Raises: Exception: One image or fewer is passed in. """ if len(images) > 1: return all(images[0].size == img.size for img in images[1:]) else: raise Exception('No images passed in.') def _GetDifferenceWithMask(image1, image2, mask=None, masked_color=(225, 225, 225, 255), same_color=(255, 255, 255, 255), different_color=(210, 0, 0, 255)): """Returns an image representing the difference between the two images. This function computes the difference between two images taking into account a mask if it is provided. The final three arguments represent the coloration of the generated image. Args: image1: the first image to compare. image2: the second image to compare. mask: an optional mask image consisting of only black and white pixels where white pixels indicate the portion of the image to be masked out. masked_color: the color of a masked section in the resulting image. same_color: the color of an unmasked section that is the same. between images 1 and 2 in the resulting image. different_color: the color of an unmasked section that is different between images 1 and 2 in the resulting image. Returns: A 2-tuple with an image representing the unmasked difference between the two input images and the number of different pixels. Raises: Exception: if image1, image2, and mask are not the same size. 
""" image_mask = mask if not mask: image_mask = Image.new('RGBA', image1.size, (0, 0, 0, 255)) if not _AreTheSameSize([image1, image2, image_mask]): raise Exception('images and mask must be the same size.') image_diff = Image.new('RGBA', image1.size, (0, 0, 0, 255)) data = [] diff_pixels = 0 for m, px1, px2 in itertools.izip(image_mask.getdata(), image1.getdata(), image2.getdata()): if m == (255, 255, 255, 255): data.append(masked_color) elif px1 == px2: data.append(same_color) else: data.append(different_color) diff_pixels += 1 image_diff.putdata(data) return (image_diff, diff_pixels) def CreateMask(images): """Computes a mask for a set of images. Returns a difference mask that is computed from the images which are passed in. The mask will have a white pixel anywhere that the input images differ and a black pixel everywhere else. Args: images: list of images to compute the mask from. Returns: an image of only black and white pixels where white pixels represent areas in the input images that have differences. Raises: Exception: if the images passed in are not of the same size. Exception: if fewer than one image is passed in. """ if not images: raise Exception('mask must be created from one or more images.') mask = Image.new('RGBA', images[0].size, (0, 0, 0, 255)) image = images[0] for other_image in images[1:]: mask = _GetDifferenceWithMask( image, other_image, mask, masked_color=(255, 255, 255, 255), same_color=(0, 0, 0, 255), different_color=(255, 255, 255, 255))[0] return mask def AddMasks(masks): """Combines a list of mask images into one mask image. Args: masks: a list of mask-images. Returns: a new mask that represents the sum of the masked regions of the passed in list of mask-images. Raises: Exception: if masks is an empty list, or if masks are not the same size. 
""" if not masks: raise Exception('masks must be a list containing at least one image.') if len(masks) > 1 and not _AreTheSameSize(masks): raise Exception('masks in list must be of the same size.') white = (255, 255, 255, 255) black = (0, 0, 0, 255) masks_data = [mask.getdata() for mask in masks] image = Image.new('RGBA', masks[0].size, black) image.putdata([white if white in px_set else black for px_set in itertools.izip(*masks_data)]) return image def ConvertDiffToMask(diff): """Converts a Diff image into a Mask image. Args: diff: the diff image to convert. Returns: a new mask image where everything that was masked or different in the diff is now masked. """ white = (255, 255, 255, 255) black = (0, 0, 0, 255) diff_data = diff.getdata() image = Image.new('RGBA', diff.size, black) image.putdata([black if px == white else white for px in diff_data]) return image def VisualizeImageDifferences(image1, image2, mask=None): """Returns an image repesenting the unmasked differences between two images. Iterates through the pixel values of two images and an optional mask. If the pixel values are the same, or the pixel is masked, (0,0,0) is stored for that pixel. Otherwise, (255,255,255) is stored. This ultimately produces an image where unmasked differences between the two images are white pixels, and everything else is black. Args: image1: an RGB image image2: another RGB image of the same size as image1. mask: an optional RGB image consisting of only white and black pixels where the white pixels represent the parts of the images to be masked out. Returns: A 2-tuple with an image representing the unmasked difference between the two input images and the number of different pixels. Raises: Exception: if the two images and optional mask are different sizes. """ return _GetDifferenceWithMask(image1, image2, mask) def InflateMask(image, passes): """A function that adds layers of pixels around the white edges of a mask. 
This function evaluates a 'frontier' of valid pixels indices. Initially, this frontier contains all indices in the image. However, with each pass only the pixels' indices which were added to the mask by inflation are added to the next pass's frontier. This gives the algorithm a large upfront cost that scales negligably when the number of passes is increased. Args: image: the RGBA PIL.Image mask to inflate. passes: the number of passes to inflate the image by. Returns: A RGBA PIL.Image. """ inflated = Image.new('RGBA', image.size) new_dataset = list(image.getdata()) old_dataset = list(image.getdata()) frontier = set(range(len(old_dataset))) new_frontier = set() l = [-1, 1] def _ShadeHorizontal(index, px): col = index % image.size[0] if px == (255, 255, 255, 255): for x in l: if 0 <= col + x < image.size[0]: if old_dataset[index + x] != (255, 255, 255, 255): new_frontier.add(index + x) new_dataset[index + x] = (255, 255, 255, 255) def _ShadeVertical(index, px): row = index / image.size[0] if px == (255, 255, 255, 255): for x in l: if 0 <= row + x < image.size[1]: if old_dataset[index + image.size[0] * x] != (255, 255, 255, 255): new_frontier.add(index + image.size[0] * x) new_dataset[index + image.size[0] * x] = (255, 255, 255, 255) for _ in range(passes): for index in frontier: _ShadeHorizontal(index, old_dataset[index]) _ShadeVertical(index, old_dataset[index]) old_dataset, new_dataset = new_dataset, new_dataset frontier, new_frontier = new_frontier, set() inflated.putdata(new_dataset) return inflated def TotalDifferentPixels(image1, image2, mask=None): """Computes the number of different pixels between two images. Args: image1: the first RGB image to be compared. image2: the second RGB image to be compared. mask: an optional RGB image of only black and white pixels where white pixels indicate the parts of the image to be masked out. Returns: the number of differing pixels between the images. 
Raises: Exception: if the images to be compared and the mask are not the same size. """ image_mask = mask if not mask: image_mask = Image.new('RGBA', image1.size, (0, 0, 0, 255)) if _AreTheSameSize([image1, image2, image_mask]): total_diff = 0 for px1, px2, m in itertools.izip(image1.getdata(), image2.getdata(), image_mask.getdata()): if m == (255, 255, 255, 255): continue elif px1 != px2: total_diff += 1 else: continue return total_diff else: raise Exception('images and mask must be the same size') def SameImage(image1, image2, mask=None): """Returns a boolean representing whether the images are the same. Returns a boolean indicating whether two images are similar enough to be considered the same. Essentially wraps the TotalDifferentPixels function. Args: image1: an RGB image to compare. image2: an RGB image to compare. mask: an optional image of only black and white pixels where white pixels are masked out Returns: True if the images are similar, False otherwise. Raises: Exception: if the images (and mask) are different sizes. """ different_pixels = TotalDifferentPixels(image1, image2, mask) return different_pixels == 0 def EncodePNG(image): """Returns the PNG file-contents of the image. Args: image: an RGB image to be encoded. Returns: a base64 encoded string representing the image. """ f = StringIO.StringIO() image.save(f, 'PNG') encoded_image = f.getvalue() f.close() return encoded_image def DecodePNG(png): """Returns a RGB image from PNG file-contents. Args: encoded_image: PNG file-contents of an RGB image. Returns: an RGB image """ return Image.open(StringIO.StringIO(png))
bsd-3-clause
fbradyirl/home-assistant
homeassistant/components/smappee/sensor.py
1
8496
"""Support for monitoring a Smappee energy sensor.""" from datetime import timedelta import logging from homeassistant.const import ENERGY_KILO_WATT_HOUR, POWER_WATT from homeassistant.helpers.entity import Entity from . import DATA_SMAPPEE _LOGGER = logging.getLogger(__name__) SENSOR_PREFIX = "Smappee" SENSOR_TYPES = { "solar": ["Solar", "mdi:white-balance-sunny", "local", POWER_WATT, "solar"], "active_power": [ "Active Power", "mdi:power-plug", "local", POWER_WATT, "active_power", ], "current": ["Current", "mdi:gauge", "local", "A", "current"], "voltage": ["Voltage", "mdi:gauge", "local", "V", "voltage"], "active_cosfi": ["Power Factor", "mdi:gauge", "local", "%", "active_cosfi"], "alwayson_today": [ "Always On Today", "mdi:gauge", "remote", ENERGY_KILO_WATT_HOUR, "alwaysOn", ], "solar_today": [ "Solar Today", "mdi:white-balance-sunny", "remote", ENERGY_KILO_WATT_HOUR, "solar", ], "power_today": [ "Power Today", "mdi:power-plug", "remote", ENERGY_KILO_WATT_HOUR, "consumption", ], "water_sensor_1": ["Water Sensor 1", "mdi:water", "water", "m3", "value1"], "water_sensor_2": ["Water Sensor 2", "mdi:water", "water", "m3", "value2"], "water_sensor_temperature": [ "Water Sensor Temperature", "mdi:temperature-celsius", "water", "°", "temperature", ], "water_sensor_humidity": [ "Water Sensor Humidity", "mdi:water-percent", "water", "%", "humidity", ], "water_sensor_battery": [ "Water Sensor Battery", "mdi:battery", "water", "%", "battery", ], } SCAN_INTERVAL = timedelta(seconds=30) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Smappee sensor.""" smappee = hass.data[DATA_SMAPPEE] dev = [] if smappee.is_remote_active: for location_id in smappee.locations.keys(): for sensor in SENSOR_TYPES: if "remote" in SENSOR_TYPES[sensor]: dev.append( SmappeeSensor( smappee, location_id, sensor, SENSOR_TYPES[sensor] ) ) elif "water" in SENSOR_TYPES[sensor]: for items in smappee.info[location_id].get("sensors"): dev.append( SmappeeSensor( smappee, 
location_id, "{}:{}".format(sensor, items.get("id")), SENSOR_TYPES[sensor], ) ) if smappee.is_local_active: for location_id in smappee.locations.keys(): for sensor in SENSOR_TYPES: if "local" in SENSOR_TYPES[sensor]: if smappee.is_remote_active: dev.append( SmappeeSensor( smappee, location_id, sensor, SENSOR_TYPES[sensor] ) ) else: dev.append( SmappeeSensor(smappee, None, sensor, SENSOR_TYPES[sensor]) ) add_entities(dev, True) class SmappeeSensor(Entity): """Implementation of a Smappee sensor.""" def __init__(self, smappee, location_id, sensor, attributes): """Initialize the Smappee sensor.""" self._smappee = smappee self._location_id = location_id self._attributes = attributes self._sensor = sensor self.data = None self._state = None self._name = self._attributes[0] self._icon = self._attributes[1] self._type = self._attributes[2] self._unit_of_measurement = self._attributes[3] self._smappe_name = self._attributes[4] @property def name(self): """Return the name of the sensor.""" if self._location_id: location_name = self._smappee.locations[self._location_id] else: location_name = "Local" return "{} {} {}".format(SENSOR_PREFIX, location_name, self._name) @property def icon(self): """Icon to use in the frontend.""" return self._icon @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {} if self._location_id: attr["Location Id"] = self._location_id attr["Location Name"] = self._smappee.locations[self._location_id] return attr def update(self): """Get the latest data from Smappee and update the state.""" self._smappee.update() if self._sensor in ["alwayson_today", "solar_today", "power_today"]: data = self._smappee.consumption[self._location_id] if data: consumption = data.get("consumptions")[-1] 
_LOGGER.debug("%s %s", self._sensor, consumption) value = consumption.get(self._smappe_name) self._state = round(value / 1000, 2) elif self._sensor == "active_cosfi": cosfi = self._smappee.active_cosfi() _LOGGER.debug("%s %s", self._sensor, cosfi) if cosfi: self._state = round(cosfi, 2) elif self._sensor == "current": current = self._smappee.active_current() _LOGGER.debug("%s %s", self._sensor, current) if current: self._state = round(current, 2) elif self._sensor == "voltage": voltage = self._smappee.active_voltage() _LOGGER.debug("%s %s", self._sensor, voltage) if voltage: self._state = round(voltage, 3) elif self._sensor == "active_power": data = self._smappee.instantaneous _LOGGER.debug("%s %s", self._sensor, data) if data: value1 = [ float(i["value"]) for i in data if i["key"].endswith("phase0ActivePower") ] value2 = [ float(i["value"]) for i in data if i["key"].endswith("phase1ActivePower") ] value3 = [ float(i["value"]) for i in data if i["key"].endswith("phase2ActivePower") ] active_power = sum(value1 + value2 + value3) / 1000 self._state = round(active_power, 2) elif self._sensor == "solar": data = self._smappee.instantaneous _LOGGER.debug("%s %s", self._sensor, data) if data: value1 = [ float(i["value"]) for i in data if i["key"].endswith("phase3ActivePower") ] value2 = [ float(i["value"]) for i in data if i["key"].endswith("phase4ActivePower") ] value3 = [ float(i["value"]) for i in data if i["key"].endswith("phase5ActivePower") ] power = sum(value1 + value2 + value3) / 1000 self._state = round(power, 2) elif self._type == "water": sensor_name, sensor_id = self._sensor.split(":") data = self._smappee.sensor_consumption[self._location_id].get( int(sensor_id) ) if data: tempdata = data.get("records") if tempdata: consumption = tempdata[-1] _LOGGER.debug("%s (%s) %s", sensor_name, sensor_id, consumption) value = consumption.get(self._smappe_name) self._state = value
apache-2.0
allmightyspiff/softlayer-python
SoftLayer/CLI/object_storage/credential/__init__.py
3
1288
"""Manages Object Storage S3 Credentials.""" # :license: MIT, see LICENSE for more details. import importlib import os import click CONTEXT = {'help_option_names': ['-h', '--help'], 'max_content_width': 999} class CapacityCommands(click.MultiCommand): """Loads module for object storage S3 credentials related commands.""" def __init__(self, **attrs): click.MultiCommand.__init__(self, **attrs) self.path = os.path.dirname(__file__) def list_commands(self, ctx): """List all sub-commands.""" commands = [] for filename in os.listdir(self.path): if filename == '__init__.py': continue if filename.endswith('.py'): commands.append(filename[:-3].replace("_", "-")) commands.sort() return commands def get_command(self, ctx, cmd_name): """Get command for click.""" path = "%s.%s" % (__name__, cmd_name) path = path.replace("-", "_") module = importlib.import_module(path) return getattr(module, 'cli') # Required to get the sub-sub-sub command to work. @click.group(cls=CapacityCommands, context_settings=CONTEXT) def cli(): """Base command for all object storage credentials S3 related concerns"""
mit
mganeva/mantid
Framework/PythonInterface/mantid/plots/modest_image/modest_image.py
1
10141
# v0.2 obtained on March 12, 2019 """ Modification of Chris Beaumont's mpl-modest-image package to allow the use of set_extent. """ from __future__ import print_function, division import matplotlib rcParams = matplotlib.rcParams import matplotlib.image as mi import matplotlib.colors as mcolors import matplotlib.cbook as cbook from matplotlib.transforms import IdentityTransform, Affine2D import numpy as np IDENTITY_TRANSFORM = IdentityTransform() class ModestImage(mi.AxesImage): """ Computationally modest image class. ModestImage is an extension of the Matplotlib AxesImage class better suited for the interactive display of larger images. Before drawing, ModestImage resamples the data array based on the screen resolution and view window. This has very little affect on the appearance of the image, but can substantially cut down on computation since calculations of unresolved or clipped pixels are skipped. The interface of ModestImage is the same as AxesImage. However, it does not currently support setting the 'extent' property. There may also be weird coordinate warping operations for images that I'm not aware of. Don't expect those to work either. 
""" def __init__(self, *args, **kwargs): self._full_res = None self._full_extent = kwargs.get('extent', None) super(ModestImage, self).__init__(*args, **kwargs) self.invalidate_cache() def set_data(self, A): """ Set the image array ACCEPTS: numpy/PIL Image A """ self._full_res = A self._A = A if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float): raise TypeError("Image data can not convert to float") if (self._A.ndim not in (2, 3) or (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))): raise TypeError("Invalid dimensions for image data") self.invalidate_cache() def invalidate_cache(self): self._bounds = None self._imcache = None self._rgbacache = None self._oldxslice = None self._oldyslice = None self._sx, self._sy = None, None self._pixel2world_cache = None self._world2pixel_cache = None def set_extent(self, extent): self._full_extent = extent self.invalidate_cache() mi.AxesImage.set_extent(self, extent) def get_array(self): """Override to return the full-resolution array""" return self._full_res @property def _pixel2world(self): if self._pixel2world_cache is None: # Pre-compute affine transforms to convert between the 'world' # coordinates of the axes (what is shown by the axis labels) to # 'pixel' coordinates in the underlying array. extent = self._full_extent if extent is None: self._pixel2world_cache = IDENTITY_TRANSFORM else: self._pixel2world_cache = Affine2D() self._pixel2world.translate(+0.5, +0.5) self._pixel2world.scale((extent[1] - extent[0]) / self._full_res.shape[1], (extent[3] - extent[2]) / self._full_res.shape[0]) self._pixel2world.translate(extent[0], extent[2]) self._world2pixel_cache = None return self._pixel2world_cache @property def _world2pixel(self): if self._world2pixel_cache is None: self._world2pixel_cache = self._pixel2world.inverted() return self._world2pixel_cache def _scale_to_res(self): """ Change self._A and _extent to render an image whose resolution is matched to the eventual rendering. 
""" # Find out how we need to slice the array to make sure we match the # resolution of the display. We pass self._world2pixel which matters # for cases where the extent has been set. x0, x1, sx, y0, y1, sy = extract_matched_slices(axes=self.axes, shape=self._full_res.shape, transform=self._world2pixel) # Check whether we've already calculated what we need, and if so just # return without doing anything further. if (self._bounds is not None and sx >= self._sx and sy >= self._sy and x0 >= self._bounds[0] and x1 <= self._bounds[1] and y0 >= self._bounds[2] and y1 <= self._bounds[3]): return # Slice the array using the slices determined previously to optimally # match the display self._A = self._full_res[y0:y1:sy, x0:x1:sx] self._A = cbook.safe_masked_invalid(self._A) # We now determine the extent of the subset of the image, by determining # it first in pixel space, and converting it to the 'world' coordinates. # See https://github.com/matplotlib/matplotlib/issues/8693 for a # demonstration of why origin='upper' and extent=None needs to be # special-cased. if self.origin == 'upper' and self._full_extent is None: xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y1 - .5, y0 - .5 else: xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y0 - .5, y1 - .5 xmin, ymin, xmax, ymax = self._pixel2world.transform([(xmin, ymin), (xmax, ymax)]).ravel() mi.AxesImage.set_extent(self, [xmin, xmax, ymin, ymax]) # self.set_extent([xmin, xmax, ymin, ymax]) # Finally, we cache the current settings to avoid re-computing similar # arrays in future. self._sx = sx self._sy = sy self._bounds = (x0, x1, y0, y1) self.changed() def draw(self, renderer, *args, **kwargs): if self._full_res.shape is None: return self._scale_to_res() super(ModestImage, self).draw(renderer, *args, **kwargs) def main(): from time import time import matplotlib.pyplot as plt x, y = np.mgrid[0:2000, 0:2000] data = np.sin(x / 10.) * np.cos(y / 30.) 
f = plt.figure() ax = f.add_subplot(111) # try switching between artist = ModestImage(ax, data=data) ax.set_aspect('equal') artist.norm.vmin = -1 artist.norm.vmax = 1 ax.add_artist(artist) t0 = time() plt.gcf().canvas.draw() t1 = time() print("Draw time for %s: %0.1f ms" % (artist.__class__.__name__, (t1 - t0) * 1000)) plt.show() def imshow(axes, X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, **kwargs): """Similar to matplotlib's imshow command, but produces a ModestImage Unlike matplotlib version, must explicitly specify axes """ if not axes._hold: axes.cla() if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if aspect is None: aspect = rcParams['image.aspect'] axes.set_aspect(aspect) im = ModestImage(axes, cmap=cmap, norm=norm, interpolation=interpolation, origin=origin, extent=extent, filternorm=filternorm, filterrad=filterrad, resample=resample, **kwargs) im.set_data(X) im.set_alpha(alpha) axes._set_artist_props(im) if im.get_clip_path() is None: # image does not already have clipping set, clip to axes patch im.set_clip_path(axes.patch) # if norm is None and shape is None: # im.set_clim(vmin, vmax) if vmin is not None or vmax is not None: im.set_clim(vmin, vmax) elif norm is None: im.autoscale_None() im.set_url(url) # update ax.dataLim, and, if autoscaling, set viewLim # to tightly fit the image, regardless of dataLim. im.set_extent(im.get_extent()) axes.images.append(im) im._remove_method = lambda h: axes.images.remove(h) return im def extract_matched_slices(axes=None, shape=None, extent=None, transform=IDENTITY_TRANSFORM): """Determine the slice parameters to use, matched to the screen. :param ax: Axes object to query. It's extent and pixel size determine the slice parameters :param shape: Tuple of the full image shape to slice into. 
Upper boundaries for slices will be cropped to fit within this shape. :rtype: tulpe of x0, x1, sx, y0, y1, sy Indexing the full resolution array as array[y0:y1:sy, x0:x1:sx] returns a view well-matched to the axes' resolution and extent """ # Find extent in display pixels (this gives the resolution we need # to sample the array to) ext = (axes.transAxes.transform([(1, 1)]) - axes.transAxes.transform([(0, 0)]))[0] # Find the extent of the axes in 'world' coordinates xlim, ylim = axes.get_xlim(), axes.get_ylim() # Transform the limits to pixel coordinates ind0 = transform.transform([min(xlim), min(ylim)]) ind1 = transform.transform([max(xlim), max(ylim)]) def _clip(val, lo, hi): return int(max(min(val, hi), lo)) # Determine the range of pixels to extract from the array, including a 5 # pixel margin all around. We ensure that the shape of the resulting array # will always be at least (1, 1) even if there is really no overlap, to # avoid issues. y0 = _clip(ind0[1] - 5, 0, shape[0] - 1) y1 = _clip(ind1[1] + 5, 1, shape[0]) x0 = _clip(ind0[0] - 5, 0, shape[1] - 1) x1 = _clip(ind1[0] + 5, 1, shape[1]) # Determine the strides that can be used when extracting the array sy = int(max(1, min((y1 - y0) / 5., np.ceil(abs((ind1[1] - ind0[1]) / ext[1]))))) sx = int(max(1, min((x1 - x0) / 5., np.ceil(abs((ind1[0] - ind0[0]) / ext[0]))))) return x0, x1, sx, y0, y1, sy if __name__ == "__main__": main()
gpl-3.0
robiame/AndroidGeodata
pil/GimpGradientFile.py
1
3292
# # Python Imaging Library # $Id$ # # stuff to read (and render) GIMP gradient files # # History: # 97-08-23 fl Created # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1997. # # See the README file for information on usage and redistribution. # from math import pi, log, sin, sqrt import string # -------------------------------------------------------------------- # Stuff to translate curve segments to palette values (derived from # the corresponding code in GIMP, written by Federico Mena Quintero. # See the GIMP distribution for more information.) # EPSILON = 1e-10 def linear(middle, pos): if pos <= middle: if middle < EPSILON: return 0.0 else: return 0.5 * pos / middle else: pos = pos - middle middle = 1.0 - middle if middle < EPSILON: return 1.0 else: return 0.5 + 0.5 * pos / middle def curved(middle, pos): return pos ** (log(0.5) / log(max(middle, EPSILON))) def sine(middle, pos): return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0 def sphere_increasing(middle, pos): return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2) def sphere_decreasing(middle, pos): return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2) SEGMENTS = [ linear, curved, sine, sphere_increasing, sphere_decreasing ] class GradientFile: gradient = None def getpalette(self, entries = 256): palette = [] ix = 0 x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] for i in range(entries): x = i / float(entries-1) while x1 < x: ix = ix + 1 x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] w = x1 - x0 if w < EPSILON: scale = segment(0.5, 0.5) else: scale = segment((xm - x0) / w, (x - x0) / w) # expand to RGBA r = chr(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5)) g = chr(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5)) b = chr(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5)) a = chr(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5)) # add to palette palette.append(r + g + b + a) return string.join(palette, ""), "RGBA" ## # File handler for 
GIMP's gradient format. class GimpGradientFile(GradientFile): def __init__(self, fp): if fp.readline()[:13] != "GIMP Gradient": raise SyntaxError, "not a GIMP gradient file" count = int(fp.readline()) gradient = [] for i in range(count): s = string.split(fp.readline()) w = map(float, s[:11]) x0, x1 = w[0], w[2] xm = w[1] rgb0 = w[3:7] rgb1 = w[7:11] segment = SEGMENTS[int(s[11])] cspace = int(s[12]) if cspace != 0: raise IOError, "cannot handle HSV colour space" gradient.append((x0, x1, xm, rgb0, rgb1, segment)) self.gradient = gradient
mit
nguyeho7/CZ_NER
src/webservice/server.py
1
1188
from flask import Flask from flask import request, render_template, jsonify from src.common.NER_utils import transform_dataset_web from src.CRF_NER.CRF_NER import parse_commands import pycrfsuite model = "1_nbr" def init(filename = "model.txt"): with open(filename) as f: line = f.read() tokens = line.strip().split(' ') label = tokens[0] params = tokens[1:] return label, params _, params = init() app = Flask(__name__) def wrap_text(tag, token): if tag == "O": return token return "<{} {}>".format(tag, token) @app.route("/") def my_form(): return render_template("my-form.html") @app.route("/annotate", methods=['POST', 'GET']) def my_for_post(): text = request.args.get('sentence', 0, type=str) tagger = pycrfsuite.Tagger() tagger.open(model+".crfmodel") features, tokens = transform_dataset_web(text, params, merge = "supertype") print(features) predictions = tagger.tag(features) print(predictions) tagger.close() output = " ".join(wrap_text(tag, token) for tag, token in zip(predictions, tokens)) return jsonify(result=output) if __name__ == "__main__": app.run()
mit
jaymiejones86/jaymiejones.com
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_scilab_builtins.py
364
31261
# -*- coding: utf-8 -*- """ pygments.lexers._scilab_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Builtin list for the ScilabLexer. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # These lists are generated automatically. # Run the following in a Scilab script: # # varType=["functions", "commands", "macros", "variables" ]; # fd = mopen('list.txt','wt'); # # for j=1:size(varType,"*") # myStr=""; # a=completion("",varType(j)); # myStr=varType(j)+"_kw = ["; # for i=1:size(a,"*") # myStr = myStr + """" + a(i) + """"; # if size(a,"*") <> i then # myStr = myStr + ","; end # end # myStr = myStr + "]"; # mputl(myStr,fd); # end # mclose(fd); # # Then replace "$" by "\\$" manually. functions_kw = ["%XMLAttr_6","%XMLAttr_e","%XMLAttr_i_XMLElem","%XMLAttr_length","%XMLAttr_p","%XMLAttr_size","%XMLDoc_6","%XMLDoc_e","%XMLDoc_i_XMLList","%XMLDoc_p","%XMLElem_6","%XMLElem_e","%XMLElem_i_XMLDoc","%XMLElem_i_XMLElem","%XMLElem_i_XMLList","%XMLElem_p","%XMLList_6","%XMLList_e","%XMLList_i_XMLElem","%XMLList_i_XMLList","%XMLList_length","%XMLList_p","%XMLList_size","%XMLNs_6","%XMLNs_e","%XMLNs_i_XMLElem","%XMLNs_p","%XMLSet_6","%XMLSet_e","%XMLSet_length","%XMLSet_p","%XMLSet_size","%XMLValid_p","%b_i_XMLList","%c_i_XMLAttr","%c_i_XMLDoc","%c_i_XMLElem","%c_i_XMLList","%ce_i_XMLList","%fptr_i_XMLList","%h_i_XMLList","%hm_i_XMLList","%i_abs","%i_cumprod","%i_cumsum","%i_diag","%i_i_XMLList","%i_matrix","%i_max","%i_maxi","%i_min","%i_mini","%i_mput","%i_p","%i_prod","%i_sum","%i_tril","%i_triu","%ip_i_XMLList","%l_i_XMLList","%lss_i_XMLList","%mc_i_XMLList","%msp_full","%msp_i_XMLList","%msp_spget","%p_i_XMLList","%ptr_i_XMLList","%r_i_XMLList","%s_i_XMLList","%sp_i_XMLList","%spb_i_XMLList","%st_i_XMLList","Calendar","ClipBoard","Matplot","Matplot1","PlaySound","TCL_DeleteInterp","TCL_DoOneEvent","TCL_EvalFile","TCL_EvalStr","TCL_ExistArray","TCL_ExistInterp","TCL_ExistVar","TCL_GetVar","TCL_GetVersion","TCL_SetVar","TCL_Uns
etVar","TCL_UpVar","_","_code2str","_str2code","about","abs","acos","addcb","addf","addhistory","addinter","amell","and","argn","arl2_ius","ascii","asin","atan","backslash","balanc","banner","base2dec","basename","bdiag","beep","besselh","besseli","besselj","besselk","bessely","beta","bezout","bfinit","blkfc1i","blkslvi","bool2s","browsehistory","browsevar","bsplin3val","buildDocv2","buildouttb","bvode","c_link","calerf","call","callblk","captions","cd","cdfbet","cdfbin","cdfchi","cdfchn","cdff","cdffnc","cdfgam","cdfnbn","cdfnor","cdfpoi","cdft","ceil","champ","champ1","chdir","chol","clc","clean","clear","clear_pixmap","clearfun","clearglobal","closeEditor","closeXcos","code2str","coeff","comp","completion","conj","contour2di","contr","conv2","convstr","copy","copyfile","corr","cos","coserror","createdir","cshep2d","ctree2","ctree3","ctree4","cumprod","cumsum","curblock","curblockc","dasrt","dassl","data2sig","debug","dec2base","deff","definedfields","degree","delbpt","delete","deletefile","delip","delmenu","det","dgettext","dhinf","diag","diary","diffobjs","disp","dispbpt","displayhistory","disposefftwlibrary","dlgamma","dnaupd","dneupd","double","draw","drawaxis","drawlater","drawnow","dsaupd","dsearch","dseupd","duplicate","editor","editvar","emptystr","end_scicosim","ereduc","errcatch","errclear","error","eval_cshep2d","exec","execstr","exists","exit","exp","expm","exportUI","export_to_hdf5","eye","fadj2sp","fec","feval","fft","fftw","fftw_flags","fftw_forget_wisdom","fftwlibraryisloaded","file","filebrowser","fileext","fileinfo","fileparts","filesep","find","findBD","findfiles","floor","format","fort","fprintfMat","freq","frexp","fromc","fromjava","fscanfMat","fsolve","fstair","full","fullpath","funcprot","funptr","gamma","gammaln","geom3d","get","get_absolute_file_path","get_fftw_wisdom","getblocklabel","getcallbackobject","getdate","getdebuginfo","getdefaultlanguage","getdrives","getdynlibext","getenv","getfield","gethistory","gethistoryfile","getinstalledl
ookandfeels","getio","getlanguage","getlongpathname","getlookandfeel","getmd5","getmemory","getmodules","getos","getpid","getrelativefilename","getscicosvars","getscilabmode","getshortpathname","gettext","getvariablesonstack","getversion","glist","global","glue","grand","grayplot","grep","gsort","gstacksize","havewindow","helpbrowser","hess","hinf","historymanager","historysize","host","iconvert","iconvert","ieee","ilib_verbose","imag","impl","import_from_hdf5","imult","inpnvi","int","int16","int2d","int32","int3d","int8","interp","interp2d","interp3d","intg","intppty","inttype","inv","is_handle_valid","isalphanum","isascii","isdef","isdigit","isdir","isequal","isequalbitwise","iserror","isfile","isglobal","isletter","isreal","iswaitingforinput","javaclasspath","javalibrarypath","kron","lasterror","ldiv","ldivf","legendre","length","lib","librarieslist","libraryinfo","linear_interpn","lines","link","linmeq","list","load","loadScicos","loadfftwlibrary","loadhistory","log","log1p","lsq","lsq_splin","lsqrsolve","lsslist","lstcat","lstsize","ltitr","lu","ludel","lufact","luget","lusolve","macr2lst","macr2tree","matfile_close","matfile_listvar","matfile_open","matfile_varreadnext","matfile_varwrite","matrix","max","maxfiles","mclearerr","mclose","meof","merror","messagebox","mfprintf","mfscanf","mget","mgeti","mgetl","mgetstr","min","mlist","mode","model2blk","mopen","move","movefile","mprintf","mput","mputl","mputstr","mscanf","mseek","msprintf","msscanf","mtell","mtlb_mode","mtlb_sparse","mucomp","mulf","nearfloat","newaxes","newest","newfun","nnz","notify","number_properties","ode","odedc","ones","opentk","optim","or","ordmmd","parallel_concurrency","parallel_run","param3d","param3d1","part","pathconvert","pathsep","phase_simulation","plot2d","plot2d1","plot2d2","plot2d3","plot2d4","plot3d","plot3d1","pointer_xproperty","poly","ppol","pppdiv","predef","print","printf","printfigure","printsetupbox","prod","progressionbar","prompt","pwd","qld","qp_solve","qr","raise_win
dow","rand","rankqr","rat","rcond","rdivf","read","read4b","readb","readgateway","readmps","real","realtime","realtimeinit","regexp","relocate_handle","remez","removedir","removelinehistory","res_with_prec","resethistory","residu","resume","return","ricc","ricc_old","rlist","roots","rotate_axes","round","rpem","rtitr","rubberbox","save","saveafterncommands","saveconsecutivecommands","savehistory","schur","sci_haltscicos","sci_tree2","sci_tree3","sci_tree4","sciargs","scicos_debug","scicos_debug_count","scicos_time","scicosim","scinotes","sctree","semidef","set","set_blockerror","set_fftw_wisdom","set_xproperty","setbpt","setdefaultlanguage","setenv","setfield","sethistoryfile","setlanguage","setlookandfeel","setmenu","sfact","sfinit","show_pixmap","show_window","showalluimenushandles","sident","sig2data","sign","simp","simp_mode","sin","size","slash","sleep","sorder","sparse","spchol","spcompack","spec","spget","splin","splin2d","splin3d","spones","sprintf","sqrt","stacksize","str2code","strcat","strchr","strcmp","strcspn","strindex","string","stringbox","stripblanks","strncpy","strrchr","strrev","strsplit","strspn","strstr","strsubst","strtod","strtok","subf","sum","svd","swap_handles","symfcti","syredi","system_getproperty","system_setproperty","ta2lpd","tan","taucs_chdel","taucs_chfact","taucs_chget","taucs_chinfo","taucs_chsolve","tempname","testmatrix","timer","tlist","tohome","tokens","toolbar","toprint","tr_zer","tril","triu","type","typename","uiDisplayTree","uicontextmenu","uicontrol","uigetcolor","uigetdir","uigetfile","uigetfont","uimenu","uint16","uint32","uint8","uipopup","uiputfile","uiwait","ulink","umf_ludel","umf_lufact","umf_luget","umf_luinfo","umf_lusolve","umfpack","unglue","unix","unsetmenu","unzoom","updatebrowsevar","usecanvas","user","var2vec","varn","vec2var","waitbar","warnBlockByUID","warning","what","where","whereis","who","winsid","with_embedded_jre","with_module","writb","write","write4b","x_choose","x_choose_modeless","x_dialog","x_md
ialog","xarc","xarcs","xarrows","xchange","xchoicesi","xclick","xcos","xcosAddToolsMenu","xcosConfigureXmlFile","xcosDiagramToScilab","xcosPalCategoryAdd","xcosPalDelete","xcosPalDisable","xcosPalEnable","xcosPalGenerateIcon","xcosPalLoad","xcosPalMove","xcosUpdateBlock","xdel","xfarc","xfarcs","xfpoly","xfpolys","xfrect","xget","xgetech","xgetmouse","xgraduate","xgrid","xlfont","xls_open","xls_read","xmlAddNs","xmlAsNumber","xmlAsText","xmlDTD","xmlDelete","xmlDocument","xmlDump","xmlElement","xmlFormat","xmlGetNsByHref","xmlGetNsByPrefix","xmlGetOpenDocs","xmlIsValidObject","xmlNs","xmlRead","xmlReadStr","xmlRelaxNG","xmlRemove","xmlSchema","xmlSetAttributes","xmlValidate","xmlWrite","xmlXPath","xname","xpause","xpoly","xpolys","xrect","xrects","xs2bmp","xs2eps","xs2gif","xs2jpg","xs2pdf","xs2png","xs2ppm","xs2ps","xs2svg","xsegs","xset","xsetech","xstring","xstringb","xtitle","zeros","znaupd","zneupd","zoom_rect"] commands_kw = ["abort","apropos","break","case","catch","clc","clear","continue","do","else","elseif","end","endfunction","exit","for","function","help","if","pause","pwd","quit","resume","return","select","then","try","what","while","who"] macros_kw = 
["%0_i_st","%3d_i_h","%Block_xcosUpdateBlock","%TNELDER_p","%TNELDER_string","%TNMPLOT_p","%TNMPLOT_string","%TOPTIM_p","%TOPTIM_string","%TSIMPLEX_p","%TSIMPLEX_string","%_gsort","%_strsplit","%ar_p","%asn","%b_a_b","%b_a_s","%b_c_s","%b_c_spb","%b_cumprod","%b_cumsum","%b_d_s","%b_diag","%b_e","%b_f_s","%b_f_spb","%b_g_s","%b_g_spb","%b_h_s","%b_h_spb","%b_i_b","%b_i_ce","%b_i_h","%b_i_hm","%b_i_s","%b_i_sp","%b_i_spb","%b_i_st","%b_iconvert","%b_l_b","%b_l_s","%b_m_b","%b_m_s","%b_matrix","%b_n_hm","%b_o_hm","%b_p_s","%b_prod","%b_r_b","%b_r_s","%b_s_b","%b_s_s","%b_string","%b_sum","%b_tril","%b_triu","%b_x_b","%b_x_s","%c_a_c","%c_b_c","%c_b_s","%c_diag","%c_e","%c_eye","%c_f_s","%c_i_c","%c_i_ce","%c_i_h","%c_i_hm","%c_i_lss","%c_i_r","%c_i_s","%c_i_st","%c_matrix","%c_n_l","%c_n_st","%c_o_l","%c_o_st","%c_ones","%c_rand","%c_tril","%c_triu","%cblock_c_cblock","%cblock_c_s","%cblock_e","%cblock_f_cblock","%cblock_p","%cblock_size","%ce_6","%ce_c_ce","%ce_e","%ce_f_ce","%ce_i_ce","%ce_i_s","%ce_i_st","%ce_matrix","%ce_p","%ce_size","%ce_string","%ce_t","%champdat_i_h","%choose","%diagram_xcos","%dir_p","%fptr_i_st","%grayplot_i_h","%h_i_st","%hm_1_hm","%hm_1_s","%hm_2_hm","%hm_2_s","%hm_3_hm","%hm_3_s","%hm_4_hm","%hm_4_s","%hm_5","%hm_a_hm","%hm_a_r","%hm_a_s","%hm_abs","%hm_and","%hm_bool2s","%hm_c_hm","%hm_ceil","%hm_conj","%hm_cos","%hm_cumprod","%hm_cumsum","%hm_d_hm","%hm_d_s","%hm_degree","%hm_e","%hm_exp","%hm_f_hm","%hm_fft","%hm_find","%hm_floor","%hm_g_hm","%hm_h_hm","%hm_i_b","%hm_i_ce","%hm_i_hm","%hm_i_i","%hm_i_p","%hm_i_r","%hm_i_s","%hm_i_st","%hm_iconvert","%hm_imag","%hm_int","%hm_isnan","%hm_isreal","%hm_j_hm","%hm_j_s","%hm_k_hm","%hm_k_s","%hm_log","%hm_m_p","%hm_m_r","%hm_m_s","%hm_matrix","%hm_maxi","%hm_mean","%hm_median","%hm_mini","%hm_n_b","%hm_n_c","%hm_n_hm","%hm_n_i","%hm_n_p","%hm_n_s","%hm_o_b","%hm_o_c","%hm_o_hm","%hm_o_i","%hm_o_p","%hm_o_s","%hm_ones","%hm_or","%hm_p","%hm_prod","%hm_q_hm","%hm_r_s","%hm_rand","%hm_real","%h
m_round","%hm_s","%hm_s_hm","%hm_s_r","%hm_s_s","%hm_sign","%hm_sin","%hm_size","%hm_sqrt","%hm_st_deviation","%hm_string","%hm_sum","%hm_x_hm","%hm_x_p","%hm_x_s","%hm_zeros","%i_1_s","%i_2_s","%i_3_s","%i_4_s","%i_Matplot","%i_a_i","%i_a_s","%i_and","%i_ascii","%i_b_s","%i_bezout","%i_champ","%i_champ1","%i_contour","%i_contour2d","%i_d_i","%i_d_s","%i_e","%i_fft","%i_g_i","%i_gcd","%i_h_i","%i_i_ce","%i_i_h","%i_i_hm","%i_i_i","%i_i_s","%i_i_st","%i_j_i","%i_j_s","%i_l_s","%i_lcm","%i_length","%i_m_i","%i_m_s","%i_mfprintf","%i_mprintf","%i_msprintf","%i_n_s","%i_o_s","%i_or","%i_p_i","%i_p_s","%i_plot2d","%i_plot2d1","%i_plot2d2","%i_q_s","%i_r_i","%i_r_s","%i_round","%i_s_i","%i_s_s","%i_sign","%i_string","%i_x_i","%i_x_s","%ip_a_s","%ip_i_st","%ip_m_s","%ip_n_ip","%ip_o_ip","%ip_p","%ip_s_s","%ip_string","%k","%l_i_h","%l_i_s","%l_i_st","%l_isequal","%l_n_c","%l_n_l","%l_n_m","%l_n_p","%l_n_s","%l_n_st","%l_o_c","%l_o_l","%l_o_m","%l_o_p","%l_o_s","%l_o_st","%lss_a_lss","%lss_a_p","%lss_a_r","%lss_a_s","%lss_c_lss","%lss_c_p","%lss_c_r","%lss_c_s","%lss_e","%lss_eye","%lss_f_lss","%lss_f_p","%lss_f_r","%lss_f_s","%lss_i_ce","%lss_i_lss","%lss_i_p","%lss_i_r","%lss_i_s","%lss_i_st","%lss_inv","%lss_l_lss","%lss_l_p","%lss_l_r","%lss_l_s","%lss_m_lss","%lss_m_p","%lss_m_r","%lss_m_s","%lss_n_lss","%lss_n_p","%lss_n_r","%lss_n_s","%lss_norm","%lss_o_lss","%lss_o_p","%lss_o_r","%lss_o_s","%lss_ones","%lss_r_lss","%lss_r_p","%lss_r_r","%lss_r_s","%lss_rand","%lss_s","%lss_s_lss","%lss_s_p","%lss_s_r","%lss_s_s","%lss_size","%lss_t","%lss_v_lss","%lss_v_p","%lss_v_r","%lss_v_s","%lt_i_s","%m_n_l","%m_o_l","%mc_i_h","%mc_i_s","%mc_i_st","%mc_n_st","%mc_o_st","%mc_string","%mps_p","%mps_string","%msp_a_s","%msp_abs","%msp_e","%msp_find","%msp_i_s","%msp_i_st","%msp_length","%msp_m_s","%msp_maxi","%msp_n_msp","%msp_nnz","%msp_o_msp","%msp_p","%msp_sparse","%msp_spones","%msp_t","%p_a_lss","%p_a_r","%p_c_lss","%p_c_r","%p_cumprod","%p_cumsum","%p_d_p","%p_d_r","%p_d_s",
"%p_det","%p_e","%p_f_lss","%p_f_r","%p_i_ce","%p_i_h","%p_i_hm","%p_i_lss","%p_i_p","%p_i_r","%p_i_s","%p_i_st","%p_inv","%p_j_s","%p_k_p","%p_k_r","%p_k_s","%p_l_lss","%p_l_p","%p_l_r","%p_l_s","%p_m_hm","%p_m_lss","%p_m_r","%p_matrix","%p_n_l","%p_n_lss","%p_n_r","%p_o_l","%p_o_lss","%p_o_r","%p_o_sp","%p_p_s","%p_prod","%p_q_p","%p_q_r","%p_q_s","%p_r_lss","%p_r_p","%p_r_r","%p_r_s","%p_s_lss","%p_s_r","%p_simp","%p_string","%p_sum","%p_v_lss","%p_v_p","%p_v_r","%p_v_s","%p_x_hm","%p_x_r","%p_y_p","%p_y_r","%p_y_s","%p_z_p","%p_z_r","%p_z_s","%r_a_hm","%r_a_lss","%r_a_p","%r_a_r","%r_a_s","%r_c_lss","%r_c_p","%r_c_r","%r_c_s","%r_clean","%r_cumprod","%r_d_p","%r_d_r","%r_d_s","%r_det","%r_diag","%r_e","%r_eye","%r_f_lss","%r_f_p","%r_f_r","%r_f_s","%r_i_ce","%r_i_hm","%r_i_lss","%r_i_p","%r_i_r","%r_i_s","%r_i_st","%r_inv","%r_j_s","%r_k_p","%r_k_r","%r_k_s","%r_l_lss","%r_l_p","%r_l_r","%r_l_s","%r_m_hm","%r_m_lss","%r_m_p","%r_m_r","%r_m_s","%r_matrix","%r_n_lss","%r_n_p","%r_n_r","%r_n_s","%r_norm","%r_o_lss","%r_o_p","%r_o_r","%r_o_s","%r_ones","%r_p","%r_p_s","%r_prod","%r_q_p","%r_q_r","%r_q_s","%r_r_lss","%r_r_p","%r_r_r","%r_r_s","%r_rand","%r_s","%r_s_hm","%r_s_lss","%r_s_p","%r_s_r","%r_s_s","%r_simp","%r_size","%r_string","%r_sum","%r_t","%r_tril","%r_triu","%r_v_lss","%r_v_p","%r_v_r","%r_v_s","%r_x_p","%r_x_r","%r_x_s","%r_y_p","%r_y_r","%r_y_s","%r_z_p","%r_z_r","%r_z_s","%s_1_hm","%s_1_i","%s_2_hm","%s_2_i","%s_3_hm","%s_3_i","%s_4_hm","%s_4_i","%s_5","%s_a_b","%s_a_hm","%s_a_i","%s_a_ip","%s_a_lss","%s_a_msp","%s_a_r","%s_a_sp","%s_and","%s_b_i","%s_b_s","%s_c_b","%s_c_cblock","%s_c_lss","%s_c_r","%s_c_sp","%s_d_b","%s_d_i","%s_d_p","%s_d_r","%s_d_sp","%s_e","%s_f_b","%s_f_cblock","%s_f_lss","%s_f_r","%s_f_sp","%s_g_b","%s_g_s","%s_h_b","%s_h_s","%s_i_b","%s_i_c","%s_i_ce","%s_i_h","%s_i_hm","%s_i_i","%s_i_lss","%s_i_p","%s_i_r","%s_i_s","%s_i_sp","%s_i_spb","%s_i_st","%s_j_i","%s_k_hm","%s_k_p","%s_k_r","%s_k_sp","%s_l_b","%s_l_hm","%s_l_i","%s_
l_lss","%s_l_p","%s_l_r","%s_l_s","%s_l_sp","%s_m_b","%s_m_hm","%s_m_i","%s_m_ip","%s_m_lss","%s_m_msp","%s_m_r","%s_matrix","%s_n_hm","%s_n_i","%s_n_l","%s_n_lss","%s_n_r","%s_n_st","%s_o_hm","%s_o_i","%s_o_l","%s_o_lss","%s_o_r","%s_o_st","%s_or","%s_p_b","%s_p_i","%s_pow","%s_q_hm","%s_q_i","%s_q_p","%s_q_r","%s_q_sp","%s_r_b","%s_r_i","%s_r_lss","%s_r_p","%s_r_r","%s_r_s","%s_r_sp","%s_s_b","%s_s_hm","%s_s_i","%s_s_ip","%s_s_lss","%s_s_r","%s_s_sp","%s_simp","%s_v_lss","%s_v_p","%s_v_r","%s_v_s","%s_x_b","%s_x_hm","%s_x_i","%s_x_r","%s_y_p","%s_y_r","%s_y_sp","%s_z_p","%s_z_r","%s_z_sp","%sn","%sp_a_s","%sp_a_sp","%sp_and","%sp_c_s","%sp_ceil","%sp_cos","%sp_cumprod","%sp_cumsum","%sp_d_s","%sp_d_sp","%sp_diag","%sp_e","%sp_exp","%sp_f_s","%sp_floor","%sp_gsort","%sp_i_ce","%sp_i_h","%sp_i_s","%sp_i_sp","%sp_i_st","%sp_int","%sp_inv","%sp_k_s","%sp_k_sp","%sp_l_s","%sp_l_sp","%sp_length","%sp_norm","%sp_or","%sp_p_s","%sp_prod","%sp_q_s","%sp_q_sp","%sp_r_s","%sp_r_sp","%sp_round","%sp_s_s","%sp_s_sp","%sp_sin","%sp_sqrt","%sp_string","%sp_sum","%sp_tril","%sp_triu","%sp_y_s","%sp_y_sp","%sp_z_s","%sp_z_sp","%spb_and","%spb_c_b","%spb_cumprod","%spb_cumsum","%spb_diag","%spb_e","%spb_f_b","%spb_g_b","%spb_g_spb","%spb_h_b","%spb_h_spb","%spb_i_b","%spb_i_ce","%spb_i_h","%spb_i_st","%spb_or","%spb_prod","%spb_sum","%spb_tril","%spb_triu","%st_6","%st_c_st","%st_e","%st_f_st","%st_i_b","%st_i_c","%st_i_fptr","%st_i_h","%st_i_i","%st_i_ip","%st_i_lss","%st_i_msp","%st_i_p","%st_i_r","%st_i_s","%st_i_sp","%st_i_spb","%st_i_st","%st_matrix","%st_n_c","%st_n_l","%st_n_mc","%st_n_p","%st_n_s","%st_o_c","%st_o_l","%st_o_mc","%st_o_p","%st_o_s","%st_o_tl","%st_p","%st_size","%st_string","%st_t","%ticks_i_h","%xls_e","%xls_p","%xlssheet_e","%xlssheet_p","%xlssheet_size","%xlssheet_string","DominationRank","G_make","IsAScalar","NDcost","OS_Version","PlotSparse","ReadHBSparse","ReadmiMatrix","TCL_CreateSlave","WritemiMatrix","abcd","abinv","accept_func_default","accept_func
_vfsa","acf","acosd","acosh","acoshm","acosm","acot","acotd","acoth","acsc","acscd","acsch","add_demo","add_help_chapter","add_module_help_chapter","add_param","add_profiling","adj2sp","aff2ab","ana_style","analpf","analyze","aplat","apropos","arhnk","arl2","arma2p","armac","armax","armax1","arobasestring2strings","arsimul","ascii2string","asciimat","asec","asecd","asech","asind","asinh","asinhm","asinm","assert_checkalmostequal","assert_checkequal","assert_checkerror","assert_checkfalse","assert_checkfilesequal","assert_checktrue","assert_comparecomplex","assert_computedigits","assert_cond2reltol","assert_cond2reqdigits","assert_generror","atand","atanh","atanhm","atanm","atomsAutoload","atomsAutoloadAdd","atomsAutoloadDel","atomsAutoloadList","atomsCategoryList","atomsCheckModule","atomsDepTreeShow","atomsGetConfig","atomsGetInstalled","atomsGetLoaded","atomsGetLoadedPath","atomsInstall","atomsIsInstalled","atomsIsLoaded","atomsList","atomsLoad","atomsRemove","atomsRepositoryAdd","atomsRepositoryDel","atomsRepositoryList","atomsRestoreConfig","atomsSaveConfig","atomsSearch","atomsSetConfig","atomsShow","atomsSystemInit","atomsSystemUpdate","atomsTest","atomsUpdate","atomsVersion","augment","auread","auwrite","balreal","bench_run","bilin","bilt","bin2dec","binomial","bitand","bitcmp","bitget","bitor","bitset","bitxor","black","blanks","bloc2exp","bloc2ss","block_parameter_error","bode","bstap","buttmag","bvodeS","bytecode","bytecodewalk","cainv","calendar","calfrq","canon","casc","cat","cat_code","cb_m2sci_gui","ccontrg","cell","cell2mat","cellstr","center","cepstrum","cfspec","char","chart","cheb1mag","cheb2mag","check_gateways","check_help","check_modules_xml","check_versions","chepol","chfact","chsolve","classmarkov","clean_help","clock","cls2dls","cmb_lin","cmndred","cmoment","coding_ga_binary","coding_ga_identity","coff","coffg","colcomp","colcompr","colinout","colregul","companion","complex","compute_initial_temp","cond","cond2sp","condestsp","config","config
ure_msifort","configure_msvc","cont_frm","cont_mat","contrss","conv","convert_to_float","convertindex","convol","convol2d","copfac","correl","cosd","cosh","coshm","cosm","cotd","cotg","coth","cothm","covar","createfun","createstruct","crossover_ga_binary","crossover_ga_default","csc","cscd","csch","csgn","csim","cspect","ctr_gram","czt","dae","daeoptions","damp","datafit","date","datenum","datevec","dbphi","dcf","ddp","dec2bin","dec2hex","dec2oct","del_help_chapter","del_module_help_chapter","demo_begin","demo_choose","demo_compiler","demo_end","demo_file_choice","demo_folder_choice","demo_function_choice","demo_gui","demo_mdialog","demo_message","demo_run","demo_viewCode","denom","derivat","derivative","des2ss","des2tf","detectmsifort64tools","detectmsvc64tools","determ","detr","detrend","devtools_run_builder","dft","dhnorm","diff","diophant","dir","dirname","dispfiles","dllinfo","dscr","dsimul","dt_ility","dtsi","edit","edit_error","eigenmarkov","ell1mag","enlarge_shape","entropy","eomday","epred","eqfir","eqiir","equil","equil1","erf","erfc","erfcx","erfinv","etime","eval","evans","evstr","expression2code","extract_help_examples","factor","factorial","factors","faurre","ffilt","fft2","fftshift","fieldnames","filt_sinc","filter","findABCD","findAC","findBDK","findR","find_freq","find_links","find_scicos_version","findm","findmsifortcompiler","findmsvccompiler","findx0BD","firstnonsingleton","fit_dat","fix","fixedpointgcd","flipdim","flts","fminsearch","format_txt","fourplan","fprintf","frep2tf","freson","frfit","frmag","fscanf","fseek_origin","fsfirlin","fspec","fspecg","fstabst","ftest","ftuneq","fullfile","fullrf","fullrfk","fun2string","g_margin","gainplot","gamitg","gcare","gcd","gencompilationflags_unix","generateBlockImage","generateBlockImages","generic_i_ce","generic_i_h","generic_i_hm","generic_i_s","generic_i_st","genlib","genlib_old","genmarkov","geomean","getDiagramVersion","getModelicaPath","get_file_path","get_function_path","get_param","get_profile"
,"get_scicos_version","getd","getscilabkeywords","getshell","gettklib","gfare","gfrancis","givens","glever","gmres","group","gschur","gspec","gtild","h2norm","h_cl","h_inf","h_inf_st","h_norm","hallchart","halt","hank","hankelsv","harmean","haveacompiler","head_comments","help","help_from_sci","help_skeleton","hermit","hex2dec","hilb","hilbert","horner","householder","hrmt","htrianr","hypermat","ifft","iir","iirgroup","iirlp","iirmod","ilib_build","ilib_compile","ilib_for_link","ilib_gen_Make","ilib_gen_Make_unix","ilib_gen_cleaner","ilib_gen_gateway","ilib_gen_loader","ilib_include_flag","ilib_mex_build","im_inv","importScicosDiagram","importScicosPal","importXcosDiagram","imrep2ss","ind2sub","inistate","init_ga_default","init_param","initial_scicos_tables","input","instruction2code","intc","intdec","integrate","interp1","interpln","intersect","intl","intsplin","inttrap","inv_coeff","invr","invrs","invsyslin","iqr","isLeapYear","is_absolute_path","is_param","iscell","iscellstr","isempty","isfield","isinf","isnan","isnum","issparse","isstruct","isvector","jmat","justify","kalm","karmarkar","kernel","kpure","krac2","kroneck","lattn","launchtest","lcf","lcm","lcmdiag","leastsq","leqe","leqr","lev","levin","lex_sort","lft","lin","lin2mu","lincos","lindquist","linf","linfn","linsolve","linspace","list2vec","list_param","listfiles","listfunctions","listvarinfile","lmisolver","lmitool","loadXcosLibs","loadmatfile","loadwave","log10","log2","logm","logspace","lqe","lqg","lqg2stan","lqg_ltr","lqr","ls","lyap","m2sci_gui","m_circle","macglov","macrovar","mad","makecell","manedit","mapsound","markp2ss","matfile2sci","mdelete","mean","meanf","median","mese","meshgrid","mfft","mfile2sci","minreal","minss","mkdir","modulo","moment","mrfit","msd","mstr2sci","mtlb","mtlb_0","mtlb_a","mtlb_all","mtlb_any","mtlb_axes","mtlb_axis","mtlb_beta","mtlb_box","mtlb_choices","mtlb_close","mtlb_colordef","mtlb_cond","mtlb_conv","mtlb_cov","mtlb_cumprod","mtlb_cumsum","mtlb_dec2hex","mtlb_del
ete","mtlb_diag","mtlb_diff","mtlb_dir","mtlb_double","mtlb_e","mtlb_echo","mtlb_error","mtlb_eval","mtlb_exist","mtlb_eye","mtlb_false","mtlb_fft","mtlb_fftshift","mtlb_filter","mtlb_find","mtlb_findstr","mtlb_fliplr","mtlb_fopen","mtlb_format","mtlb_fprintf","mtlb_fread","mtlb_fscanf","mtlb_full","mtlb_fwrite","mtlb_get","mtlb_grid","mtlb_hold","mtlb_i","mtlb_ifft","mtlb_image","mtlb_imp","mtlb_int16","mtlb_int32","mtlb_int8","mtlb_is","mtlb_isa","mtlb_isfield","mtlb_isletter","mtlb_isspace","mtlb_l","mtlb_legendre","mtlb_linspace","mtlb_logic","mtlb_logical","mtlb_loglog","mtlb_lower","mtlb_max","mtlb_mean","mtlb_median","mtlb_mesh","mtlb_meshdom","mtlb_min","mtlb_more","mtlb_num2str","mtlb_ones","mtlb_pcolor","mtlb_plot","mtlb_prod","mtlb_qr","mtlb_qz","mtlb_rand","mtlb_randn","mtlb_rcond","mtlb_realmax","mtlb_realmin","mtlb_repmat","mtlb_s","mtlb_semilogx","mtlb_semilogy","mtlb_setstr","mtlb_size","mtlb_sort","mtlb_sortrows","mtlb_sprintf","mtlb_sscanf","mtlb_std","mtlb_strcmp","mtlb_strcmpi","mtlb_strfind","mtlb_strrep","mtlb_subplot","mtlb_sum","mtlb_t","mtlb_toeplitz","mtlb_tril","mtlb_triu","mtlb_true","mtlb_type","mtlb_uint16","mtlb_uint32","mtlb_uint8","mtlb_upper","mtlb_var","mtlb_zeros","mu2lin","mutation_ga_binary","mutation_ga_default","mvcorrel","mvvacov","nancumsum","nand2mean","nanmax","nanmean","nanmeanf","nanmedian","nanmin","nanstdev","nansum","narsimul","ndgrid","ndims","nehari","neigh_func_csa","neigh_func_default","neigh_func_fsa","neigh_func_vfsa","neldermead_cget","neldermead_configure","neldermead_costf","neldermead_defaultoutput","neldermead_destroy","neldermead_display","neldermead_function","neldermead_get","neldermead_log","neldermead_new","neldermead_restart","neldermead_search","neldermead_updatesimp","nextpow2","nfreq","nicholschart","nlev","nmplot_cget","nmplot_configure","nmplot_contour","nmplot_destroy","nmplot_display","nmplot_function","nmplot_get","nmplot_historyplot","nmplot_log","nmplot_new","nmplot_outputcmd","nmplot_restar
t","nmplot_search","nmplot_simplexhistory","noisegen","nonreg_test_run","norm","now","null","num2cell","numdiff","numer","nyquist","nyquistfrequencybounds","obs_gram","obscont","observer","obsv_mat","obsvss","oct2dec","odeoptions","optim_ga","optim_moga","optim_nsga","optim_nsga2","optim_sa","optimbase_cget","optimbase_checkbounds","optimbase_checkcostfun","optimbase_checkx0","optimbase_configure","optimbase_destroy","optimbase_display","optimbase_function","optimbase_get","optimbase_hasbounds","optimbase_hasconstraints","optimbase_hasnlcons","optimbase_histget","optimbase_histset","optimbase_incriter","optimbase_isfeasible","optimbase_isinbounds","optimbase_isinnonlincons","optimbase_log","optimbase_logshutdown","optimbase_logstartup","optimbase_new","optimbase_outputcmd","optimbase_outstruct","optimbase_proj2bnds","optimbase_set","optimbase_stoplog","optimbase_terminate","optimget","optimplotfunccount","optimplotfval","optimplotx","optimset","optimsimplex_center","optimsimplex_check","optimsimplex_compsomefv","optimsimplex_computefv","optimsimplex_deltafv","optimsimplex_deltafvmax","optimsimplex_destroy","optimsimplex_dirmat","optimsimplex_fvmean","optimsimplex_fvstdev","optimsimplex_fvvariance","optimsimplex_getall","optimsimplex_getallfv","optimsimplex_getallx","optimsimplex_getfv","optimsimplex_getn","optimsimplex_getnbve","optimsimplex_getve","optimsimplex_getx","optimsimplex_gradientfv","optimsimplex_log","optimsimplex_new","optimsimplex_print","optimsimplex_reflect","optimsimplex_setall","optimsimplex_setallfv","optimsimplex_setallx","optimsimplex_setfv","optimsimplex_setn","optimsimplex_setnbve","optimsimplex_setve","optimsimplex_setx","optimsimplex_shrink","optimsimplex_size","optimsimplex_sort","optimsimplex_tostring","optimsimplex_xbar","orth","p_margin","pack","pareto_filter","parrot","pbig","pca","pcg","pdiv","pen2ea","pencan","pencost","penlaur","perctl","perl","perms","permute","pertrans","pfactors","pfss","phasemag","phaseplot","phc","pinv","playsnd
","plotprofile","plzr","pmodulo","pol2des","pol2str","polar","polfact","prbs_a","prettyprint","primes","princomp","profile","proj","projsl","projspec","psmall","pspect","qmr","qpsolve","quart","quaskro","rafiter","randpencil","range","rank","read_csv","readxls","recompilefunction","recons","reglin","regress","remezb","remove_param","remove_profiling","repfreq","replace_Ix_by_Fx","repmat","reset_profiling","resize_matrix","returntoscilab","rhs2code","ric_desc","riccati","rmdir","routh_t","rowcomp","rowcompr","rowinout","rowregul","rowshuff","rref","sample","samplef","samwr","savematfile","savewave","scanf","sci2exp","sciGUI_init","sci_sparse","scicos_getvalue","scicos_simulate","scicos_workspace_init","scisptdemo","scitest","sdiff","sec","secd","sech","selection_ga_elitist","selection_ga_random","sensi","set_param","setdiff","sgrid","show_margins","show_pca","showprofile","signm","sinc","sincd","sind","sinh","sinhm","sinm","sm2des","sm2ss","smga","smooth","solve","sound","soundsec","sp2adj","spaninter","spanplus","spantwo","specfact","speye","sprand","spzeros","sqroot","sqrtm","squarewave","squeeze","srfaur","srkf","ss2des","ss2ss","ss2tf","sscanf","sskf","ssprint","ssrand","st_deviation","st_i_generic","st_ility","stabil","statgain","stdev","stdevf","steadycos","strange","strcmpi","struct","sub2ind","sva","svplot","sylm","sylv","sysconv","sysdiag","sysfact","syslin","syssize","system","systmat","tabul","tand","tanh","tanhm","tanm","tbx_build_blocks","tbx_build_cleaner","tbx_build_gateway","tbx_build_gateway_clean","tbx_build_gateway_loader","tbx_build_help","tbx_build_help_loader","tbx_build_loader","tbx_build_macros","tbx_build_src","tbx_builder","tbx_builder_gateway","tbx_builder_gateway_lang","tbx_builder_help","tbx_builder_help_lang","tbx_builder_macros","tbx_builder_src","tbx_builder_src_lang","temp_law_csa","temp_law_default","temp_law_fsa","temp_law_huang","temp_law_vfsa","test_clean","test_on_columns","test_run","test_run_level","testexamples","tf2des","tf2s
s","thrownan","tic","time_id","toc","toeplitz","tokenpos","toolboxes","trace","trans","translatepaths","tree2code","trfmod","trianfml","trimmean","trisolve","trzeros","typeof","ui_observer","union","unique","unit_test_run","unix_g","unix_s","unix_w","unix_x","unobs","unpack","variance","variancef","vec2list","vectorfind","ver","warnobsolete","wavread","wavwrite","wcenter","weekday","wfir","wfir_gui","whereami","who_user","whos","wiener","wigner","winclose","window","winlist","with_javasci","with_macros_source","with_modelica_compiler","with_pvm","with_texmacs","with_tk","write_csv","xcosBlockEval","xcosBlockInterface","xcosCodeGeneration","xcosConfigureModelica","xcosPal","xcosPalAdd","xcosPalAddBlock","xcosPalExport","xcosShowBlockWarning","xcosValidateBlockSet","xcosValidateCompareBlock","xcos_compile","xcos_run","xcos_simulate","xcos_workspace_init","xmltochm","xmltoformat","xmltohtml","xmltojar","xmltopdf","xmltops","xmltoweb","yulewalk","zeropen","zgrid","zpbutt","zpch1","zpch2","zpell"] builtin_consts = ["\\$","%F","%T","%e","%eps","%f","%fftw","%gui","%i","%inf","%io","%modalWarning","%nan","%pi","%s","%t","%tk","%toolboxes","%toolboxes_dir","%z","PWD","SCI","SCIHOME","TMPDIR","a","ans","assertlib","atomslib","cacsdlib","compatibility_functilib","corelib","data_structureslib","demo_toolslib","development_toolslib","differential_equationlib","dynamic_linklib","elementary_functionslib","fd","fileiolib","functionslib","genetic_algorithmslib","helptoolslib","home","i","integerlib","interpolationlib","iolib","j","linear_algebralib","m2scilib","matiolib","modules_managerlib","myStr","neldermeadlib","optimbaselib","optimizationlib","optimsimplexlib","output_streamlib","overloadinglib","parameterslib","polynomialslib","scicos_autolib","scicos_utilslib","scinoteslib","signal_processinglib","simulated_annealinglib","soundlib","sparselib","special_functionslib","spreadsheetlib","statisticslib","stringlib","tclscilib","timelib","umfpacklib","varType","xcoslib"]
mit
jhawkesworth/ansible
lib/ansible/modules/system/filesystem.py
20
13642
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: - Alexander Bulimov (@abulimov) module: filesystem short_description: Makes a filesystem description: - This module creates a filesystem. version_added: "1.2" options: fstype: choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] description: - Filesystem type to be created. - reiserfs support was added in 2.2. - lvm support was added in 2.5. - since 2.5, I(dev) can be an image file. - vfat support was added in 2.5 - ocfs2 support was added in 2.6 - f2fs support was added in 2.7 - swap support was added in 2.8 required: yes aliases: [type] dev: description: - Target path to device or image file. required: yes aliases: [device] force: description: - If C(yes), allows to create new filesystem on devices that already has filesystem. type: bool default: 'no' resizefs: description: - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems. - XFS Will only grow if mounted. - vFAT will likely fail if fatresize < 1.04. type: bool default: 'no' version_added: "2.0" opts: description: - List of options to be passed to mkfs command. requirements: - Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too. notes: - Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem, this filesystem is overwritten even if I(force) is C(no). 
''' EXAMPLES = ''' - name: Create a ext2 filesystem on /dev/sdb1 filesystem: fstype: ext2 dev: /dev/sdb1 - name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks filesystem: fstype: ext4 dev: /dev/sdb1 opts: -cc ''' from distutils.version import LooseVersion import os import re import stat from ansible.module_utils.basic import AnsibleModule, get_platform class Device(object): def __init__(self, module, path): self.module = module self.path = path def size(self): """ Return size in bytes of device. Returns int """ statinfo = os.stat(self.path) if stat.S_ISBLK(statinfo.st_mode): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) return int(devsize_in_bytes) elif os.path.isfile(self.path): return os.path.getsize(self.path) else: self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) def __str__(self): return self.path class Filesystem(object): GROW = None MKFS = None MKFS_FORCE_FLAGS = '' LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} def __init__(self, module): self.module = module @property def fstype(self): return type(self).__name__ def get_fs_size(self, dev): """ Return size in bytes of filesystem on device. Returns int """ raise NotImplementedError() def create(self, opts, dev): if self.module.check_mode: return mkfs = self.module.get_bin_path(self.MKFS, required=True) if opts is None: cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev) else: cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev) self.module.run_command(cmd, check_rc=True) def grow_cmd(self, dev): cmd = self.module.get_bin_path(self.GROW, required=True) return [cmd, str(dev)] def grow(self, dev): """Get dev and fs size and compare. 
Returns stdout of used command.""" devsize_in_bytes = dev.size() try: fssize_in_bytes = self.get_fs_size(dev) except NotImplementedError: self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype) if not fssize_in_bytes < devsize_in_bytes: self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) elif self.module.check_mode: self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev)) else: _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True) return out class Ext(Filesystem): MKFS_FORCE_FLAGS = '-F' GROW = 'resize2fs' def get_fs_size(self, dev): cmd = self.module.get_bin_path('tune2fs', required=True) # Get Block count and Block size _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) for line in size.splitlines(): if 'Block count:' in line: block_count = int(line.split(':')[1].strip()) elif 'Block size:' in line: block_size = int(line.split(':')[1].strip()) return block_size * block_count class Ext2(Ext): MKFS = 'mkfs.ext2' class Ext3(Ext): MKFS = 'mkfs.ext3' class Ext4(Ext): MKFS = 'mkfs.ext4' class XFS(Filesystem): MKFS = 'mkfs.xfs' MKFS_FORCE_FLAGS = '-f' GROW = 'xfs_growfs' def get_fs_size(self, dev): cmd = self.module.get_bin_path('xfs_growfs', required=True) _, size, _ = self.module.run_command([cmd, '-n', str(dev)], check_rc=True, environ_update=self.LANG_ENV) for line in size.splitlines(): col = line.split('=') if col[0].strip() == 'data': if col[1].strip() != 'bsize': self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")') if col[2].split()[1] != 'blocks': self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")') block_size = int(col[2].split()[0]) block_count = int(col[3].split(',')[0]) return block_size * block_count class Reiserfs(Filesystem): MKFS = 'mkfs.reiserfs' 
MKFS_FORCE_FLAGS = '-f' class Btrfs(Filesystem): MKFS = 'mkfs.btrfs' def __init__(self, module): super(Btrfs, self).__init__(module) _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) match = re.search(r" v([0-9.]+)", stdout) if not match: # v0.20-rc1 use stderr match = re.search(r" v([0-9.]+)", stderr) if match: # v0.20-rc1 doesn't have --force parameter added in following version v3.12 if LooseVersion(match.group(1)) >= LooseVersion('3.12'): self.MKFS_FORCE_FLAGS = '-f' else: self.MKFS_FORCE_FLAGS = '' else: # assume version is greater or equal to 3.12 self.MKFS_FORCE_FLAGS = '-f' self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) class Ocfs2(Filesystem): MKFS = 'mkfs.ocfs2' MKFS_FORCE_FLAGS = '-Fx' class F2fs(Filesystem): MKFS = 'mkfs.f2fs' GROW = 'resize.f2fs' @property def MKFS_FORCE_FLAGS(self): mkfs = self.module.get_bin_path(self.MKFS, required=True) cmd = "%s %s" % (mkfs, os.devnull) _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" # mkfs.f2fs displays version since v1.2.0 match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) if match is not None: # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem # before that version -f switch wasn't used if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): return '-f' return '' def get_fs_size(self, dev): cmd = self.module.get_bin_path('dump.f2fs', required=True) # Get sector count and sector size _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) sector_size = None sector_count = None for line in dump.splitlines(): if 'Info: sector size = ' in line: # expected: 'Info: sector size = 512' sector_size = int(line.split()[4]) elif 'Info: total FS sectors = ' in line: # expected: 'Info: total FS sectors = 102400 (50 MB)' sector_count = int(line.split()[5]) if None not in 
(sector_size, sector_count): break else: self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump)) self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev) return sector_size * sector_count class VFAT(Filesystem): if get_platform() == 'FreeBSD': MKFS = "newfs_msdos" else: MKFS = 'mkfs.vfat' GROW = 'fatresize' def get_fs_size(self, dev): cmd = self.module.get_bin_path(self.GROW, required=True) _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) for line in output.splitlines()[1:]: param, value = line.split(':', 1) if param.strip() == 'Size': return int(value.strip()) self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev) def grow_cmd(self, dev): cmd = self.module.get_bin_path(self.GROW) return [cmd, "-s", str(dev.size()), str(dev.path)] class LVM(Filesystem): MKFS = 'pvcreate' MKFS_FORCE_FLAGS = '-f' GROW = 'pvresize' def get_fs_size(self, dev): cmd = self.module.get_bin_path('pvs', required=True) _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) block_count = int(size) return block_count class Swap(Filesystem): MKFS = 'mkswap' MKFS_FORCE_FLAGS = '-f' FILESYSTEMS = { 'ext2': Ext2, 'ext3': Ext3, 'ext4': Ext4, 'ext4dev': Ext4, 'f2fs': F2fs, 'reiserfs': Reiserfs, 'xfs': XFS, 'btrfs': Btrfs, 'vfat': VFAT, 'ocfs2': Ocfs2, 'LVM2_member': LVM, 'swap': Swap, } def main(): friendly_names = { 'lvm': 'LVM2_member', } fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys()) # There is no "single command" to manipulate filesystems, so we map them all out and their options module = AnsibleModule( argument_spec=dict( fstype=dict(required=True, aliases=['type'], choices=list(fstypes)), dev=dict(required=True, aliases=['device']), opts=dict(), force=dict(type='bool', default=False), resizefs=dict(type='bool', default=False), ), 
supports_check_mode=True, ) dev = module.params['dev'] fstype = module.params['fstype'] opts = module.params['opts'] force = module.params['force'] resizefs = module.params['resizefs'] if fstype in friendly_names: fstype = friendly_names[fstype] changed = False try: klass = FILESYSTEMS[fstype] except KeyError: module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype) if not os.path.exists(dev): module.fail_json(msg="Device %s not found." % dev) dev = Device(module, dev) cmd = module.get_bin_path('blkid', required=True) rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) # In case blkid isn't able to identify an existing filesystem, device is considered as empty, # then this existing filesystem would be overwritten even if force isn't enabled. fs = raw_fs.strip() filesystem = klass(module) same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] if same_fs and not resizefs and not force: module.exit_json(changed=False) elif same_fs and resizefs: if not filesystem.GROW: module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype) out = filesystem.grow(dev) # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on. # in the future, you would have to parse the output to determine this. # thankfully, these are safe operations if no change is made. module.exit_json(changed=True, msg=out) elif fs and not force: module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) # create fs filesystem.create(opts, dev) changed = True module.exit_json(changed=changed) if __name__ == '__main__': main()
gpl-3.0
sahutd/youtube-dl
devscripts/make_supportedsites.py
96
1149
#!/usr/bin/env python from __future__ import unicode_literals import io import optparse import os import sys # Import youtube_dl ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') sys.path.append(ROOT_DIR) import youtube_dl def main(): parser = optparse.OptionParser(usage='%prog OUTFILE.md') options, args = parser.parse_args() if len(args) != 1: parser.error('Expected an output filename') outfile, = args def gen_ies_md(ies): for ie in ies: ie_md = '**{0}**'.format(ie.IE_NAME) ie_desc = getattr(ie, 'IE_DESC', None) if ie_desc is False: continue if ie_desc is not None: ie_md += ': {0}'.format(ie.IE_DESC) if not ie.working(): ie_md += ' (Currently broken)' yield ie_md ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()) out = '# Supported sites\n' + ''.join( ' - ' + md + '\n' for md in gen_ies_md(ies)) with io.open(outfile, 'w', encoding='utf-8') as outf: outf.write(out) if __name__ == '__main__': main()
unlicense
gurneyalex/odoo
addons/sale/wizard/payment_acquirer_onboarding_wizard.py
6
1520
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models class PaymentWizard(models.TransientModel): """ Override for the sale quotation onboarding panel. """ _inherit = 'payment.acquirer.onboarding.wizard' _name = 'sale.payment.acquirer.onboarding.wizard' _description = 'Sale Payment acquire onboarding wizard' def _get_default_payment_method(self): return self.env.company.sale_onboarding_payment_method or 'digital_signature' payment_method = fields.Selection(selection_add=[ ('digital_signature', 'Online signature'), ('paypal', "PayPal"), ('stripe', "Credit card (via Stripe)"), ('other', "Other payment acquirer"), ('manual', "Custom payment instructions"), ], default=_get_default_payment_method) # def _set_payment_acquirer_onboarding_step_done(self): """ Override. """ self.env.company.sudo().set_onboarding_step_done('sale_onboarding_order_confirmation_state') def add_payment_methods(self, *args, **kwargs): self.env.company.sale_onboarding_payment_method = self.payment_method if self.payment_method == 'digital_signature': self.env.company.portal_confirmation_sign = True if self.payment_method in ('paypal', 'stripe', 'other', 'manual'): self.env.company.portal_confirmation_pay = True return super(PaymentWizard, self).add_payment_methods(*args, **kwargs)
agpl-3.0
gw0/myhdl
myhdl/test/conversion/general/test_bin2gray.py
2
1547
from __future__ import absolute_import import os path = os.path from myhdl import * from myhdl.conversion import verify def bin2gray2(B, G, width): """ Gray encoder. B -- input intbv signal, binary encoded G -- output intbv signal, gray encoded width -- bit width """ @instance def logic(): Bext = intbv(0)[width+1:] while 1: yield B Bext[:] = B for i in range(width): G.next[i] = Bext[i+1] ^ Bext[i] return logic def bin2gray(B, G, width): """ Gray encoder. B -- input intbv signal, binary encoded G -- output intbv signal, gray encoded width -- bit width """ @always_comb def logic(): Bext = intbv(0)[width+1:] Bext[:] = B for i in range(width): G.next[i] = Bext[i+1] ^ Bext[i] return logic def bin2grayBench(width, bin2gray): B = Signal(intbv(0)[width:]) G = Signal(intbv(0)[width:]) bin2gray_inst = bin2gray(B, G, width) n = 2**width @instance def stimulus(): for i in range(n): B.next = i yield delay(10) #print "B: " + bin(B, width) + "| G_v: " + bin(G_v, width) #print bin(G, width) #print bin(G_v, width) print("%d" % G) return stimulus, bin2gray_inst def test1(): assert verify(bin2grayBench, width=8, bin2gray=bin2gray) == 0 def test2(): assert verify(bin2grayBench, width=8, bin2gray=bin2gray2) == 0
lgpl-2.1
rebearteta/social-ideation
app/migrations/0007_auto_20150908_1518.py
2
1475
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20150825_1513'), ] operations = [ migrations.CreateModel( name='SocialNetworkAppUser', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('external_id', models.CharField(max_length=50, editable=False)), ('email', models.EmailField(max_length=254, editable=False)), ('access_token', models.CharField(max_length=300, editable=False)), ('access_token_exp', models.DateTimeField(editable=False)), ], ), migrations.AlterField( model_name='author', name='social_network', field=models.ForeignKey(to='app.SocialNetworkAppUser', null=True), ), migrations.AlterField( model_name='socialnetworkapp', name='page_id', field=models.CharField(help_text=b'Id of the page/group/account used to hold the content generate in the consultation platform', max_length=50, null=True, blank=True), ), migrations.AddField( model_name='socialnetworkappuser', name='snapp', field=models.ForeignKey(editable=False, to='app.SocialNetworkApp'), ), ]
mit
sharkeyck/Lidar
lidar_edited.py
1
4303
#Display Data from Neato LIDAR #based on code from Nicolas "Xevel" Saugnier #requires vpython and pyserial import thread, time, sys, traceback, math, struct COM_PORT = "COM4" # example: 5 == "COM6" == "/dev/tty5" BAUD_RATE = 115200 FPS = 60 OFFSET = 140 from visual import * point = points(pos=[(0,0,0) for i in range(360)], size=5, color=(0 , 1, 0)) #green, good point lidar_representation = ring(pos=(0,0,0), axis=(0,1,0), radius=OFFSET-1, thickness=1, color = color.yellow) lidar_representation.visible = True label_speed = label(pos = (0,-500,0), xoffset=1, box=False, opacity=0.1) label_errors = label(pos = (0,-1000,0), xoffset=1, text="errors: 0", visible = False, box=False) def reset_display( angle ): angle_rad = angle * math.pi / 180.0 c = math.cos(angle_rad) s = -math.sin(angle_rad) #reset the point display point.pos[angle] = vector( 0, 0, 0 ) def parse_point_data( angle, data ): dist_mm = data[0] | (( data[1] & 0x3f) << 8) # distance is coded on 14 bits quality = data[2] | (data[3] << 8) # quality is on 16 bits is_bad_data = data[1] & 0x80 is_low_quality = data[1] & 0x40 parsed_data = (dist_mm, is_bad_data, is_low_quality) return parsed_data def update_point( angle, data ): """Updates the view of a sample. Takes the angle (an int, from 0 to 359) and the list of four bytes of data in the order they arrived. """ (dist_mm, is_bad_data, is_low_quality) = parse_point_data(angle, data) angle_rad = angle * math.pi / 180.0 c = math.cos(angle_rad) s = -math.sin(angle_rad) dist_x = dist_mm*c dist_y = dist_mm*s reset_display(angle) # display the sample if is_bad_data: return else: point.pos[angle] = vector( dist_x,0, dist_y) if is_low_quality: point.color[angle] =(0.4, 0, 0) #red for low quality else: point.color[angle] =(0, 1, 0) #green for good quality def check_sum(data): """Compute and return the check_sum as an int. data is a list of 22 bytes (as ints), in the order they arrived in. last 2 bytes should be ignored, as they are the provided checksum. 
""" #group the data by word, little-endian data_list = [] for t in range(10): data_list.append( data[2*t] + (data[2*t+1]<<8) ) # compute the check_sum on 32 bits chk32 = 0 for d in data_list: chk32 = (chk32 << 1) + d check_sum = (chk32 & 0x7FFF) + ( chk32 >> 15 ) # wrap around to fit into 15 bits check_sum = check_sum & 0x7FFF # truncate to 15 bits return int( check_sum ) def read_lidar(): #data string: <start> <index> <speed_L> <speed_H> [Data 0] [Data 1] [Data 2] [Data 3] <checksum_L> <checksum_H> START_BYTE = 0xFA INDEX_OFFSET = 0xA0 NUM_PACKETS = 90 PACKET_BODY_SIZE = 20 DATA_POINTS = 4 check_sum_errors = 0 index = 0 while True: #start byte full_data = ser.read(1) if ord(full_data) != START_BYTE : continue #position index full_data += ser.read(1) index = ord(full_data[1]) - INDEX_OFFSET if index < 0 or index > NUM_PACKETS : continue full_data += ser.read(PACKET_BODY_SIZE) data = [[] for inc in range(DATA_POINTS)] (speed_rpm, data[0], data[1], data[2], data[3], incoming_check_sum) = struct.unpack('x x H 4s 4s 4s 4s H', full_data) # verify that the received checksum is equal to the one computed from the data if check_sum([ord(b) for b in full_data]) == incoming_check_sum: speed_rpm = float( speed_rpm ) / 64.0 label_speed.text = "RPM : " + str(speed_rpm) b_data = [bytearray(d) for d in data] else: # the checksum does not match, something went wrong... check_sum_errors +=1 label_errors.text = "errors: "+str(check_sum_errors) # give the samples an error state b_data = [[0, 0x80, 0, 0] for d in data] for d in b_data: update_point(index * 4 + inc, d) import serial ser = serial.Serial(COM_PORT, BAUD_RATE) th = thread.start_new_thread(read_lidar, ()) while True: rate(FPS) # synchonous repaint at 60fps
apache-2.0
leifdenby/numpy
numpy/lib/tests/test_polynomial.py
116
5092
from __future__ import division, absolute_import, print_function ''' >>> p = np.poly1d([1.,2,3]) >>> p poly1d([ 1., 2., 3.]) >>> print(p) 2 1 x + 2 x + 3 >>> q = np.poly1d([3.,2,1]) >>> q poly1d([ 3., 2., 1.]) >>> print(q) 2 3 x + 2 x + 1 >>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j])) 3 2 (1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j) >>> print(np.poly1d([-3, -2, -1])) 2 -3 x - 2 x - 1 >>> p(0) 3.0 >>> p(5) 38.0 >>> q(0) 1.0 >>> q(5) 86.0 >>> p * q poly1d([ 3., 8., 14., 8., 3.]) >>> p / q (poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667])) >>> p + q poly1d([ 4., 4., 4.]) >>> p - q poly1d([-2., 0., 2.]) >>> p ** 4 poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.]) >>> p(q) poly1d([ 9., 12., 16., 8., 6.]) >>> q(p) poly1d([ 3., 12., 32., 40., 34.]) >>> np.asarray(p) array([ 1., 2., 3.]) >>> len(p) 2 >>> p[0], p[1], p[2], p[3] (3.0, 2.0, 1.0, 0) >>> p.integ() poly1d([ 0.33333333, 1. , 3. , 0. ]) >>> p.integ(1) poly1d([ 0.33333333, 1. , 3. , 0. ]) >>> p.integ(5) poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. , 0. , 0. , 0. 
]) >>> p.deriv() poly1d([ 2., 2.]) >>> p.deriv(2) poly1d([ 2.]) >>> q = np.poly1d([1.,2,3], variable='y') >>> print(q) 2 1 y + 2 y + 3 >>> q = np.poly1d([1.,2,3], variable='lambda') >>> print(q) 2 1 lambda + 2 lambda + 3 >>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1])) (poly1d([ 1., -1.]), poly1d([ 0.])) ''' import numpy as np from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_almost_equal, rundocs ) class TestDocs(TestCase): def test_doctests(self): return rundocs() def test_roots(self): assert_array_equal(np.roots([1, 0, 0]), [0, 0]) def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 assert_equal(str(p), " 2\n" "3 x + 2 x + 1") p = np.poly1d([1, 2]) p[0] = 0 p[1] = 0 assert_equal(str(p), " \n0") def test_polyfit(self): c = np.array([3., 2., 1.]) x = np.linspace(0, 2, 7) y = np.polyval(c, x) err = [1, -1, 1, -1, 1, -1, 1] weights = np.arange(8, 1, -1)**2/7.0 # check 1D case m, cov = np.polyfit(x, y+err, 2, cov=True) est = [3.8571, 0.2857, 1.619] assert_almost_equal(est, m, decimal=4) val0 = [[2.9388, -5.8776, 1.6327], [-5.8776, 12.7347, -4.2449], [1.6327, -4.2449, 2.3220]] assert_almost_equal(val0, cov, decimal=4) m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) val = [[8.7929, -10.0103, 0.9756], [-10.0103, 13.6134, -1.8178], [0.9756, -1.8178, 0.6674]] assert_almost_equal(val, cov2, decimal=4) # check 2D (n,1) case y = y[:, np.newaxis] c = c[:, np.newaxis] assert_almost_equal(c, np.polyfit(x, y, 2)) # check 2D (n,2) case yy = np.concatenate((y, y), axis=1) cc = np.concatenate((c, c), axis=1) assert_almost_equal(cc, np.polyfit(x, yy, 2)) m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) assert_almost_equal(est, m[:, 0], decimal=4) assert_almost_equal(est, m[:, 1], decimal=4) assert_almost_equal(val0, cov[:, :, 0], decimal=4) assert_almost_equal(val0, cov[:, :, 1], decimal=4) def test_objects(self): 
from decimal import Decimal p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) p2 = p * Decimal('1.333333333333333') assert_(p2[1] == Decimal("3.9999999999999990")) p2 = p.deriv() assert_(p2[1] == Decimal('8.0')) p2 = p.integ() assert_(p2[3] == Decimal("1.333333333333333333333333333")) assert_(p2[2] == Decimal('1.5')) assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) p = np.poly([Decimal(1), Decimal(2)]) assert_equal(np.poly([Decimal(1), Decimal(2)]), [1, Decimal(-3), Decimal(2)]) def test_complex(self): p = np.poly1d([3j, 2j, 1j]) p2 = p.integ() assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) p2 = p.deriv() assert_((p2.coeffs == [6j, 2j]).all()) def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) assert_( (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) def test_zero_dims(self): try: np.poly(np.zeros((0, 0))) except ValueError: pass def test_poly_int_overflow(self): """ Regression test for gh-5096. """ v = np.arange(1, 21) assert_almost_equal(np.poly(v), np.poly(np.diag(v))) if __name__ == "__main__": run_module_suite()
bsd-3-clause
victortxa/fatiando
fatiando/gravmag/eqlayer.py
6
14837
""" Equivalent layer processing. Use the classes here to estimate an equivalent layer from potential field data. Then you can use the estimated layer to perform tranformations (gridding, continuation, derivation, reduction to the pole, etc.) by forward modeling the layer. Use :mod:`fatiando.gravmag.sphere` for forward modeling. **Algorithms** * :class:`~fatiando.gravmag.eqlayer.EQLGravity` and :class:`~fatiando.gravmag.eqlayer.EQLTotalField`: The classic (space domain) equivalent layer as formulated in Li and Oldenburg (2010) or Oliveira Jr. et al (2012). Doesn't have wavelet compression or other tweaks. * :class:`~fatiando.gravmag.eqlayer.PELGravity` and :class:`~fatiando.gravmag.eqlayer.PELTotalField`: The polynomial equivalent layer of Oliveira Jr. et al (2012). A fast and memory efficient algorithm. Both of these require special regularization (:class:`~fatiando.gravmag.eqlayer.PELSmoothness`). **References** Li, Y., and D. W. Oldenburg (2010), Rapid construction of equivalent sources using wavelets, Geophysics, 75(3), L51-L59, doi:10.1190/1.3378764. Oliveira Jr., V. C., V. C. F. Barbosa, and L. Uieda (2012), Polynomial equivalent layer, Geophysics, 78(1), G1-G13, doi:10.1190/geo2012-0196.1. ---- """ from __future__ import division, absolute_import from future.builtins import super, range import numpy import scipy.sparse from . import sphere as kernel from ..utils import dircos, safe_dot from ..inversion import Misfit, Smoothness class EQLBase(Misfit): """ Base class for the classic equivalent layer. """ def __init__(self, x, y, z, data, grid): super().__init__(data=data, nparams=len(grid), islinear=True) self.x = x self.y = y self.z = z self.grid = grid def predicted(self, p): """ Calculate the data predicted by a given parameter vector. Parameters: * p : 1d-array (optional) The parameter vector with the estimated physical properties of the layer. If not given, will use the value calculated by ``.fit()``. Returns: * result : 1d-array The predicted data vector. 
""" return safe_dot(self.jacobian(p), p) class EQLGravity(EQLBase): """ Estimate an equivalent layer from gravity data. .. note:: Assumes x = North, y = East, z = Down. Parameters: * x, y, z : 1d-arrays The x, y, z coordinates of each data point. * data : 1d-array The gravity data at each point. * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer. Will invert for the density of each point in the grid. * field : string Which gravitational field is the data. Options are: ``'gz'`` (gravity anomaly), ``'gxx'``, ``'gxy'``, ..., ``'gzz'`` (gravity gradient tensor). Defaults to ``'gz'``. """ def __init__(self, x, y, z, data, grid, field='gz'): super().__init__(x, y, z, data, grid) self.field = field def jacobian(self, p): """ Calculate the Jacobian matrix for a given parameter vector. """ x = self.x y = self.y z = self.z func = getattr(kernel, self.field) jac = numpy.empty((self.ndata, self.nparams), dtype=numpy.float) for i, c in enumerate(self.grid): jac[:, i] = func(x, y, z, [c], dens=1.) return jac class EQLTotalField(EQLBase): """ Estimate an equivalent layer from total field magnetic anomaly data. .. note:: Assumes x = North, y = East, z = Down. Parameters: * x, y, z : 1d-arrays The x, y, z coordinates of each data point. * data : 1d-array The total field anomaly data at each point. * inc, dec : floats The inclination and declination of the inducing field * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer. Will invert for the magnetization intensity of each point in the grid. * sinc, sdec : None or floats The inclination and declination of the equivalent layer. Use these if there is remanent magnetization and the total magnetization of the layer if different from the induced magnetization. 
If there is only induced magnetization, use None """ def __init__(self, x, y, z, data, inc, dec, grid, sinc=None, sdec=None): super().__init__(x, y, z, data, grid) self.inc, self.dec = inc, dec self.sinc = sinc if sinc is not None else inc self.sdec = sdec if sdec is not None else dec def jacobian(self, p): """ Calculate the Jacobian matrix for a given parameter vector. """ x = self.x y = self.y z = self.z inc, dec = self.inc, self.dec mag = dircos(self.sinc, self.sdec) jac = numpy.empty((self.ndata, self.nparams), dtype=float) for i, c in enumerate(self.grid): jac[:, i] = kernel.tf(x, y, z, [c], inc, dec, pmag=mag) return jac class PELBase(EQLBase): """ Base class for the Polynomial Equivalent Layer. .. note:: Overloads *fit* to convert the estimated coefficients to physical properties. The coefficients are stored in the ``coeffs_`` attribute. """ def __init__(self, x, y, z, data, grid, windows, degree): super().__init__(x, y, z, data, grid) self.nparams = windows[0]*windows[1]*ncoeffs(degree) self.windows = windows self.degree = degree def fmt_estimate(self, coefs): """ Convert the estimated polynomial coefficients to physical property values along the layer. Parameters: * coefs : 1d-array The estimated parameter vector with the polynomial coefficients Returns: * estimate : 1d-array The converted physical property values along the layer. 
""" ny, nx = self.windows pergrid = ncoeffs(self.degree) estimate = numpy.empty(self.grid.shape, dtype=float) grids = self.grid.split(self.windows) k = 0 ystart = 0 gny, gnx = grids[0].shape for i in range(ny): yend = ystart + gny xstart = 0 for j in range(nx): xend = xstart + gnx g = grids[k] bk = _bkmatrix(g, self.degree) window_coefs = coefs[k * pergrid:(k + 1) * pergrid] window_props = safe_dot(bk, window_coefs).reshape(g.shape) estimate[ystart:yend, xstart:xend] = window_props xstart = xend k += 1 ystart = yend self.coeffs_ = coefs return estimate.ravel() def _bkmatrix(grid, degree): """ Make the Bk polynomial coefficient matrix for a given PointGrid. This matrix converts the coefficients into physical property values. Parameters: * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer * degree : int The degree of the bivariate polynomial Returns: * bk : 2d-array The matrix Examples: >>> from fatiando.mesher import PointGrid >>> grid = PointGrid((0, 1, 0, 2), 10, (2, 2)) >>> print _bkmatrix(grid, 2) [[ 1. 0. 0. 0. 0. 0.] [ 1. 2. 0. 4. 0. 0.] [ 1. 0. 1. 0. 0. 1.] [ 1. 2. 1. 4. 2. 1.]] >>> print _bkmatrix(grid, 1) [[ 1. 0. 0.] [ 1. 2. 0.] [ 1. 0. 1.] [ 1. 2. 1.]] >>> print _bkmatrix(grid, 3) [[ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 1. 2. 0. 4. 0. 0. 8. 0. 0. 0.] [ 1. 0. 1. 0. 0. 1. 0. 0. 0. 1.] [ 1. 2. 1. 4. 2. 1. 8. 4. 2. 1.]] """ bmatrix = numpy.transpose( [(grid.x**i)*(grid.y**j) for l in range(1, degree + 2) for i, j in zip(range(l), range(l - 1, -1, -1))]) return bmatrix def ncoeffs(degree): """ Calculate the number of coefficients in a bivarite polynomail. Parameters: * degree : int The degree of the polynomial Returns: * n : int The number of coefficients Examples: >>> ncoeffs(1) 3 >>> ncoeffs(2) 6 >>> ncoeffs(3) 10 >>> ncoeffs(4) 15 """ return sum(range(1, degree + 2)) class PELGravity(PELBase): """ Estimate a polynomial equivalent layer from gravity data. .. note:: Assumes x = North, y = East, z = Down. 
Parameters: * x, y, z : 1d-arrays The x, y, z coordinates of each data point. * data : 1d-array The gravity data at each point. * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer. Will invert for the density of each point in the grid. * windows : tuple = (ny, nx) The number of windows that the layer will be divided in the y and x directions, respectively * degree : int The degree of the bivariate polynomials used in each window of the PEL * field : string Which gravitational field is the data. Options are: ``'gz'`` (gravity anomaly), ``'gxx'``, ``'gxy'``, ..., ``'gzz'`` (gravity gradient tensor). Defaults to ``'gz'``. """ def __init__(self, x, y, z, data, grid, windows, degree, field='gz'): super().__init__(x, y, z, data, grid, windows, degree) self.field = field def jacobian(self, p): """ Calculate the Jacobian matrix for a given parameter vector. """ x = self.x y = self.y z = self.z func = getattr(kernel, self.field) grids = self.grid.split(self.windows) pergrid = ncoeffs(self.degree) jac = numpy.empty((self.ndata, self.nparams), dtype=float) gk = numpy.empty((self.ndata, grids[0].size), dtype=float) for i, grid in enumerate(grids): bk = _bkmatrix(grid, self.degree) for k, c in enumerate(grid): gk[:, k] = func(x, y, z, [c], dens=1.) jac[:, i*pergrid:(i + 1)*pergrid] = safe_dot(gk, bk) return jac class PELTotalField(PELBase): """ Estimate a polynomial equivalent layer from magnetic total field anomaly. .. note:: Assumes x = North, y = East, z = Down. Parameters: * x, y, z : 1d-arrays The x, y, z coordinates of each data point. * data : 1d-array The total field magnetic anomaly data at each point. * inc, dec : floats The inclination and declination of the inducing field * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer. Will invert for the magnetization intensity of each point in the grid. 
* windows : tuple = (ny, nx) The number of windows that the layer will be divided in the y and x directions, respectively * degree : int The degree of the bivariate polynomials used in each window of the PEL * sinc, sdec : None or floats The inclination and declination of the equivalent layer. Use these if there is remanent magnetization and the total magnetization of the layer if different from the induced magnetization. If there is only induced magnetization, use None """ def __init__(self, x, y, z, data, inc, dec, grid, windows, degree, sinc=None, sdec=None): super().__init__(x, y, z, data, grid, windows, degree) self.inc, self.dec = inc, dec self.sinc = sinc if sinc is not None else inc self.sdec = sdec if sdec is not None else dec def jacobian(self, p): """ Calculate the Jacobian matrix for a given parameter vector. """ x = self.x y = self.y z = self.z inc, dec = self.inc, self.dec mag = dircos(self.sinc, self.sdec) grids = self.grid.split(self.windows) pergrid = ncoeffs(self.degree) jac = numpy.empty((self.ndata, self.nparams), dtype=float) gk = numpy.empty((self.ndata, grids[0].size), dtype=float) for i, grid in enumerate(grids): bk = _bkmatrix(grid, self.degree) for k, c in enumerate(grid): gk[:, k] = kernel.tf(x, y, z, [c], inc, dec, pmag=mag) jac[:, i*pergrid:(i + 1)*pergrid] = safe_dot(gk, bk) return jac class PELSmoothness(Smoothness): """ Regularization to "join" neighboring windows in the PEL. Use this with :class:`~fatiando.gravmag.eqlayer.PELGravity` and :class:`~fatiando.gravmag.eqlayer.PELTotalField`. Parameters passed to PELSmoothness must be the same as passed to the PEL solvers. Parameters: * grid : :class:`~fatiando.mesher.PointGrid` The sources in the equivalent layer. * windows : tuple = (ny, nx) The number of windows that the layer will be divided in the y and x directions, respectively. 
* degree : int The degree of the bivariate polynomials used in each window of the PEL See the docstring of :class:`~fatiando.gravmag.eqlayer.PELGravity` for an example usage. """ def __init__(self, grid, windows, degree): super().__init__(_pel_fdmatrix(windows, grid, degree)) def _pel_fdmatrix(windows, grid, degree): """ Makes the finite difference matrix for PEL smoothness. """ ny, nx = windows grids = grid.split(windows) gsize = grids[0].size gny, gnx = grids[0].shape nderivs = (nx - 1) * grid.shape[0] + (ny - 1) * grid.shape[1] rmatrix = scipy.sparse.lil_matrix((nderivs, grid.size)) deriv = 0 # derivatives in x for k in range(0, len(grids) - ny): bottom = k * gsize + gny * (gnx - 1) top = (k + ny) * gsize for i in range(gny): rmatrix[deriv, bottom + i] = -1. rmatrix[deriv, top + 1] = 1. deriv += 1 # derivatives in y for k in range(0, len(grids)): if (k + 1) % ny == 0: continue right = k * gsize + gny - 1 left = (k + 1) * gsize for i in range(gnx): rmatrix[deriv, right + i * gny] = -1. rmatrix[deriv, left + i * gny] = 1. deriv += 1 rmatrix = rmatrix.tocsr() # Make the RB matrix because R is for the sources, B converts it to # coefficients. pergrid = ncoeffs(degree) ncoefs = len(grids) * pergrid fdmatrix = numpy.empty((nderivs, ncoefs), dtype=float) st = 0 for i, g in enumerate(grids): bk = _bkmatrix(g, degree) en = st + g.size fdmatrix[:, i*pergrid:(i + 1)*pergrid] = safe_dot(rmatrix[:, st:en], bk) st = en return fdmatrix
bsd-3-clause
gangadharkadam/vervefrappe
frappe/tests/__init__.py
70
1107
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals def insert_test_data(doctype, sort_fn=None): import frappe.model data = get_test_doclist(doctype) if sort_fn: data = sorted(data, key=sort_fn) for doclist in data: frappe.insert(doclist) def get_test_doclist(doctype, name=None): """get test doclist, collection of doclists""" import os, frappe from frappe import conf from frappe.modules.utils import peval_doclist from frappe.modules import scrub doctype = scrub(doctype) doctype_path = os.path.join(os.path.dirname(os.path.abspath(conf.__file__)), conf.test_data_path, doctype) if name: with open(os.path.join(doctype_path, scrub(name) + ".json"), 'r') as txtfile: doclist = peval_doclist(txtfile.read()) return doclist else: all_doclists = [] for fname in filter(lambda n: n.endswith(".json"), os.listdir(doctype_path)): with open(os.path.join(doctype_path, scrub(fname)), 'r') as txtfile: all_doclists.append(peval_doclist(txtfile.read())) return all_doclists
mit
naokits/adminkun_viewer_old
Server/gaeo/gaeo/view/helper/form.py
1
6971
# -*- coding: utf-8 -*- # # Copyright 2008 GAEO Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """" GAEO view form helpers """ def country_select(id = "", name = "", **opts): """ Create a select field that lists all country/region selection - id: the id of the select field - name: the name of the select field - opts: - class: class attribute - style: style attribute """ countries = ["Afghanistan","Åland Islands","Albania","Algeria","American Samoa","Andorra","Angola", "Anguilla","Antarctica","Antigua and Barbuda","Argentina","Armenia","Aruba","Australia","Austria", "Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin", "Bermuda","Bhutan","Bolivia","Bosnia and Herzegovina","Botswana","Bouvet Island","Brazil", "British Indian Ocean Territory","Brunei Darussalam","Bulgaria","Burkina Faso","Burundi","Cambodia", "Cameroon","Canada","Cape Verde","Cayman Islands","Central African Republic","Chad","Chile","China", "Christmas Island","Cocos (Keeling) Islands","Colombia","Comoros","Congo", "Congo, The Democratic Republic of the","Cook Islands","Costa Rica","Côte d'Ivoire","Croatia","Cuba", "Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt", "El Salvador","Equatorial Guinea","Eritrea","Estonia","Ethiopia","Falkland Islands (Malvinas)", "Faroe Islands","Fiji","Finland","France","French Guiana","French Polynesia", "French Southern 
Territories","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guadeloupe","Guam","Guatemala","Guernsey","Guinea", "Guinea-Bissau","Guyana","Haiti","Heard Island and McDonald Islands","Holy See (Vatican City State)", "Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran, Islamic Republic of","Iraq", "Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya", "Kiribati","Korea, Democratic People's Republic of","Korea, Republic of","Kuwait","Kyrgyzstan", "Lao People's Democratic Republic","Latvia","Lebanon","Lesotho","Liberia","Libyan Arab Jamahiriya", "Liechtenstein","Lithuania","Luxembourg","Macao","Macedonia, Republic of", "Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Marshall Islands","Martinique", "Mauritania","Mauritius","Mayotte","Mexico","Micronesia, Federated States of","Moldova", "Monaco","Mongolia","Montenegro","Montserrat","Morocco","Mozambique","Myanmar","Namibia","Nauru", "Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger", "Nigeria","Niue","Norfolk Island","Northern Mariana Islands","Norway","Oman","Pakistan","Palau", "Palestinian Territory, Occupied","Panama","Papua New Guinea","Paraguay","Peru","Philippines", "Pitcairn","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russian Federation", "Rwanda","Saint Barthélemy","Saint Helena","Saint Kitts and Nevis","Saint Lucia", "Saint Martin (French part)","Saint Pierre and Miquelon","Saint Vincent and the Grenadines","Samoa","San Marino", "Sao Tome and Principe","Saudi Arabia","Senegal","Serbia","Seychelles","Sierra Leone","Singapore", "Slovakia","Slovenia","Solomon Islands","Somalia","South Africa", "South Georgia and the South Sandwich Islands","Spain","Sri Lanka","Sudan","Suriname", "Svalbard and Jan Mayen","Swaziland","Sweden","Switzerland","Syrian Arab Republic", "Taiwan","Tajikistan","Tanzania, United Republic of","Thailand","Timor-Leste", 
"Togo","Tokelau","Tonga","Trinidad and Tobago","Tunisia","Turkey","Turkmenistan", "Turks and Caicos Islands","Tuvalu","Uganda","Ukraine","United Arab Emirates","United Kingdom", "United States","United States Minor Outlying Islands","Uruguay","Uzbekistan","Vanuatu","Venezuela", "Viet Nam","Virgin Islands, British","Virgin Islands, U.S.","Wallis and Futuna","Western Sahara", "Yemen","Zambia","Zimbabwe"] html = '<select id="%s" name="%s">' % (id, name) for c in countries: html += '<option>%s</option>' % c html += '</select>' return html def date_select(id = "", name = "", **opts): """ Create a date select fields """ import time now = time.localtime(time.time()) field = {} html = '' default_date = opts.get('default', (now[0], now[1], now[2])) # Year Field # start year start_year = opts.get('start_year', now[0]-5) end_year = opts.get('end_year', now[0]+5) field['year'] = '<select id="%s_y" name="%s[y]">\n' % (id, name) for y in range(start_year, end_year+1): if default_date[0] == y: field['year'] += '<option selected>%d</option>\n' % y else: field['year'] += '<option>%d</option>\n' % y field['year'] += '</select>\n' if opts.has_key('year_label'): field['year'] = '<label for="%s_y">%s</label> %s' % (id, opts['year_label'], field['year']) # Month Field months = opts.get('months', [1,2,3,4,5,6,7,8,9,10,11,12]) field['month'] = '<select id="%s_m" name="%s[m]">\n' % (id, name) for m in months: if default_date[1] == m: field['month'] += '<option selected>%d</option>\n' % m else: field['month'] += '<option>%d</option>\n' % m field['month'] += '</select>\n' if opts.has_key('month_label'): field['month'] = '<label for="%s_m">%s</label> %s' % (id, opts['month_label'], field['name']) # Date Field field['date'] = '<select id="%s_d" name="%s[d]">\n' % (id, name) for d in range(1, 32): if default_date[2] == d: field['date'] += '<option selected>%d</option>\n' % d else: field['date'] += '<option>%d</option>\n' % d field['date'] += '</select>\n' if opts.has_key('date_label'): 
field['date'] = '<label for="%s_d">%s</label> %s' % (id, opts['date_label'], field['date']) # field order field_order = opts.get('order', ['year', 'month', 'date']) for o in field_order: html += field[o] return html
mit
walac/build-mozharness
configs/luciddream/linux_config.py
1
1816
# This is a template config file for luciddream production. import os import platform HG_SHARE_BASE_DIR = "/builds/hg-shared" if platform.system().lower() == 'darwin': xre_url = "http://tooltool.pvt.build.mozilla.org/build/sha512/4d8d7a37d90c34a2a2fda3066a8fe85c189b183d05389cb957fc6fed31f10a6924e50c1b84488ff61c015293803f58a3aed5d4819374d04c8e0ee2b9e3997278" else: xre_url = "http://tooltool.pvt.build.mozilla.org/build/sha512/dc9503b21c87b5a469118746f99e4f41d73888972ce735fa10a80f6d218086da0e3da525d9a4cd8e4ea497ec199fef720e4a525873d77a1af304ac505e076462" config = { # mozharness script options "xre_url": xre_url, "b2gdesktop_url": "http://ftp.mozilla.org/pub/mozilla.org/b2g/nightly/2015/03/2015-03-09-00-25-06-mozilla-b2g37_v2_2/b2g-37.0.multi.linux-i686.tar.bz2", # mozharness configuration "vcs_share_base": HG_SHARE_BASE_DIR, "exes": { 'python': '/tools/buildbot/bin/python', 'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'], 'tooltool.py': "/tools/tooltool.py", 'gittool.py': '%(abs_tools_dir)s/buildfarm/utils/gittool.py', }, "find_links": [ "http://pypi.pvt.build.mozilla.org/pub", "http://pypi.pub.build.mozilla.org/pub", ], "pip_index": False, "buildbot_json_path": "buildprops.json", "default_blob_upload_servers": [ "https://blobupload.elasticbeanstalk.com", ], "blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"), # will handle in-tree config as subsequent patch # "in_tree_config": "config/mozharness/luciddream.py", "download_symbols": "ondemand", "download_minidump_stackwalk": True, "tooltool_servers": ["http://tooltool.pvt.build.mozilla.org/build/"], "tooltool_cache": "/builds/tooltool_cache", }
mpl-2.0
shams169/pythonProject
ContactsDir/env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py
2755
9226
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO) from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO) from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO) from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO) from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO) from .compat import wrap_ord ENOUGH_DATA_THRESHOLD = 1024 SURE_YES = 0.99 SURE_NO = 0.01 MINIMUM_DATA_THRESHOLD = 3 class CharDistributionAnalysis: def __init__(self): # Mapping table to get frequency order from char order (get from # GetOrder()) self._mCharToFreqOrder = None self._mTableSize = None # Size of above table # This is a 
constant value which varies from language to language, # used in calculating confidence. See # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html # for further detail. self._mTypicalDistributionRatio = None self.reset() def reset(self): """reset analyser, clear any state""" # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False self._mTotalChars = 0 # Total characters encountered # The number of characters whose frequency order is less than 512 self._mFreqChars = 0 def feed(self, aBuf, aCharLen): """feed a character with known length""" if aCharLen == 2: # we only care about 2-bytes character in our distribution analysis order = self.get_order(aBuf) else: order = -1 if order >= 0: self._mTotalChars += 1 # order is valid if order < self._mTableSize: if 512 > self._mCharToFreqOrder[order]: self._mFreqChars += 1 def get_confidence(self): """return confidence based on existing data""" # if we didn't receive any character in our consideration range, # return negative answer if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD: return SURE_NO if self._mTotalChars != self._mFreqChars: r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio)) if r < SURE_YES: return r # normalize confidence (we don't want to be 100% sure) return SURE_YES def got_enough_data(self): # It is not necessary to receive all data to draw conclusion. # For charset detection, certain amount of data is enough return self._mTotalChars > ENOUGH_DATA_THRESHOLD def get_order(self, aBuf): # We do not handle characters based on the original encoding string, # but convert this encoding string to a number, here called order. # This allows multiple encodings of a language to share one frequency # table. 
return -1 class EUCTWDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = EUCTWCharToFreqOrder self._mTableSize = EUCTW_TABLE_SIZE self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-TW encoding, we are interested # first byte range: 0xc4 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char = wrap_ord(aBuf[0]) if first_char >= 0xC4: return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1 else: return -1 class EUCKRDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = EUCKRCharToFreqOrder self._mTableSize = EUCKR_TABLE_SIZE self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-KR encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char = wrap_ord(aBuf[0]) if first_char >= 0xB0: return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1 else: return -1 class GB2312DistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = GB2312CharToFreqOrder self._mTableSize = GB2312_TABLE_SIZE self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for GB2312 encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if (first_char >= 0xB0) and (second_char >= 0xA1): return 94 * (first_char - 0xB0) + second_char - 0xA1 else: return -1 class Big5DistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = Big5CharToFreqOrder self._mTableSize = BIG5_TABLE_SIZE self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for big5 encoding, we are interested # first byte range: 0xa4 -- 0xfe # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if first_char >= 0xA4: if second_char >= 0xA1: return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 else: return 157 * (first_char - 0xA4) + second_char - 0x40 else: return -1 class SJISDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = JISCharToFreqOrder self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for sjis encoding, we are interested # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe # no validation needed here. 
State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if (first_char >= 0x81) and (first_char <= 0x9F): order = 188 * (first_char - 0x81) elif (first_char >= 0xE0) and (first_char <= 0xEF): order = 188 * (first_char - 0xE0 + 31) else: return -1 order = order + second_char - 0x40 if second_char > 0x7F: order = -1 return order class EUCJPDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = JISCharToFreqOrder self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-JP encoding, we are interested # first byte range: 0xa0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that char = wrap_ord(aBuf[0]) if char >= 0xA0: return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1 else: return -1
mit
s3rvac/git-branch-viewer
docs/conf.py
1
8307
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the git-branch-viewer documentation.
#
# Sphinx execfile()s this module with the current directory set to the
# directory containing it.  Only values that differ from the Sphinx
# defaults are set here; every other option keeps its default value
# (see the Sphinx configuration reference for the full list).

import os
import sys

# Make the project package importable so sphinx.ext.autodoc can locate it.
# The docs live in docs/, so the project root is one directory up from
# the current working directory used during the build.
sys.path.insert(0, os.path.dirname(os.getcwd()))

# -- General configuration ------------------------------------------------

# Sphinx extension modules used when building these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
]

# Paths, relative to this directory, that contain templates.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'git-branch-viewer'
copyright = '2014, Petr Zemek'

# The short X.Y version and the full release string (kept in sync).
version = '0.1'
release = '0.1'

# Patterns, relative to the source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'default'

# Paths, relative to this directory, that contain custom static files.
# These are copied after the builtin static files, so a file named
# "default.css" would overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'git-branch-viewerdoc'

# -- Options for LaTeX output ---------------------------------------------

# No LaTeX customizations (paper size, point size, preamble) are needed;
# the Sphinx defaults apply.
latex_elements = {
}

# Grouping the document tree into LaTeX files.  List of tuples:
# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'git-branch-viewer.tex', 'git-branch-viewer Documentation',
     'Petr Zemek', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page.  List of tuples:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'git-branch-viewer', 'git-branch-viewer Documentation',
     ['Petr Zemek'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files.  List of tuples:
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'git-branch-viewer', 'git-branch-viewer Documentation',
     'Petr Zemek', 'git-branch-viewer', 'One line description of project.',
     'Miscellaneous'),
]
bsd-3-clause
xiaoyaozi5566/GEM5_DRAMSim2
src/python/m5/util/region.py
64
9563
# Copyright (c) 2006 Nathan Binkert <nate@binkert.org> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class _neg_inf(object):
    '''Sentinel that always compares less than any other object.'''

    def __repr__(self):
        return '<neg_inf>'

    def __lt__(self, other):
        # Strictly less than everything except another neg_inf.
        return type(self) != type(other)

    def __le__(self, other):
        return True

    def __gt__(self, other):
        return False

    def __ge__(self, other):
        return type(self) == type(other)

    def __eq__(self, other):
        return type(self) == type(other)

    def __ne__(self, other):
        return type(self) != type(other)

    # Defining __eq__ clears the default hash under Python 3; restore the
    # identity hash so the sentinel stays usable in sets/dicts (matches the
    # original Python 2 behavior).
    __hash__ = object.__hash__

neg_inf = _neg_inf()


class _pos_inf(object):
    '''Sentinel that always compares greater than any other object.'''

    def __repr__(self):
        return '<pos_inf>'

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return type(self) == type(other)

    def __gt__(self, other):
        # Strictly greater than everything except another pos_inf.
        return type(self) != type(other)

    def __ge__(self, other):
        return True

    def __eq__(self, other):
        return type(self) == type(other)

    def __ne__(self, other):
        return type(self) != type(other)

    __hash__ = object.__hash__

pos_inf = _pos_inf()


class Region(tuple):
    '''A half-open region (range) [start, end).

    Includes utility comparison operators describing how two regions (or a
    region and a scalar position) relate and overlap.
    '''

    def __new__(cls, *args):
        """Build a Region from (start, end), from a single iterable pair,
        or from an existing Region (returned unchanged -- Regions are
        immutable, so sharing is safe).

        Raises AttributeError when the argument count is wrong (kept as
        AttributeError for backward compatibility with existing callers).
        """
        if len(args) == 1:
            arg = args[0]
            if isinstance(arg, Region):
                return arg
            args = tuple(arg)

        if len(args) != 2:
            # Bug fix: the message used to interpolate the undefined name
            # 'alen' (raising NameError instead of the intended
            # AttributeError) and used the Python 2-only 'raise E, msg' form.
            raise AttributeError(
                "Only one or two arguments allowed, %d provided" % len(args))

        return tuple.__new__(cls, args)

    def __repr__(self):
        return 'Region(%s, %s)' % (self[0], self[1])

    @property
    def start(self):
        return self[0]

    @property
    def end(self):
        return self[1]

    def __contains__(self, other):
        '''other is region: True if other is fully contained within self.
        pos: True if other is within the region'''
        if isinstance(other, tuple):
            return self[0] <= other[0] and self[1] >= other[1]
        return self[0] <= other and other < self[1]

    def __eq__(self, other):
        '''other is region: True if self and other are identical.
        pos: True if other is within the region'''
        if isinstance(other, tuple):
            return self[0] == other[0] and self[1] == other[1]
        return self[0] <= other and other < self[1]

    # tuple hashing is still consistent with __eq__ for Region/Region
    # comparisons; Python 3 clears __hash__ when __eq__ is overridden,
    # so restore it explicitly.
    __hash__ = tuple.__hash__

    def __ne__(self, other):
        '''other is region: True if they are not identical.
        pos: True if other is not in the region'''
        if isinstance(other, tuple):
            return self[0] != other[0] or self[1] != other[1]
        return other < self[0] or self[1] <= other

    def __lt__(self, other):
        "self completely left of other (cannot overlap)"
        if isinstance(other, tuple):
            return self[1] <= other[0]
        return self[1] <= other

    def __le__(self, other):
        "self extends to the left of other (can overlap)"
        if isinstance(other, tuple):
            return self[0] <= other[0]
        return self[0] <= other

    def __gt__(self, other):
        "self is completely right of other (cannot overlap)"
        if isinstance(other, tuple):
            return self[0] >= other[1]
        return self[0] > other

    def __ge__(self, other):
        "self extends beyond other to the right (can overlap)"
        if isinstance(other, tuple):
            return self[1] >= other[1]
        return self[1] > other


class Regions(object):
    '''A set of regions (ranges): basically a region with holes.

    Includes utility functions to intersect region sets and to test whether
    a position or region falls inside any member region.
    '''

    def __init__(self, *args):
        self.regions = []
        self.extend(*args)

    def copy(self):
        # Shallow copy: Region members are immutable tuples, so sharing
        # them between the two Regions objects is safe.
        copy = Regions()
        copy.regions.extend(self.regions)
        return copy

    def append(self, *args):
        self.regions.append(Region(*args))

    def extend(self, *args):
        self.regions.extend(Region(a) for a in args)

    def __contains__(self, position):
        # position may be a scalar or a (start, end) region; either way
        # Region.__contains__ does the right test.
        for region in self.regions:
            if position in region:
                return True
        return False

    def __len__(self):
        return len(self.regions)

    def __iand__(self, other):
        """Intersect in place with another Regions object.

        NOTE(review): the linear merge below assumes both region lists are
        sorted and non-overlapping -- confirm at call sites; nothing here
        enforces it.
        """
        A = self.regions
        B = other.regions
        R = []

        i = 0
        j = 0
        while i < len(self) and j < len(other):
            a = A[i]
            b = B[j]
            if a[1] <= b[0]:
                # A is completely before B.  Skip A.
                i += 1
            elif a[0] <= b[0]:
                if a[1] <= b[1]:
                    # A and B overlap with B not left of A and A not right
                    # of B.
                    R.append(Region(b[0], a[1]))
                    # Advance A because nothing is left.
                    i += 1
                    if a[1] == b[1]:
                        # Advance B too.
                        j += 1
                else:
                    # A and B overlap with B completely within the bounds
                    # of A.
                    R.append(Region(b[0], b[1]))
                    # Advance only B because some of A may still be useful.
                    j += 1
            elif b[1] <= a[0]:
                # B is completely before A.  Skip B.
                j += 1
            else:
                assert b[0] < a[0]
                if b[1] <= a[1]:
                    # A and B overlap with A not left of B and B not right
                    # of A.
                    R.append(Region(a[0], b[1]))
                    # Advance B because nothing is left.
                    j += 1
                    if a[1] == b[1]:
                        # Advance A too.
                        i += 1
                else:
                    # A and B overlap with A completely within the bounds
                    # of B.
                    R.append(Region(a[0], a[1]))
                    # Advance only A because some of B may still be useful.
                    i += 1

        self.regions = R
        return self

    def __and__(self, other):
        result = self.copy()
        result &= other
        return result

    def __repr__(self):
        return 'Regions(%s)' % ([(r[0], r[1]) for r in self.regions], )


if __name__ == '__main__':
    x = Regions(*((i, i + 1) for i in range(0, 30, 2)))
    y = Regions(*((i, i + 4) for i in range(0, 30, 5)))
    z = Region(6, 7)
    n = Region(9, 10)

    def test(left, right):
        # Exercise every comparison operator on the pair and show results.
        print("%s == %s: %s" % (left, right, left == right))
        print("%s != %s: %s" % (left, right, left != right))
        print("%s < %s: %s" % (left, right, left < right))
        print("%s <= %s: %s" % (left, right, left <= right))
        print("%s > %s: %s" % (left, right, left > right))
        print("%s >= %s: %s" % (left, right, left >= right))
        print("")

    test(neg_inf, neg_inf)
    test(neg_inf, pos_inf)
    test(pos_inf, neg_inf)
    test(pos_inf, pos_inf)
    test(neg_inf, 0)
    test(neg_inf, -11111)
    test(neg_inf, 11111)
    test(0, neg_inf)
    test(-11111, neg_inf)
    test(11111, neg_inf)
    test(pos_inf, 0)
    test(pos_inf, -11111)
    test(pos_inf, 11111)
    test(0, pos_inf)
    test(-11111, pos_inf)
    test(11111, pos_inf)

    print(x)
    print(y)
    print(x & y)
    print(z)
    print(4 in x)
    print(4 in z)
    print(5 not in x)
    print(6 not in z)
    print(z in y)
    print("%s %s" % (n in y, n not in y))
bsd-3-clause
Walt280/cosmos
code/string-algorithms/anagram_search/anagram_search.py
5
1209
""" Part of Cosmos by OpenGenus Foundation""" import collections def removeWhitespace(string): return ''.join(string.split()) """ Checks if two strings are anagrams of each other, ignoring any whitespace. First remove any whitespace and lower all characters of both strings. Then create dictionaries of the counts of every character in each string. As well as keep a set of all characters used in both strings. Check to ensure every unique character are used in both strings the same number of times. """ def isAnagram(string1, string2): charCount1 = collections.Counter(removeWhitespace(string1.lower())) charCount2 = collections.Counter(removeWhitespace(string2.lower())) allChars = set(charCount1.keys()) allChars = allChars.union(charCount2.keys()) for c in allChars: if (charCount1[c] != charCount2[c]): return False return True assert isAnagram("anagram", "not a gram") == False assert isAnagram("anagram", "na a marg") == True assert isAnagram("William Shakespeare", "I am \t a weakish speller") == True assert isAnagram("Madam Curie", "Radium came") == True assert isAnagram("notagram", "notaflam") == False
gpl-3.0
malept/pyoath-toolkit
oath_toolkit/django_otp/hotp/tests.py
1
2720
# -*- coding: utf-8 -*- # # Originally from django-otp 0.2.2: # * django_otp/plugins/otp_hotp/tests.py # # Copyright (c) 2012, Peter Sagerson # Copyright (c) 2014, 2015, Mark Lee # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from django.db import IntegrityError

from django_otp.tests import TestCase


class HOTPTest(TestCase):
    """Tests for the oath-toolkit HOTP (counter-based OTP) device.

    Creates a user with one HOTP device bound to a fixed hex secret
    (digits=6, window=1, counter=0) and checks which tokens the device
    accepts and how its counter advances.
    """

    # The next three tokens for the device's secret -- presumably the
    # RFC 4226 values at counters 0, 1 and 2; verify against a generator
    # if the secret ever changes.
    tokens = [b'782373', b'313268', b'307722']

    def setUp(self):
        # Build the fixture: user 'alice' with a single HOTP device.
        try:
            alice = self.create_user('alice', 'password')
        except IntegrityError:  # pragma: no cover
            # User creation collided (stale state); nothing useful to test.
            self.skipTest("Unable to create test user.")
        else:
            self.device = alice.otoolkithotpdevice_set.create(
                secret_hex=b'd2e8a68036f68960b1c30532bb6c56da5934d879',
                digits=6, window=1, counter=0)

    def assert_token_verified(self, token, count):
        # A valid token must verify and advance the counter to `count`.
        self.assertTrue(self.device.verify_token(token))
        self.assertEqual(self.device.counter, count)

    def assert_token_not_verified(self, token):
        # An invalid token must be rejected and leave the counter at 0.
        self.assertFalse(self.device.verify_token(token))
        self.assertEqual(self.device.counter, 0)

    def test_normal(self):
        """The token at the current counter verifies; counter becomes 1."""
        self.assert_token_verified(self.tokens[0], 1)

    def test_normal_drift(self):
        """A token one step ahead is accepted (window=1); counter jumps to 2."""
        self.assert_token_verified(self.tokens[1], 2)

    def test_excessive_drift(self):
        """A token two steps ahead falls outside the window and is rejected."""
        self.assert_token_not_verified(self.tokens[2])

    def test_bad_value(self):
        """An arbitrary wrong token is rejected."""
        self.assert_token_not_verified(b'123456')
apache-2.0
mogoweb/webkit_for_android5.1
webkit/Tools/Scripts/webkitpy/tool/commands/upload.py
15
22489
#!/usr/bin/env python # Copyright (c) 2009, 2010 Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os import re import sys from optparse import make_option from webkitpy.tool import steps from webkitpy.common.config.committers import CommitterList from webkitpy.common.net.bugzilla import parse_bug_id_from_changelog from webkitpy.common.system.deprecated_logging import error, log from webkitpy.common.system.user import User from webkitpy.thirdparty.mock import Mock from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand from webkitpy.tool.comments import bug_comment_from_svn_revision from webkitpy.tool.grammar import pluralize, join_with_separators from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand class CommitMessageForCurrentDiff(AbstractDeclarativeCommand): name = "commit-message" help_text = "Print a commit message suitable for the uncommitted changes" def __init__(self): options = [ steps.Options.git_commit, ] AbstractDeclarativeCommand.__init__(self, options=options) def execute(self, options, args, tool): # This command is a useful test to make sure commit_message_for_this_commit # always returns the right value regardless of the current working directory. print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message() class CleanPendingCommit(AbstractDeclarativeCommand): name = "clean-pending-commit" help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list." # NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters. 
def _flags_to_clear_on_patch(self, patch): if not patch.is_obsolete(): return None what_was_cleared = [] if patch.review() == "+": if patch.reviewer(): what_was_cleared.append("%s's review+" % patch.reviewer().full_name) else: what_was_cleared.append("review+") return join_with_separators(what_was_cleared) def execute(self, options, args, tool): committers = CommitterList() for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list(): bug = self._tool.bugs.fetch_bug(bug_id) patches = bug.patches(include_obsolete=True) for patch in patches: flags_to_clear = self._flags_to_clear_on_patch(patch) if not flags_to_clear: continue message = "Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id()) self._tool.bugs.obsolete_attachment(patch.id(), message) # FIXME: This should be share more logic with AssignToCommitter and CleanPendingCommit class CleanReviewQueue(AbstractDeclarativeCommand): name = "clean-review-queue" help_text = "Clear r? on obsolete patches so they do not appear in the pending-commit list." def execute(self, options, args, tool): queue_url = "http://webkit.org/pending-review" # We do this inefficient dance to be more like webkit.org/pending-review # bugs.queries.fetch_bug_ids_from_review_queue() doesn't return # closed bugs, but folks using /pending-review will see them. :( for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue(): patch = self._tool.bugs.fetch_attachment(patch_id) if not patch.review() == "?": continue attachment_obsolete_modifier = "" if patch.is_obsolete(): attachment_obsolete_modifier = "obsolete " elif patch.bug().is_closed(): bug_closed_explanation = " If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)." else: # Neither the patch was obsolete or the bug was closed, next patch... continue message = "Cleared review? 
from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation) self._tool.bugs.obsolete_attachment(patch.id(), message) class AssignToCommitter(AbstractDeclarativeCommand): name = "assign-to-committer" help_text = "Assign bug to whoever attached the most recent r+'d patch" def _patches_have_commiters(self, reviewed_patches): for patch in reviewed_patches: if not patch.committer(): return False return True def _assign_bug_to_last_patch_attacher(self, bug_id): committers = CommitterList() bug = self._tool.bugs.fetch_bug(bug_id) if not bug.is_unassigned(): assigned_to_email = bug.assigned_to_email() log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email))) return reviewed_patches = bug.reviewed_patches() if not reviewed_patches: log("Bug %s has no non-obsolete patches, ignoring." % bug_id) return # We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set). if self._patches_have_commiters(reviewed_patches): log("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id) return latest_patch = reviewed_patches[-1] attacher_email = latest_patch.attacher_email() committer = committers.committer_by_email(attacher_email) if not committer: log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id)) return reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." 
% (latest_patch.id(), committer.full_name) self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message) def execute(self, options, args, tool): for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list(): self._assign_bug_to_last_patch_attacher(bug_id) class ObsoleteAttachments(AbstractSequencedCommand): name = "obsolete-attachments" help_text = "Mark all attachments on a bug as obsolete" argument_names = "BUGID" steps = [ steps.ObsoletePatches, ] def _prepare_state(self, options, args, tool): return { "bug_id" : args[0] } class AttachToBug(AbstractSequencedCommand): name = "attach-to-bug" help_text = "Attach the the file to the bug" argument_names = "BUGID FILEPATH" steps = [ steps.AttachToBug, ] def _prepare_state(self, options, args, tool): state = {} state["bug_id"] = args[0] state["filepath"] = args[1] return state class AbstractPatchUploadingCommand(AbstractSequencedCommand): def _bug_id(self, options, args, tool, state): # Perfer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs). 
bug_id = args and args[0] if not bug_id: changed_files = self._tool.scm().changed_files(options.git_commit) state["changed_files"] = changed_files bug_id = tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files) return bug_id def _prepare_state(self, options, args, tool): state = {} state["bug_id"] = self._bug_id(options, args, tool, state) if not state["bug_id"]: error("No bug id passed and no bug url found in ChangeLogs.") return state class Post(AbstractPatchUploadingCommand): name = "post" help_text = "Attach the current working directory diff to a bug as a patch file" argument_names = "[BUGID]" steps = [ steps.ValidateChangeLogs, steps.CheckStyle, steps.ConfirmDiff, steps.ObsoletePatches, steps.SuggestReviewers, steps.PostDiff, ] class LandSafely(AbstractPatchUploadingCommand): name = "land-safely" help_text = "Land the current diff via the commit-queue" argument_names = "[BUGID]" long_help = """land-safely updates the ChangeLog with the reviewer listed in bugs.webkit.org for BUGID (or the bug ID detected from the ChangeLog). 
The command then uploads the current diff to the bug and marks it for commit by the commit-queue.""" show_in_main_help = True steps = [ steps.UpdateChangeLogsWithReviewer, steps.ValidateChangeLogs, steps.ObsoletePatches, steps.PostDiffForCommit, ] class Prepare(AbstractSequencedCommand): name = "prepare" help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs" argument_names = "[BUGID]" steps = [ steps.PromptForBugOrTitle, steps.CreateBug, steps.PrepareChangeLog, ] def _prepare_state(self, options, args, tool): bug_id = args and args[0] return { "bug_id" : bug_id } class Upload(AbstractPatchUploadingCommand): name = "upload" help_text = "Automates the process of uploading a patch for review" argument_names = "[BUGID]" show_in_main_help = True steps = [ steps.ValidateChangeLogs, steps.CheckStyle, steps.PromptForBugOrTitle, steps.CreateBug, steps.PrepareChangeLog, steps.EditChangeLog, steps.ConfirmDiff, steps.ObsoletePatches, steps.SuggestReviewers, steps.PostDiff, ] long_help = """upload uploads the current diff to bugs.webkit.org. If no bug id is provided, upload will create a bug. If the current diff does not have a ChangeLog, upload will prepare a ChangeLog. 
Once a patch is read, upload will open the ChangeLogs for editing using the command in the EDITOR environment variable and will display the diff using the command in the PAGER environment variable.""" def _prepare_state(self, options, args, tool): state = {} state["bug_id"] = self._bug_id(options, args, tool, state) return state class EditChangeLogs(AbstractSequencedCommand): name = "edit-changelogs" help_text = "Opens modified ChangeLogs in $EDITOR" show_in_main_help = True steps = [ steps.EditChangeLog, ] class PostCommits(AbstractDeclarativeCommand): name = "post-commits" help_text = "Attach a range of local commits to bugs as patch files" argument_names = "COMMITISH" def __init__(self): options = [ make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."), make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"), steps.Options.obsolete_patches, steps.Options.review, steps.Options.request_commit, ] AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True) def _comment_text_for_commit(self, options, commit_message, tool, commit_id): comment_text = None if (options.add_log_as_comment): comment_text = commit_message.body(lstrip=True) comment_text += "---\n" comment_text += tool.scm().files_changed_summary_for_commit(commit_id) return comment_text def execute(self, options, args, tool): commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is. error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" 
% (pluralize("patch", len(commit_ids)))) have_obsoleted_patches = set() for commit_id in commit_ids: commit_message = tool.scm().commit_message_for_local_commit(commit_id) # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs). bug_id = options.bug_id or parse_bug_id_from_changelog(commit_message.message()) or parse_bug_id_from_changelog(tool.scm().create_patch(git_commit=commit_id)) if not bug_id: log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id) continue if options.obsolete_patches and bug_id not in have_obsoleted_patches: state = { "bug_id": bug_id } steps.ObsoletePatches(tool, options).run(state) have_obsoleted_patches.add(bug_id) diff = tool.scm().create_patch(git_commit=commit_id) description = options.description or commit_message.description(lstrip=True, strip_url=True) comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id) tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) # FIXME: This command needs to be brought into the modern age with steps and CommitInfo. 
class MarkBugFixed(AbstractDeclarativeCommand): name = "mark-bug-fixed" help_text = "Mark the specified bug as fixed" argument_names = "[SVN_REVISION]" def __init__(self): options = [ make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."), make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."), make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."), ] AbstractDeclarativeCommand.__init__(self, options=options) # FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here. def _fetch_commit_log(self, tool, svn_revision): if not svn_revision: return tool.scm().last_svn_commit_log() return tool.scm().svn_commit_log(svn_revision) def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision): commit_log = self._fetch_commit_log(tool, svn_revision) if not bug_id: bug_id = parse_bug_id_from_changelog(commit_log) if not svn_revision: match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE) if match: svn_revision = match.group('svn_revision') if not bug_id or not svn_revision: not_found = [] if not bug_id: not_found.append("bug id") if not svn_revision: not_found.append("svn revision") error("Could not find %s on command-line or in %s." 
% (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit")) return (bug_id, svn_revision) def execute(self, options, args, tool): bug_id = options.bug_id svn_revision = args and args[0] if svn_revision: if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE): svn_revision = svn_revision[1:] if not re.match("^[0-9]+$", svn_revision): error("Invalid svn revision: '%s'" % svn_revision) needs_prompt = False if not bug_id or not svn_revision: needs_prompt = True (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision) log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"])) log("Revision: %s" % svn_revision) if options.open_bug: tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id)) if needs_prompt: if not tool.user.confirm("Is this correct?"): exit(1) bug_comment = bug_comment_from_svn_revision(svn_revision) if options.comment: bug_comment = "%s\n\n%s" % (options.comment, bug_comment) if options.update_only: log("Adding comment to Bug %s." % bug_id) tool.bugs.post_comment_to_bug(bug_id, bug_comment) else: log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id) tool.bugs.close_bug_as_fixed(bug_id, bug_comment) # FIXME: Requires unit test. Blocking issue: too complex for now. 
class CreateBug(AbstractDeclarativeCommand): name = "create-bug" help_text = "Create a bug from local changes or local commits" argument_names = "[COMMITISH]" def __init__(self): options = [ steps.Options.cc, steps.Options.component, make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."), make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."), make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."), ] AbstractDeclarativeCommand.__init__(self, options=options) def create_bug_from_commit(self, options, args, tool): commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) if len(commit_ids) > 3: error("Are you sure you want to create one bug with %s patches?" % len(commit_ids)) commit_id = commit_ids[0] bug_title = "" comment_text = "" if options.prompt: (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() else: commit_message = tool.scm().commit_message_for_local_commit(commit_id) bug_title = commit_message.description(lstrip=True, strip_url=True) comment_text = commit_message.body(lstrip=True) comment_text += "---\n" comment_text += tool.scm().files_changed_summary_for_commit(commit_id) diff = tool.scm().create_patch(git_commit=commit_id) bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) if bug_id and len(commit_ids) > 1: options.bug_id = bug_id options.obsolete_patches = False # FIXME: We should pass through --no-comment switch as well. 
PostCommits.execute(self, options, commit_ids[1:], tool) def create_bug_from_patch(self, options, args, tool): bug_title = "" comment_text = "" if options.prompt: (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() else: commit_message = tool.checkout().commit_message_for_this_commit(options.git_commit) bug_title = commit_message.description(lstrip=True, strip_url=True) comment_text = commit_message.body(lstrip=True) diff = tool.scm().create_patch(options.git_commit) bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) def prompt_for_bug_title_and_comment(self): bug_title = User.prompt("Bug title: ") print "Bug comment (hit ^D on blank line to end):" lines = sys.stdin.readlines() try: sys.stdin.seek(0, os.SEEK_END) except IOError: # Cygwin raises an Illegal Seek (errno 29) exception when the above # seek() call is made. Ignoring it seems to cause no harm. # FIXME: Figure out a way to get avoid the exception in the first # place. pass comment_text = "".join(lines) return (bug_title, comment_text) def execute(self, options, args, tool): if len(args): if (not tool.scm().supports_local_commits()): error("Extra arguments not supported; patch is taken from working directory.") self.create_bug_from_commit(options, args, tool) else: self.create_bug_from_patch(options, args, tool)
apache-2.0
cgundogan/RIOT
tests/pbkdf2/tests/test_base.py
7
1148
# Copyright (C) 2019 Freie Universität Berlin # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. # # Author: Juan Carrano <j.carrano@fu-berlin.de> import sys import base64 from functools import partial from testrunner import run MAX_LINE = 128 def safe_encode(data): """Empty lines will confuse the target, replace them with padding.""" return base64.b64encode(data).decode('ascii') if data else "" def test(vectors, child): def _safe_expect_exact(s): idx = child.expect_exact([s+'\r\n', '{error}\r\n']) assert idx == 0 return idx def _safe_sendline(line): assert len(line) < MAX_LINE _safe_expect_exact('{ready}') child.sendline(line) for passwd, salt, iters, key in vectors: _safe_sendline(passwd) _safe_sendline(safe_encode(salt)) _safe_sendline(str(iters)) expected_key = base64.b64encode(key).decode('ascii') _safe_expect_exact(expected_key) def main(vectors): sys.exit(run(partial(test, vectors)))
lgpl-2.1
noname007/SublimeJEDI
jedi/evaluate/compiled/__init__.py
33
16590
""" Imitate the parser representation. """ import inspect import re import sys import os from functools import partial from jedi._compatibility import builtins as _builtins, unicode from jedi import debug from jedi.cache import underscore_memoization, memoize_method from jedi.evaluate.sys_path import get_sys_path from jedi.parser.tree import Param, Base, Operator, zero_position_modifier from jedi.evaluate.helpers import FakeName from . import fake _sep = os.path.sep if os.path.altsep is not None: _sep += os.path.altsep _path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) del _sep class CheckAttribute(object): """Raises an AttributeError if the attribute X isn't available.""" def __init__(self, func): self.func = func # Remove the py in front of e.g. py__call__. self.check_name = func.__name__[2:] def __get__(self, instance, owner): # This might raise an AttributeError. That's wanted. getattr(instance.obj, self.check_name) return partial(self.func, instance) class CompiledObject(Base): # comply with the parser start_pos = 0, 0 path = None # modules have this attribute - set it to None. used_names = {} # To be consistent with modules. def __init__(self, obj, parent=None): self.obj = obj self.parent = parent @property def py__call__(self): def actual(evaluator, params): if inspect.isclass(self.obj): from jedi.evaluate.representation import Instance return [Instance(evaluator, self, params)] else: return list(self._execute_function(evaluator, params)) # Might raise an AttributeError, which is intentional. 
self.obj.__call__ return actual @CheckAttribute def py__class__(self, evaluator): return CompiledObject(self.obj.__class__, parent=self.parent) @CheckAttribute def py__mro__(self, evaluator): return tuple(create(evaluator, cls, self.parent) for cls in self.obj.__mro__) @CheckAttribute def py__bases__(self, evaluator): return tuple(create(evaluator, cls) for cls in self.obj.__bases__) def py__bool__(self): return bool(self.obj) def py__file__(self): return self.obj.__file__ def is_class(self): return inspect.isclass(self.obj) @property def doc(self): return inspect.getdoc(self.obj) or '' @property def params(self): params_str, ret = self._parse_function_doc() tokens = params_str.split(',') if inspect.ismethoddescriptor(self._cls().obj): tokens.insert(0, 'self') params = [] for p in tokens: parts = [FakeName(part) for part in p.strip().split('=')] if len(parts) > 1: parts.insert(1, Operator(zero_position_modifier, '=', (0, 0))) params.append(Param(parts, self)) return params def __repr__(self): return '<%s: %s>' % (type(self).__name__, repr(self.obj)) @underscore_memoization def _parse_function_doc(self): if self.doc is None: return '', '' return _parse_function_doc(self.doc) def api_type(self): if fake.is_class_instance(self.obj): return 'instance' cls = self._cls().obj if inspect.isclass(cls): return 'class' elif inspect.ismodule(cls): return 'module' elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ or inspect.ismethoddescriptor(cls): return 'function' @property def type(self): """Imitate the tree.Node.type values.""" cls = self._cls().obj if inspect.isclass(cls): return 'classdef' elif inspect.ismodule(cls): return 'file_input' elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ or inspect.ismethoddescriptor(cls): return 'funcdef' @underscore_memoization def _cls(self): # Ensures that a CompiledObject is returned that is not an instance (like list) if fake.is_class_instance(self.obj): try: c = self.obj.__class__ except AttributeError: # happens with 
numpy.core.umath._UFUNC_API (you get it # automatically by doing `import numpy`. c = type(None) return CompiledObject(c, self.parent) return self @property def names_dict(self): # For compatibility with `representation.Class`. return self.names_dicts(False)[0] def names_dicts(self, search_global, is_instance=False): return self._names_dict_ensure_one_dict(is_instance) @memoize_method def _names_dict_ensure_one_dict(self, is_instance): """ search_global shouldn't change the fact that there's one dict, this way there's only one `object`. """ return [LazyNamesDict(self._cls(), is_instance)] def get_subscope_by_name(self, name): if name in dir(self._cls().obj): return CompiledName(self._cls(), name).parent else: raise KeyError("CompiledObject doesn't have an attribute '%s'." % name) def get_index_types(self, evaluator, index_array=()): # If the object doesn't have `__getitem__`, just raise the # AttributeError. if not hasattr(self.obj, '__getitem__'): debug.warning('Tried to call __getitem__ on non-iterable.') return [] if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. return [] result = [] from jedi.evaluate.iterable import create_indexes_or_slices for typ in create_indexes_or_slices(evaluator, index_array): index = None try: index = typ.obj new = self.obj[index] except (KeyError, IndexError, TypeError, AttributeError): # Just try, we don't care if it fails, except for slices. if isinstance(index, slice): result.append(self) else: result.append(CompiledObject(new)) if not result: try: for obj in self.obj: result.append(CompiledObject(obj)) except TypeError: pass # self.obj maynot have an __iter__ method. 
return result @property def name(self): # might not exist sometimes (raises AttributeError) return FakeName(self._cls().obj.__name__, self) def _execute_function(self, evaluator, params): if self.type != 'funcdef': return for name in self._parse_function_doc()[1].split(): try: bltn_obj = _create_from_name(builtin, builtin, name) except AttributeError: continue else: if isinstance(bltn_obj, CompiledObject) and bltn_obj.obj is None: # We want everything except None. continue for result in evaluator.execute(bltn_obj, params): yield result @property @underscore_memoization def subscopes(self): """ Returns only the faked scopes - the other ones are not important for internal analysis. """ module = self.get_parent_until() faked_subscopes = [] for name in dir(self._cls().obj): f = fake.get_faked(module.obj, self.obj, name) if f: f.parent = self faked_subscopes.append(f) return faked_subscopes def is_scope(self): return True def get_self_attributes(self): return [] # Instance compatibility def get_imports(self): return [] # Builtins don't have imports class LazyNamesDict(object): """ A names_dict instance for compiled objects, resembles the parser.tree. """ def __init__(self, compiled_obj, is_instance): self._compiled_obj = compiled_obj self._is_instance = is_instance def __iter__(self): return (v[0].value for v in self.values()) @memoize_method def __getitem__(self, name): try: getattr(self._compiled_obj.obj, name) except AttributeError: raise KeyError('%s in %s not found.' % (name, self._compiled_obj)) return [CompiledName(self._compiled_obj, name)] def values(self): obj = self._compiled_obj.obj values = [] for name in dir(obj): try: values.append(self[name]) except KeyError: # The dir function can be wrong. pass # dir doesn't include the type names. 
if not inspect.ismodule(obj) and obj != type and not self._is_instance: values += _type_names_dict.values() return values class CompiledName(FakeName): def __init__(self, obj, name): super(CompiledName, self).__init__(name) self._obj = obj self.name = name def __repr__(self): try: name = self._obj.name # __name__ is not defined all the time except AttributeError: name = None return '<%s: (%s).%s>' % (type(self).__name__, name, self.name) def is_definition(self): return True @property @underscore_memoization def parent(self): module = self._obj.get_parent_until() return _create_from_name(module, self._obj, self.name) @parent.setter def parent(self, value): pass # Just ignore this, FakeName tries to overwrite the parent attribute. def dotted_from_fs_path(fs_path, sys_path=None): """ Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e. compares the path with sys.path and then returns the dotted_path. If the path is not in the sys.path, just returns None. """ if sys_path is None: sys_path = get_sys_path() if os.path.basename(fs_path).startswith('__init__.'): # We are calculating the path. __init__ files are not interesting. 
fs_path = os.path.dirname(fs_path) # prefer # - UNIX # /path/to/pythonX.Y/lib-dynload # /path/to/pythonX.Y/site-packages # - Windows # C:\path\to\DLLs # C:\path\to\Lib\site-packages # over # - UNIX # /path/to/pythonX.Y # - Windows # C:\path\to\Lib path = '' for s in sys_path: if (fs_path.startswith(s) and len(path) < len(s)): path = s return _path_re.sub('', fs_path[len(path):].lstrip(os.path.sep)).replace(os.path.sep, '.') def load_module(path=None, name=None): if path is not None: dotted_path = dotted_from_fs_path(path) else: dotted_path = name sys_path = get_sys_path() if dotted_path is None: p, _, dotted_path = path.partition(os.path.sep) sys_path.insert(0, p) temp, sys.path = sys.path, sys_path try: __import__(dotted_path) except RuntimeError: if 'PySide' in dotted_path or 'PyQt' in dotted_path: # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap # the QObject class. # See https://github.com/davidhalter/jedi/pull/483 return None raise except ImportError: # If a module is "corrupt" or not really a Python module or whatever. debug.warning('Module %s not importable.', path) return None finally: sys.path = temp # Just access the cache after import, because of #59 as well as the very # complicated import structure of Python. module = sys.modules[dotted_path] return CompiledObject(module) docstr_defaults = { 'floating point number': 'float', 'character': 'str', 'integer': 'int', 'dictionary': 'dict', 'string': 'str', } def _parse_function_doc(doc): """ Takes a function and returns the params and return value as a tuple. This is nothing more than a docstring parser. 
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None TODO docstrings like 'tuple of integers' """ # parse round parentheses: def func(a, (b,c)) try: count = 0 start = doc.index('(') for i, s in enumerate(doc[start:]): if s == '(': count += 1 elif s == ')': count -= 1 if count == 0: end = start + i break param_str = doc[start + 1:end] except (ValueError, UnboundLocalError): # ValueError for doc.index # UnboundLocalError for undefined end in last line debug.dbg('no brackets found - no param') end = 0 param_str = '' else: # remove square brackets, that show an optional param ( = None) def change_options(m): args = m.group(1).split(',') for i, a in enumerate(args): if a and '=' not in a: args[i] += '=None' return ','.join(args) while True: param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options, param_str) if changes == 0: break param_str = param_str.replace('-', '_') # see: isinstance.__doc__ # parse return value r = re.search('-[>-]* ', doc[end:end + 7]) if r is None: ret = '' else: index = end + r.end() # get result type, which can contain newlines pattern = re.compile(r'(,\n|[^\n-])+') ret_str = pattern.match(doc, index).group(0).strip() # New object -> object() ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) ret = docstr_defaults.get(ret_str, ret_str) return param_str, ret class Builtin(CompiledObject): @memoize_method def get_by_name(self, name): return self.names_dict[name][0].parent def _a_generator(foo): """Used to have an object to return for generators.""" yield 42 yield foo def _create_from_name(module, parent, name): faked = fake.get_faked(module.obj, parent.obj, name) # only functions are necessary. if faked is not None: faked.parent = parent return faked try: obj = getattr(parent.obj, name) except AttributeError: # happens e.g. 
in properties of # PyQt4.QtGui.QStyleOptionComboBox.currentText # -> just set it to None obj = None return CompiledObject(obj, parent) builtin = Builtin(_builtins) magic_function_class = CompiledObject(type(load_module), parent=builtin) generator_obj = CompiledObject(_a_generator(1.0)) _type_names_dict = builtin.get_by_name('type').names_dict none_obj = builtin.get_by_name('None') false_obj = builtin.get_by_name('False') true_obj = builtin.get_by_name('True') object_obj = builtin.get_by_name('object') def keyword_from_value(obj): if obj is None: return none_obj elif obj is False: return false_obj elif obj is True: return true_obj else: raise NotImplementedError def compiled_objects_cache(func): def wrapper(evaluator, obj, parent=builtin, module=None): # Do a very cheap form of caching here. key = id(obj), id(parent), id(module) try: return evaluator.compiled_cache[key][0] except KeyError: result = func(evaluator, obj, parent, module) # Need to cache all of them, otherwise the id could be overwritten. evaluator.compiled_cache[key] = result, obj, parent, module return result return wrapper @compiled_objects_cache def create(evaluator, obj, parent=builtin, module=None): """ A very weird interface class to this module. The more options provided the more acurate loading compiled objects is. """ if not inspect.ismodule(obj): faked = fake.get_faked(module and module.obj, obj) if faked is not None: faked.parent = parent return faked try: if parent == builtin and obj.__module__ in ('builtins', '__builtin__'): return builtin.get_by_name(obj.__name__) except AttributeError: pass return CompiledObject(obj, parent)
mit
dvliman/jaikuengine
.google_appengine/lib/django-1.4/django/db/models/fields/subclassing.py
104
1819
""" Convenience routines for creating non-trivial Field subclasses, as well as backwards compatibility utilities. Add SubfieldBase as the __metaclass__ for your Field subclass, implement to_python() and the other necessary methods and everything will work seamlessly. """ class SubfieldBase(type): """ A metaclass for custom Field subclasses. This ensures the model's attribute has the descriptor protocol attached to it. """ def __new__(cls, name, bases, attrs): new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs) new_class.contribute_to_class = make_contrib( new_class, attrs.get('contribute_to_class') ) return new_class class Creator(object): """ A placeholder class that provides a way to set the attribute on the model. """ def __init__(self, field): self.field = field def __get__(self, obj, type=None): if obj is None: raise AttributeError('Can only be accessed via an instance.') return obj.__dict__[self.field.name] def __set__(self, obj, value): obj.__dict__[self.field.name] = self.field.to_python(value) def make_contrib(superclass, func=None): """ Returns a suitable contribute_to_class() method for the Field subclass. If 'func' is passed in, it is the existing contribute_to_class() method on the subclass and it is called before anything else. It is assumed in this case that the existing contribute_to_class() calls all the necessary superclass methods. """ def contribute_to_class(self, cls, name): if func: func(self, cls, name) else: super(superclass, self).contribute_to_class(cls, name) setattr(cls, self.name, Creator(self)) return contribute_to_class
apache-2.0
zimhy/shadowsocks_analysis
shadowsocks/common.py
35
6302
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2014 clowwindy # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import absolute_import, division, print_function, \ with_statement import socket import struct import logging def compat_ord(s): if type(s) == int: return s return _ord(s) def compat_chr(d): if bytes == str: return _chr(d) return bytes([d]) _ord = ord _chr = chr ord = compat_ord chr = compat_chr def to_bytes(s): if bytes != str: if type(s) == str: return s.encode('utf-8') return s def to_str(s): if bytes != str: if type(s) == bytes: return s.decode('utf-8') return s def inet_ntop(family, ipstr): if family == socket.AF_INET: return to_bytes(socket.inet_ntoa(ipstr)) elif family == socket.AF_INET6: import re v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0') for i, j in zip(ipstr[::2], ipstr[1::2])) v6addr = re.sub('::+', '::', v6addr, count=1) return to_bytes(v6addr) def inet_pton(family, addr): addr = to_str(addr) if family == socket.AF_INET: return socket.inet_aton(addr) elif family == socket.AF_INET6: if '.' in addr: # a v4 addr v4addr = addr[addr.rindex(':') + 1:] v4addr = socket.inet_aton(v4addr) v4addr = map(lambda x: ('%02X' % ord(x)), v4addr) v4addr.insert(2, ':') newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr) return inet_pton(family, newaddr) dbyts = [0] * 8 # 8 groups grps = addr.split(':') for i, v in enumerate(grps): if v: dbyts[i] = int(v, 16) else: for j, w in enumerate(grps[::-1]): if w: dbyts[7 - j] = int(w, 16) else: break break return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts) else: raise RuntimeError("What family?") def patch_socket(): if not hasattr(socket, 'inet_pton'): socket.inet_pton = inet_pton if not hasattr(socket, 'inet_ntop'): socket.inet_ntop = inet_ntop patch_socket() ADDRTYPE_IPV4 = 1 ADDRTYPE_IPV6 = 4 ADDRTYPE_HOST = 3 def pack_addr(address): address_str = to_str(address) for family in (socket.AF_INET, socket.AF_INET6): try: r = socket.inet_pton(family, address_str) if family == socket.AF_INET6: return b'\x04' + r else: return b'\x01' + r except (TypeError, ValueError, OSError, 
IOError): pass if len(address) > 255: address = address[:255] # TODO return b'\x03' + chr(len(address)) + address def parse_header(data): addrtype = ord(data[0]) dest_addr = None dest_port = None header_length = 0 if addrtype == ADDRTYPE_IPV4: if len(data) >= 7: dest_addr = socket.inet_ntoa(data[1:5]) dest_port = struct.unpack('>H', data[5:7])[0] header_length = 7 else: logging.warn('header is too short') elif addrtype == ADDRTYPE_HOST: if len(data) > 2: addrlen = ord(data[1]) if len(data) >= 2 + addrlen: dest_addr = data[2:2 + addrlen] dest_port = struct.unpack('>H', data[2 + addrlen:4 + addrlen])[0] header_length = 4 + addrlen else: logging.warn('header is too short') else: logging.warn('header is too short') elif addrtype == ADDRTYPE_IPV6: if len(data) >= 19: dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17]) dest_port = struct.unpack('>H', data[17:19])[0] header_length = 19 else: logging.warn('header is too short') else: logging.warn('unsupported addrtype %d, maybe wrong password' % addrtype) if dest_addr is None: return None return addrtype, to_bytes(dest_addr), dest_port, header_length def test_inet_conv(): ipv4 = b'8.8.4.4' b = inet_pton(socket.AF_INET, ipv4) assert inet_ntop(socket.AF_INET, b) == ipv4 ipv6 = b'2404:6800:4005:805::1011' b = inet_pton(socket.AF_INET6, ipv6) assert inet_ntop(socket.AF_INET6, b) == ipv6 def test_parse_header(): assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \ (3, b'www.google.com', 80, 18) assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \ (1, b'8.8.8.8', 53, 7) assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00' b'\x00\x10\x11\x00\x50')) == \ (4, b'2404:6800:4005:805::1011', 80, 19) def test_pack_header(): assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08' assert pack_addr(b'2404:6800:4005:805::1011') == \ b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11' assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com' if __name__ == '__main__': test_inet_conv() 
test_parse_header() test_pack_header()
mit
QuLogic/vispy
examples/demo/gloo/graph.py
17
4902
#!/usr/bin/env python # -*- coding: utf-8 -*- # vispy: gallery 60 """ Dynamic planar graph layout. """ import numpy as np from vispy import gloo, app from vispy.gloo import set_viewport, set_state, clear vert = """ #version 120 // Uniforms // ------------------------------------ uniform mat4 u_model; uniform mat4 u_view; uniform mat4 u_projection; uniform float u_antialias; uniform float u_size; // Attributes // ------------------------------------ attribute vec3 a_position; attribute vec4 a_fg_color; attribute vec4 a_bg_color; attribute float a_linewidth; attribute float a_size; // Varyings // ------------------------------------ varying vec4 v_fg_color; varying vec4 v_bg_color; varying float v_size; varying float v_linewidth; varying float v_antialias; void main (void) { v_size = a_size * u_size; v_linewidth = a_linewidth; v_antialias = u_antialias; v_fg_color = a_fg_color; v_bg_color = a_bg_color; gl_Position = u_projection * u_view * u_model * vec4(a_position*u_size,1.0); gl_PointSize = v_size + 2*(v_linewidth + 1.5*v_antialias); } """ frag = """ #version 120 // Constants // ------------------------------------ // Varyings // ------------------------------------ varying vec4 v_fg_color; varying vec4 v_bg_color; varying float v_size; varying float v_linewidth; varying float v_antialias; // Functions // ------------------------------------ float marker(vec2 P, float size); // Main // ------------------------------------ void main() { float size = v_size +2*(v_linewidth + 1.5*v_antialias); float t = v_linewidth/2.0-v_antialias; // The marker function needs to be linked with this shader float r = marker(gl_PointCoord, size); float d = abs(r) - t; if( r > (v_linewidth/2.0+v_antialias)) { discard; } else if( d < 0.0 ) { gl_FragColor = v_fg_color; } else { float alpha = d/v_antialias; alpha = exp(-alpha*alpha); if (r > 0) gl_FragColor = vec4(v_fg_color.rgb, alpha*v_fg_color.a); else gl_FragColor = mix(v_bg_color, v_fg_color, alpha); } } float marker(vec2 P, float 
size) { float r = length((P.xy - vec2(0.5,0.5))*size); r -= v_size/2; return r; } """ vs = """ attribute vec3 a_position; attribute vec4 a_fg_color; attribute vec4 a_bg_color; attribute float a_size; attribute float a_linewidth; void main(){ gl_Position = vec4(a_position, 1.); } """ fs = """ void main(){ gl_FragColor = vec4(0., 0., 0., 1.); } """ class Canvas(app.Canvas): def __init__(self, **kwargs): # Initialize the canvas for real app.Canvas.__init__(self, keys='interactive', size=(512, 512), **kwargs) ps = self.pixel_scale self.position = 50, 50 n = 100 ne = 100 data = np.zeros(n, dtype=[('a_position', np.float32, 3), ('a_fg_color', np.float32, 4), ('a_bg_color', np.float32, 4), ('a_size', np.float32, 1), ('a_linewidth', np.float32, 1), ]) edges = np.random.randint(size=(ne, 2), low=0, high=n).astype(np.uint32) data['a_position'] = np.hstack((.25 * np.random.randn(n, 2), np.zeros((n, 1)))) data['a_fg_color'] = 0, 0, 0, 1 color = np.random.uniform(0.5, 1., (n, 3)) data['a_bg_color'] = np.hstack((color, np.ones((n, 1)))) data['a_size'] = np.random.randint(size=n, low=8*ps, high=20*ps) data['a_linewidth'] = 1.*ps u_antialias = 1 self.vbo = gloo.VertexBuffer(data) self.index = gloo.IndexBuffer(edges) self.view = np.eye(4, dtype=np.float32) self.model = np.eye(4, dtype=np.float32) self.projection = np.eye(4, dtype=np.float32) self.program = gloo.Program(vert, frag) self.program.bind(self.vbo) self.program['u_size'] = 1 self.program['u_antialias'] = u_antialias self.program['u_model'] = self.model self.program['u_view'] = self.view self.program['u_projection'] = self.projection set_viewport(0, 0, *self.physical_size) self.program_e = gloo.Program(vs, fs) self.program_e.bind(self.vbo) set_state(clear_color='white', depth_test=False, blend=True, blend_func=('src_alpha', 'one_minus_src_alpha')) self.show() def on_resize(self, event): set_viewport(0, 0, *event.physical_size) def on_draw(self, event): clear(color=True, depth=True) self.program_e.draw('lines', self.index) 
self.program.draw('points') if __name__ == '__main__': c = Canvas(title="Graph") app.run()
bsd-3-clause
florian-dacosta/OpenUpgrade
addons/resource/tests/test_resource.py
243
32181
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp.addons.resource.tests.common import TestResourceCommon class TestResource(TestResourceCommon): def test_00_intervals(self): intervals = [ ( datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S') ), ( datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S') ), ( datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S') ), ( datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S') ), ( datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S') ), ( datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S') ) ] # Test: interval cleaning 
cleaned_intervals = self.resource_calendar.interval_clean(intervals) self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning') # First interval: 03, unchanged self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') # Second intreval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12 self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') # Third interval: 04, 17-21, 18-19 being inside 17-21 self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning') # Test: disjoint removal working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S')) result = self.resource_calendar.interval_remove_leaves(working_interval, intervals) self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval') # First interval: 04, 14-17 self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') # Test: schedule hours on intervals result = 
self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5) self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval') # First interval: 03, 8-10 untouches self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') # First interval: 04, 08-11:30 self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') # Test: schedule hours on intervals, backwards cleaned_intervals.reverse() result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False) self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval') # First interval: 03, 8-10 untouches self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') # First interval: 04, 08-11:30 self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval') def test_10_calendar_basics(self): """ Testing basic method of resource.calendar """ cr, uid = self.cr, self.uid # -------------------------------------------------- # Test1: get_next_day # 
-------------------------------------------------- # Test: next day: next day after day1 is day4 date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date()) self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing') # Test: next day: next day after day4 is (day1+7) date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date()) self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing') # Test: next day: next day after day4+1 is (day1+7) date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1)) self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing') # Test: next day: next day after day1-1 is day1 date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1)) self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing') # -------------------------------------------------- # Test2: get_previous_day # -------------------------------------------------- # Test: previous day: previous day before day1 is (day4-7) date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date()) self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing') # Test: previous day: previous day before day4 is day1 date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date()) self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing') # Test: previous day: previous day before day4+1 is day4 date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1)) self.assertEqual(date, self.date2.date(), 'resource_calendar: 
wrong previous day computing') # Test: previous day: previous day before day1-1 is (day4-7) date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1)) self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing') # -------------------------------------------------- # Test3: misc # -------------------------------------------------- weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id) self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing') attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5]) self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]), 'resource_calendar: wrong attendances filtering by weekdays computing') def test_20_calendar_working_intervals(self): """ Testing working intervals computing method of resource.calendar """ cr, uid = self.cr, self.uid _format = '%Y-%m-%d %H:%M:%S' # Test: day0 without leaves: 1 interval intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1) self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals') # Test: day3 without leaves: 2 interval intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2) self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', 
_format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals') # Test: day0 with leaves outside range: 1 interval intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True) self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals') # Test: day0 with leaves: 2 intervals because of leave between 9 ans 12, ending at 15:45:30 intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=8) + relativedelta(days=7), end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7), compute_leaves=True) self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals') self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals') def test_30_calendar_working_days(self): """ Testing calendar hours computation on a working day """ cr, uid = self.cr, self.uid _format = '%Y-%m-%d %H:%M:%S' # Test: day1, beginning 
at 10:30 -> work from 10:30 (arrival) until 16:00 intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0)) self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing') # Test: hour computation for same interval, should give 5.5 wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0)) self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing') # Test: day1+7 on leave, without leave computation intervals = self.resource_calendar.get_working_intervals_of_day( cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7) ) # Result: day1 (08->16) self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing') # Test: day1+7 on leave, with generic leave computation intervals = self.resource_calendar.get_working_intervals_of_day( cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7), compute_leaves=True ) # Result: day1 (08->09 + 12->16) self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 
'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing') # Test: day1+14 on leave, with generic leave computation intervals = self.resource_calendar.get_working_intervals_of_day( cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14), compute_leaves=True ) # Result: day1 (08->16) self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing') self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing') self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing') # Test: day1+14 on leave, with resource leave computation intervals = self.resource_calendar.get_working_intervals_of_day( cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14), compute_leaves=True, resource_id=self.resource1_id ) # Result: nothing, because on leave self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing') def test_40_calendar_hours_scheduling(self): """ Testing calendar hours scheduling """ cr, uid = self.cr, self.uid _format = '%Y-%m-%d %H:%M:%S' # -------------------------------------------------- # Test0: schedule hours backwards (old interval_min_get) # Done without calendar # -------------------------------------------------- # Done without calendar # res = self.resource_calendar.interval_min_get(cr, uid, None, 
self.date1, 40, resource=False) # res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7)) # -------------------------------------------------- # Test1: schedule hours backwards (old interval_min_get) # -------------------------------------------------- # res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False) # (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0)) # (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0)) # (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0)) # (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0)) # (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0)) # (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0)) # (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0)) res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0)) # current day, limited at 09:00 because of day_dt specified -> 1 hour self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling') # previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-4][0], 
datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling') # 7 hours remaining self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling') # Compute scheduled hours td = timedelta() for item in res: td += item[1] - item[0] self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling') # -------------------------------------------------- # Test2: schedule hours forward (old interval_get) # -------------------------------------------------- # res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True) # (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0)) # (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0)) # (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0)) # (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0)) # (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0)) # (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0)) # (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0)) res 
= self.resource_calendar.schedule_hours( cr, uid, self.calendar_id, 40, day_dt=self.date1.replace(minute=0, second=0) ) self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling') td = timedelta() for item in res: td += item[1] - item[0] self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours 
scheduling') # res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True) # (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0)) # (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0)) # (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0)) # (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0)) # (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0)) # (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0)) # (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0)) res = self.resource_calendar.schedule_hours( cr, uid, self.calendar_id, 40, day_dt=self.date1.replace(minute=0, second=0), compute_leaves=True, resource_id=self.resource1_id ) self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling') 
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling') self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling') td = timedelta() for item in res: td += item[1] - item[0] self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling') # -------------------------------------------------- # Test3: working hours (old _interval_hours_get) # -------------------------------------------------- # old API: resource without leaves # res: 2 weeks -> 40 hours res = self.resource_calendar._interval_hours_get( cr, uid, self.calendar_id, self.date1.replace(hour=6, minute=0), self.date2.replace(hour=23, minute=0) + relativedelta(days=7), resource_id=self.resource1_id, exclude_leaves=True) self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation') # new API: resource without leaves # res: 2 weeks -> 40 hours res = self.resource_calendar.get_working_hours( cr, uid, self.calendar_id, self.date1.replace(hour=6, minute=0), 
self.date2.replace(hour=23, minute=0) + relativedelta(days=7), compute_leaves=False, resource_id=self.resource1_id) self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation') # old API: resource and leaves # res: 2 weeks -> 40 hours - (3+4) leave hours res = self.resource_calendar._interval_hours_get( cr, uid, self.calendar_id, self.date1.replace(hour=6, minute=0), self.date2.replace(hour=23, minute=0) + relativedelta(days=7), resource_id=self.resource1_id, exclude_leaves=False) self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation') # new API: resource and leaves # res: 2 weeks -> 40 hours - (3+4) leave hours res = self.resource_calendar.get_working_hours( cr, uid, self.calendar_id, self.date1.replace(hour=6, minute=0), self.date2.replace(hour=23, minute=0) + relativedelta(days=7), compute_leaves=True, resource_id=self.resource1_id) self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation') # -------------------------------------------------- # Test4: misc # -------------------------------------------------- # Test without calendar and default_interval res = self.resource_calendar.get_working_hours( cr, uid, None, self.date1.replace(hour=6, minute=0), self.date2.replace(hour=23, minute=0), compute_leaves=True, resource_id=self.resource1_id, default_interval=(8, 16)) self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation') def test_50_calendar_schedule_days(self): """ Testing calendar days scheduling """ cr, uid = self.cr, self.uid _format = '%Y-%m-%d %H:%M:%S' # -------------------------------------------------- # Test1: with calendar # -------------------------------------------------- res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1) self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:0:00', _format).date(), 'resource_calendar: wrong days scheduling') res = 
self.resource_calendar.schedule_days_get_date( cr, uid, self.calendar_id, 5, day_date=self.date1, compute_leaves=True, resource_id=self.resource1_id) self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:0:00', _format).date(), 'resource_calendar: wrong days scheduling') # -------------------------------------------------- # Test2: misc # -------------------------------------------------- # Without calendar, should only count days -> 12 -> 16, 5 days with default intervals res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16)) self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling') def seconds(td): assert isinstance(td, timedelta) return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
agpl-3.0
rroldan/contactlearn
boilerplate/external/wtforms/ext/i18n/utils.py
119
1477
import os def messages_path(): """ Determine the path to the 'messages' directory as best possible. """ module_path = os.path.abspath(__file__) return os.path.join(os.path.dirname(module_path), 'messages') def get_builtin_gnu_translations(languages=None): """ Get a gettext.GNUTranslations object pointing at the included translation files. :param languages: A list of languages to try, in order. If omitted or None, then gettext will try to use locale information from the environment. """ import gettext return gettext.translation('wtforms', messages_path(), languages) def get_translations(languages=None): """ Get a WTForms translation object which wraps the builtin GNUTranslations object. """ translations = get_builtin_gnu_translations(languages) if hasattr(translations, 'ugettext'): return DefaultTranslations(translations) else: # Python 3 has no ugettext/ungettext, so just return the translations object. return translations class DefaultTranslations(object): """ A WTForms translations object to wrap translations objects which use ugettext/ungettext. """ def __init__(self, translations): self.translations = translations def gettext(self, string): return self.translations.ugettext(string) def ngettext(self, singular, plural, n): return self.translations.ungettext(singular, plural, n)
lgpl-3.0
pczerkas/tempest
tempest/api/baremetal/admin/test_api_discovery.py
36
1692
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.baremetal.admin import base from tempest import test class TestApiDiscovery(base.BaseBaremetalTest): """Tests for API discovery features.""" @test.idempotent_id('a3c27e94-f56c-42c4-8600-d6790650b9c5') def test_api_versions(self): _, descr = self.client.get_api_description() expected_versions = ('v1',) versions = [version['id'] for version in descr['versions']] for v in expected_versions: self.assertIn(v, versions) @test.idempotent_id('896283a6-488e-4f31-af78-6614286cbe0d') def test_default_version(self): _, descr = self.client.get_api_description() default_version = descr['default_version'] self.assertEqual(default_version['id'], 'v1') @test.idempotent_id('abc0b34d-e684-4546-9728-ab7a9ad9f174') def test_version_1_resources(self): _, descr = self.client.get_version_description(version='v1') expected_resources = ('nodes', 'chassis', 'ports', 'links', 'media_types') for res in expected_resources: self.assertIn(res, descr)
apache-2.0
LuckycoinFoundation/LuckycoinCore
share/qt/extract_strings_qt.py
145
1900
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt4 stringdefs so that they can be picked up by Qt linguist. ''' from subprocess import Popen, PIPE import glob import operator import os OUT_CPP="src/qt/bitcoinstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = glob.glob('src/*.cpp') + glob.glob('src/*.h') # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out) f = open(OUT_CPP, 'w') f.write(""" #include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *bitcoin_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
mit
miniconfig/home-assistant
tests/components/sensor/test_rfxtrx.py
25
13917
"""The tests for the Rfxtrx sensor platform.""" import unittest import pytest from homeassistant.setup import setup_component from homeassistant.components import rfxtrx as rfxtrx_core from homeassistant.const import TEMP_CELSIUS from tests.common import get_test_home_assistant, mock_component @pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'") class TestSensorRfxtrx(unittest.TestCase): """Test the Rfxtrx sensor platform.""" def setUp(self): """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() mock_component(self.hass, 'rfxtrx') def tearDown(self): """Stop everything that was started.""" rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = [] rfxtrx_core.RFX_DEVICES = {} if rfxtrx_core.RFXOBJECT: rfxtrx_core.RFXOBJECT.close_connection() self.hass.stop() def test_default_config(self): """Test with 0 sensor.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {}}})) self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) def test_old_config_sensor(self): """Test with 1 sensor.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {'sensor_0502': { 'name': 'Test', 'packetid': '0a52080705020095220269', 'data_type': 'Temperature'}}}})) self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES)) entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature'] self.assertEqual('Test', entity.name) self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual(None, entity.state) def test_one_sensor(self): """Test with 1 sensor.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {'0a52080705020095220269': { 'name': 'Test', 'data_type': 'Temperature'}}}})) self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES)) entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature'] self.assertEqual('Test', entity.name) self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual(None, entity.state) def 
test_one_sensor_no_datatype(self): """Test with 1 sensor.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {'0a52080705020095220269': { 'name': 'Test'}}}})) self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES)) entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature'] self.assertEqual('Test', entity.name) self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual(None, entity.state) entity_id = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']\ .entity_id entity = self.hass.states.get(entity_id) self.assertEqual('Test', entity.name) self.assertEqual('unknown', entity.state) def test_several_sensors(self): """Test with 3 sensors.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {'0a52080705020095220269': { 'name': 'Test', 'data_type': 'Temperature'}, '0a520802060100ff0e0269': { 'name': 'Bath', 'data_type': ['Temperature', 'Humidity'] }}}})) self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) device_num = 0 for id in rfxtrx_core.RFX_DEVICES: if id == 'sensor_0601': device_num = device_num + 1 self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2) _entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature'] _entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity'] self.assertEqual('%', _entity_hum.unit_of_measurement) self.assertEqual('Bath', _entity_hum.__str__()) self.assertEqual(None, _entity_hum.state) self.assertEqual(TEMP_CELSIUS, _entity_temp.unit_of_measurement) self.assertEqual('Bath', _entity_temp.__str__()) elif id == 'sensor_0502': device_num = device_num + 1 entity = rfxtrx_core.RFX_DEVICES[id]['Temperature'] self.assertEqual(None, entity.state) self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual('Test', entity.__str__()) self.assertEqual(2, device_num) def test_discover_sensor(self): """Test with discovery of sensor.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 
'automatic_add': True, 'devices': {}}})) event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279') event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature'] self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES)) self.assertEqual({'Humidity status': 'normal', 'Temperature': 18.4, 'Rssi numeric': 7, 'Humidity': 27, 'Battery numeric': 9, 'Humidity status numeric': 2}, entity.device_state_attributes) self.assertEqual('0a520801070100b81b0279', entity.__str__()) rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES)) event = rfxtrx_core.get_rfx_object('0a52080405020095240279') event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature'] self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) self.assertEqual({'Humidity status': 'normal', 'Temperature': 14.9, 'Rssi numeric': 7, 'Humidity': 36, 'Battery numeric': 9, 'Humidity status numeric': 2}, entity.device_state_attributes) self.assertEqual('0a52080405020095240279', entity.__str__()) event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279') event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature'] self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) self.assertEqual({'Humidity status': 'normal', 'Temperature': 17.9, 'Rssi numeric': 7, 'Humidity': 27, 'Battery numeric': 9, 'Humidity status numeric': 2}, entity.device_state_attributes) self.assertEqual('0a520801070100b81b0279', entity.__str__()) # trying to add a switch event = rfxtrx_core.get_rfx_object('0b1100cd0213c7f210010f70') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) def test_discover_sensor_noautoadd(self): """Test with discover of sensor when 
auto add is False.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'automatic_add': False, 'devices': {}}})) event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279') event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y') self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) event = rfxtrx_core.get_rfx_object('0a52080405020095240279') event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279') event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES)) def test_update_of_sensors(self): """Test with 3 sensors.""" self.assertTrue(setup_component(self.hass, 'sensor', { 'sensor': {'platform': 'rfxtrx', 'devices': {'0a52080705020095220269': { 'name': 'Test', 'data_type': 'Temperature'}, '0a520802060100ff0e0269': { 'name': 'Bath', 'data_type': ['Temperature', 'Humidity'] }}}})) self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) device_num = 0 for id in rfxtrx_core.RFX_DEVICES: if id == 'sensor_0601': device_num = device_num + 1 self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2) _entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature'] _entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity'] self.assertEqual('%', _entity_hum.unit_of_measurement) self.assertEqual('Bath', _entity_hum.__str__()) self.assertEqual(None, _entity_temp.state) self.assertEqual(TEMP_CELSIUS, _entity_temp.unit_of_measurement) self.assertEqual('Bath', _entity_temp.__str__()) elif id == 'sensor_0502': device_num = device_num + 1 entity = rfxtrx_core.RFX_DEVICES[id]['Temperature'] self.assertEqual(None, 
entity.state) self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual('Test', entity.__str__()) self.assertEqual(2, device_num) event = rfxtrx_core.get_rfx_object('0a520802060101ff0f0269') event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) event = rfxtrx_core.get_rfx_object('0a52080705020085220269') event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y') rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event) self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES)) device_num = 0 for id in rfxtrx_core.RFX_DEVICES: if id == 'sensor_0601': device_num = device_num + 1 self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2) _entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature'] _entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity'] self.assertEqual('%', _entity_hum.unit_of_measurement) self.assertEqual(15, _entity_hum.state) self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1, 'Humidity': 15, 'Humidity status': 'normal', 'Humidity status numeric': 2, 'Rssi numeric': 6}, _entity_hum.device_state_attributes) self.assertEqual('Bath', _entity_hum.__str__()) self.assertEqual(TEMP_CELSIUS, _entity_temp.unit_of_measurement) self.assertEqual(51.1, _entity_temp.state) self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1, 'Humidity': 15, 'Humidity status': 'normal', 'Humidity status numeric': 2, 'Rssi numeric': 6}, _entity_temp.device_state_attributes) self.assertEqual('Bath', _entity_temp.__str__()) elif id == 'sensor_0502': device_num = device_num + 1 entity = rfxtrx_core.RFX_DEVICES[id]['Temperature'] self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement) self.assertEqual(13.3, entity.state) self.assertEqual({'Humidity status': 'normal', 'Temperature': 13.3, 'Rssi numeric': 6, 'Humidity': 34, 'Battery numeric': 9, 'Humidity status numeric': 2}, entity.device_state_attributes) self.assertEqual('Test', entity.__str__()) self.assertEqual(2, 
device_num) self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
mit
TeamWin/android_kernel_oneplus_msm8974
scripts/gcc-wrapper.py
1276
3382
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Invoke gcc, looking for warnings, and causing a failure if there are # non-whitelisted warnings. import errno import re import os import sys import subprocess # Note that gcc uses unicode, which may depend on the locale. TODO: # force LANG to be set to en_US.UTF-8 to get consistent warnings. 
allowed_warnings = set([ "return_address.c:62", ]) # Capture the name of the object file, can find it. ofile = None warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''') def interpret_warning(line): """Decode the message from gcc. The messages we care about have a filename, and a warning""" line = line.rstrip('\n') m = warning_re.match(line) if m and m.group(2) not in allowed_warnings: print "error, forbidden warning:", m.group(2) # If there is a warning, remove any object if it exists. if ofile: try: os.remove(ofile) except OSError: pass sys.exit(1) def run_gcc(): args = sys.argv[1:] # Look for -o try: i = args.index('-o') global ofile ofile = args[i+1] except (ValueError, IndexError): pass compiler = sys.argv[0] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE) for line in proc.stderr: print line, interpret_warning(line) result = proc.wait() except OSError as e: result = e.errno if result == errno.ENOENT: print args[0] + ':',e.strerror print 'Is your PATH set correctly?' else: print ' '.join(args), str(e) return result if __name__ == '__main__': status = run_gcc() sys.exit(status)
gpl-2.0
theo-l/django
tests/signing/tests.py
6
6968
import datetime from django.core import signing from django.test import SimpleTestCase from django.test.utils import freeze_time from django.utils.crypto import InvalidAlgorithm class TestSigner(SimpleTestCase): def test_signature(self): "signature() method should generate a signature" signer = signing.Signer('predictable-secret') signer2 = signing.Signer('predictable-secret2') for s in ( b'hello', b'3098247:529:087:', '\u2019'.encode(), ): self.assertEqual( signer.signature(s), signing.base64_hmac( signer.salt + 'signer', s, 'predictable-secret', algorithm=signer.algorithm, ) ) self.assertNotEqual(signer.signature(s), signer2.signature(s)) def test_signature_with_salt(self): "signature(value, salt=...) should work" signer = signing.Signer('predictable-secret', salt='extra-salt') self.assertEqual( signer.signature('hello'), signing.base64_hmac( 'extra-salt' + 'signer', 'hello', 'predictable-secret', algorithm=signer.algorithm, ) ) self.assertNotEqual( signing.Signer('predictable-secret', salt='one').signature('hello'), signing.Signer('predictable-secret', salt='two').signature('hello')) def test_custom_algorithm(self): signer = signing.Signer('predictable-secret', algorithm='sha512') self.assertEqual( signer.signature('hello'), 'Usf3uVQOZ9m6uPfVonKR-EBXjPe7bjMbp3_Fq8MfsptgkkM1ojidN0BxYaT5HAEN1' 'VzO9_jVu7R-VkqknHYNvw', ) def test_invalid_algorithm(self): signer = signing.Signer('predictable-secret', algorithm='whatever') msg = "'whatever' is not an algorithm accepted by the hashlib module." with self.assertRaisesMessage(InvalidAlgorithm, msg): signer.sign('hello') def test_legacy_signature(self): # RemovedInDjango40Warning: pre-Django 3.1 signatures won't be # supported. 
signer = signing.Signer() sha1_sig = 'foo:l-EMM5FtewpcHMbKFeQodt3X9z8' self.assertNotEqual(signer.sign('foo'), sha1_sig) self.assertEqual(signer.unsign(sha1_sig), 'foo') def test_sign_unsign(self): "sign/unsign should be reversible" signer = signing.Signer('predictable-secret') examples = [ 'q;wjmbk;wkmb', '3098247529087', '3098247:529:087:', 'jkw osanteuh ,rcuh nthu aou oauh ,ud du', '\u2019', ] for example in examples: signed = signer.sign(example) self.assertIsInstance(signed, str) self.assertNotEqual(example, signed) self.assertEqual(example, signer.unsign(signed)) def test_sign_unsign_non_string(self): signer = signing.Signer('predictable-secret') values = [ 123, 1.23, True, datetime.date.today(), ] for value in values: with self.subTest(value): signed = signer.sign(value) self.assertIsInstance(signed, str) self.assertNotEqual(signed, value) self.assertEqual(signer.unsign(signed), str(value)) def test_unsign_detects_tampering(self): "unsign should raise an exception if the value has been tampered with" signer = signing.Signer('predictable-secret') value = 'Another string' signed_value = signer.sign(value) transforms = ( lambda s: s.upper(), lambda s: s + 'a', lambda s: 'a' + s[1:], lambda s: s.replace(':', ''), ) self.assertEqual(value, signer.unsign(signed_value)) for transform in transforms: with self.assertRaises(signing.BadSignature): signer.unsign(transform(signed_value)) def test_dumps_loads(self): "dumps and loads be reversible for any JSON serializable object" objects = [ ['a', 'list'], 'a string \u2019', {'a': 'dictionary'}, ] for o in objects: self.assertNotEqual(o, signing.dumps(o)) self.assertEqual(o, signing.loads(signing.dumps(o))) self.assertNotEqual(o, signing.dumps(o, compress=True)) self.assertEqual(o, signing.loads(signing.dumps(o, compress=True))) def test_decode_detects_tampering(self): "loads should raise exception for tampered objects" transforms = ( lambda s: s.upper(), lambda s: s + 'a', lambda s: 'a' + s[1:], lambda s: s.replace(':', 
''), ) value = { 'foo': 'bar', 'baz': 1, } encoded = signing.dumps(value) self.assertEqual(value, signing.loads(encoded)) for transform in transforms: with self.assertRaises(signing.BadSignature): signing.loads(transform(encoded)) def test_works_with_non_ascii_keys(self): binary_key = b'\xe7' # Set some binary (non-ASCII key) s = signing.Signer(binary_key) self.assertEqual( 'foo:EE4qGC5MEKyQG5msxYA0sBohAxLC0BJf8uRhemh0BGU', s.sign('foo'), ) def test_valid_sep(self): separators = ['/', '*sep*', ','] for sep in separators: signer = signing.Signer('predictable-secret', sep=sep) self.assertEqual( 'foo%sjZQoX_FtSO70jX9HLRGg2A_2s4kdDBxz1QoO_OpEQb0' % sep, signer.sign('foo'), ) def test_invalid_sep(self): """should warn on invalid separator""" msg = 'Unsafe Signer separator: %r (cannot be empty or consist of only A-z0-9-_=)' separators = ['', '-', 'abc'] for sep in separators: with self.assertRaisesMessage(ValueError, msg % sep): signing.Signer(sep=sep) class TestTimestampSigner(SimpleTestCase): def test_timestamp_signer(self): value = 'hello' with freeze_time(123456789): signer = signing.TimestampSigner('predictable-key') ts = signer.sign(value) self.assertNotEqual(ts, signing.Signer('predictable-key').sign(value)) self.assertEqual(signer.unsign(ts), value) with freeze_time(123456800): self.assertEqual(signer.unsign(ts, max_age=12), value) # max_age parameter can also accept a datetime.timedelta object self.assertEqual(signer.unsign(ts, max_age=datetime.timedelta(seconds=11)), value) with self.assertRaises(signing.SignatureExpired): signer.unsign(ts, max_age=10)
bsd-3-clause
cushon/bazel
third_party/py/abseil/setup.py
15
2011
# Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Abseil setup configuration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import platform try: import setuptools except ImportError: from ez_setup import use_setuptools use_setuptools() import setuptools py_version = platform.python_version_tuple() if py_version < ('2', '7') or py_version[0] == '3' and py_version < ('3', '4'): raise RuntimeError('Python version 2.7 or 3.4+ is required.') setuptools.setup( name='absl-py', version='0.1.1', description='Abseil Python Common Libraries', author='The Abseil Authors', url='https://github.com/abseil/abseil-py', packages=setuptools.find_packages(exclude=[ '*.tests', '*.tests.*', 'tests.*', 'tests', ]), install_requires=[ 'six', ], license='Apache 2.0', classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], )
apache-2.0
csutherl/sos
sos/plugins/named.py
12
2914
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin from os.path import exists, join, normpath class Named(Plugin): """BIND named server """ plugin_name = "named" profiles = ('system', 'services', 'network') named_conf = "/etc/named.conf" config_files = named_conf def setup(self): for cfg in self.config_files: if exists(cfg): self.add_copy_spec([ cfg, self.get_dns_dir(cfg) ]) self.add_forbidden_path(join(self.get_dns_dir(cfg), "chroot/dev")) self.add_forbidden_path(join(self.get_dns_dir(cfg), "chroot/proc")) def get_dns_dir(self, config_file): """ grab directory path from named{conf,boot} """ directory_list = self.do_regex_find_all("directory\s+\"(.*)\"", config_file) if directory_list: return normpath(directory_list[0]) else: return "" def postproc(self): match = r"(\s*arg \"password )[^\"]*" subst = r"\1******" self.do_file_sub(self.named_conf, match, subst) class RedHatNamed(Named, RedHatPlugin): named_conf = "/etc/named.conf" config_files = ("/etc/named.conf", "/etc/named.boot") files = (named_conf, '/etc/sysconfig/named') packages = ('bind',) def setup(self): super(RedHatNamed, self).setup() self.add_copy_spec("/etc/named/") self.add_copy_spec("/etc/sysconfig/named") self.add_cmd_output("klist -ket /etc/named.keytab") self.add_forbidden_path("/etc/named.keytab") return 
class DebianNamed(Named, DebianPlugin, UbuntuPlugin): files = ('/etc/bind/named.conf') packages = ('bind9',) named_conf = "/etc/bind/named.conf" config_files = (named_conf, "/etc/bind/named.conf.options", "/etc/bind/named.conf.local") def setup(self): super(DebianNamed, self).setup() self.add_copy_spec("/etc/bind/") return # vim: set et ts=4 sw=4 :
gpl-2.0
stackforge/tacker
tacker/api/vnflcm/v1/sync_resource.py
2
5064
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from tacker.common import csar_utils from tacker.common import exceptions from tacker.common import utils from tacker.conductor.conductorrpc import vnf_pkgm_rpc from tacker.glance_store import store as glance_store from tacker import objects from tacker.objects import fields import tacker.vnfm.nfvo_client as nfvo_client import time import webob CONF = cfg.CONF LOG = logging.getLogger(__name__) class SyncVnfPackage: vnf_package_rpc_api = vnf_pkgm_rpc.VNFPackageRPCAPI() @classmethod def create_package(cls, context, vnf_package_info): """vnf_package, create a vnf_package_vnfd table.""" vnf_package_info = utils.convert_camelcase_to_snakecase( vnf_package_info) try: vnf_package = cls.__create_vnf_package(context, vnf_package_info) except Exception as exc: raise webob.exc.HTTPInternalServerError( explanation=exc) try: artifact_paths = cls._get_artifact_paths(vnf_package_info) vnf_package_binary = \ nfvo_client.VnfPackageRequest.download_vnf_packages( vnf_package.id, artifact_paths) except nfvo_client.UndefinedExternalSettingException as exc: raise webob.exc.HTTPNotFound(explanation=exc) except (nfvo_client.FaliedDownloadContentException, Exception) as exc: raise webob.exc.HTTPInternalServerError( explanation=exc) try: (location, size, _, multihash, _) = glance_store.store_csar( context, vnf_package.id, vnf_package_binary) cls.__update_vnf_package(vnf_package, location, size, 
multihash) cls.vnf_package_rpc_api.upload_vnf_package_content( context, vnf_package) vnf_package_vnfd = cls._get_vnf_package_vnfd( context, vnf_package_info.get('vnfd_id')) except Exception as exc: raise webob.exc.HTTPInternalServerError( explanation=exc) return vnf_package_vnfd @classmethod def _get_artifact_paths(cls, vnf_package_info): additional_artifacts = vnf_package_info.get('additional_artifacts') if additional_artifacts is None: return None return [artifact.get('artifact_path') for artifact in additional_artifacts if 'artifact_path' in artifact] @classmethod def __store_csar(cls, context, id, body): (location, size, checksum, multihash, loc_meta) = glance_store.store_csar(context, id, body) return location, size, checksum, multihash, loc_meta @classmethod def __load_csar(cls, context, vnf_package): location = vnf_package.location_glance_store zip_path = glance_store.load_csar(vnf_package.id, location) vnf_data, flavours = csar_utils.load_csar_data( context.elevated(), vnf_package.id, zip_path) return vnf_data, flavours @classmethod def __create_vnf_package(cls, context, vnf_package_info): """VNF Package Table Registration.""" vnf_package = objects.VnfPackage( context=context, id=vnf_package_info.get('id'), onboarding_state=fields.PackageOnboardingStateType.CREATED, operational_state=fields.PackageOperationalStateType.DISABLED, usage_state=fields.PackageUsageStateType.NOT_IN_USE, tenant_id=context.project_id ) vnf_package.create() return vnf_package @classmethod def __update_vnf_package(cls, vnf_package, location, size, multihash): """VNF Package Table Update.""" vnf_package.algorithm = CONF.vnf_package.hashing_algorithm vnf_package.location_glance_store = location vnf_package.hash = multihash vnf_package.size = size vnf_package.save() @classmethod def _get_vnf_package_vnfd(cls, context, vnfd_id): """Get VNF Package VNFD.""" for num in range(CONF.vnf_lcm.retry_num): try: vnfd = objects.VnfPackageVnfd.get_by_id( context, vnfd_id) return vnfd except 
exceptions.VnfPackageVnfdNotFound: LOG.debug("retry_wait %s" % CONF.vnf_lcm.retry_wait) time.sleep(CONF.vnf_lcm.retry_wait) return None
apache-2.0
PrincessTeruko/TsunArt
tags/parser.py
1
4394
from tags.models import Tag import collections, copy, json, re, urllib.parse sample = 'young nia-teppelin with short multicolored hair and cat ears; yoko-littner with flame bikini, pink stockings, and long red hair without gun' DEFAULT_TAG_DICT = {'nouns': collections.OrderedDict({}), 'filters': {}} def format_query_str(query_str): query_str = urllib.parse.unquote(query_str) query_str = query_str.replace('+', ' ').lower() return query_str def parse(tag_str, reformat=False): '''Parses a tag/query string into a tag/query dict.''' parsed = copy.deepcopy(DEFAULT_TAG_DICT) if reformat: parsed['flat'] = [] parsed['text'] = '' proto = {'parents': {}, 'root': []} operator_re = re.compile(r'( with | without )+') conjunction_re = re.compile(r'(, and |,and | and |, |,)+') for clause in tag_str.split(';'): clauses = re.split(operator_re, clause) root_descriptor = clauses[0].split() try: root_noun = root_descriptor[-1] except IndexError: # This is an empty clause, so skip it. continue if len(clauses[0]) > 1: root_adjs = root_descriptor[:-1] else: root_adjs = [] if root_adjs and root_adjs[-1] == 'by': # This is an author meta attribute. 
if root_adjs[0] == 'not': try: parsed['filtered_authors'].append(root_noun) except KeyError: parsed['filtered_authors'] = [root_noun] if reformat: parsed['text'] += '; not by ' + root_noun else: parsed['author'] = root_noun if reformat: parsed['text'] += '; by ' + root_noun continue root_target = 'nouns' try: if root_adjs[0] == 'not': root_target = 'filters' root_adjs = root_adjs[1:] except IndexError: pass if root_noun not in parsed[root_target]: parsed[root_target][root_noun]= copy.deepcopy(proto) parsed[root_target][root_noun]['root'] = root_adjs if reformat: if root_adjs: adjs = ' '.join(root_adjs) + ' ' else: adjs = '' if root_target == 'filters': adjs = 'not ' + adjs parsed['flat'] += root_adjs + [root_noun] parsed['text'] += '; ' + adjs + root_noun for t, operator in list(enumerate(clauses))[1::2]: operator = operator.strip() descriptors = re.split(conjunction_re, clauses[t+1]) root_sub_descriptor = descriptors[0].split() try: noun = root_sub_descriptor[-1] except IndexError: # This is an empty descriptor, so skip it. continue adjectives = root_sub_descriptor[:-1] if reformat: if adjectives: adjs = ' '.join(adjectives) + ' ' else: adjs = '' parsed['flat'] += adjectives + [noun] parsed['text'] += (' ' + operator + ' ' + adjs + noun) target = 'nouns' if operator == 'without' or root_target == 'filters': target = 'filters' if noun not in parsed[target]: parsed[target][noun]= copy.deepcopy(proto) parsed[target][noun]['parents'][root_noun] = adjectives for d, conjunction in list(enumerate(descriptors))[1::2]: # Conjunctions no longer matter. conjunction = conjunction.strip() sub_descriptor = descriptors[d+1].split() try: noun = sub_descriptor[-1] except IndexError: # This is an empty descriptor, so skip it. 
continue adjectives = sub_descriptor[:-1] if reformat: if d + 2 == len(descriptors): if d == 1: conj = ' and ' else: conj = ', and ' else: conj = ', ' if adjectives: adjs = ' '.join(adjectives) + ' ' else: adjs = '' parsed['flat'] += adjectives + [noun] parsed['text'] += (conj + adjs + noun) if noun not in parsed[target]: parsed[target][noun]= copy.deepcopy(proto) parsed[target][noun]['parents'][root_noun] = adjectives if reformat: parsed['flat'] = set(parsed['flat']) parsed['text'] = parsed['text'][2:] #print(json.dumps(parsed, sort_keys=True, indent=4)) return parsed def validate(tag_dict): '''Verify that the tag dict is valid.''' pass def save(raw_tags, obj): '''Save tag dict to the database.''' tag_dict = parse(raw_tags, True) if hasattr(obj, 'tags'): for name in tag_dict['flat']: tag, new = Tag.objects.get_or_create(name=name) obj.tags.add(tag) del tag_dict['flat'] obj.raw_tags = tag_dict['text'] del tag_dict['text'] obj.tag_dict = json.dumps(tag_dict)
mit
erjohnso/ansible
lib/ansible/modules/network/illumos/dladm_vlan.py
33
5394
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: dladm_vlan
short_description: Manage VLAN interfaces on Solaris/illumos systems.
description:
    - Create or delete VLAN interfaces on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
    name:
        description:
            - VLAN interface name.
        required: true
    link:
        description:
            - VLAN underlying link name.
        required: true
    temporary:
        description:
            - Specifies that the VLAN interface is temporary. Temporary VLANs
              do not persist across reboots.
        required: false
        default: false
    vlan_id:
        description:
            - VLAN ID value for VLAN interface.
        required: false
        default: false
        aliases: [ "vid" ]
    state:
        description:
            - Create or delete Solaris/illumos VNIC.
        required: false
        default: "present"
        choices: [ "present", "absent" ]
'''

EXAMPLES = '''
- name: Create 'vlan42' VLAN over 'bnx0' link
  dladm_vlan: name=vlan42 link=bnx0 vlan_id=42 state=present

- name: Remove 'vlan1337' VLAN interface
  dladm_vlan: name=vlan1337 state=absent
'''

RETURN = '''
name:
    description: VLAN name
    returned: always
    type: string
    sample: vlan42
state:
    description: state of the target
    returned: always
    type: string
    sample: present
temporary:
    description: specifies if operation will persist across reboots
    returned: always
    type: boolean
    sample: True
link:
    description: VLAN's underlying link name
    returned: always
    type: string
    sample: e100g0
vlan_id:
    description: VLAN ID
    returned: always
    type: string
    sample: 42
'''

from ansible.module_utils.basic import AnsibleModule


class VLAN(object):
    """Thin wrapper around the illumos dladm(1M) command for VLAN links."""

    def __init__(self, module):
        self.module = module

        self.name = module.params['name']
        self.link = module.params['link']
        self.vlan_id = module.params['vlan_id']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def vlan_exists(self):
        """Return True if a VLAN link named self.name is already configured."""
        cmd = [self.module.get_bin_path('dladm', True)]

        cmd.append('show-vlan')
        cmd.append(self.name)

        (rc, _, _) = self.module.run_command(cmd)

        # dladm show-vlan exits non-zero when the named link does not exist.
        return rc == 0

    def create_vlan(self):
        """Create the VLAN link; return (rc, stdout, stderr) from dladm."""
        cmd = [self.module.get_bin_path('dladm', True)]

        cmd.append('create-vlan')

        if self.temporary:
            cmd.append('-t')

        cmd.append('-l')
        cmd.append(self.link)
        cmd.append('-v')
        # vlan_id may arrive as an int (argument_spec default is 0, and no
        # explicit type is declared); argv elements must be strings.
        cmd.append(str(self.vlan_id))
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def delete_vlan(self):
        """Delete the VLAN link; return (rc, stdout, stderr) from dladm."""
        cmd = [self.module.get_bin_path('dladm', True)]

        cmd.append('delete-vlan')

        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def is_valid_vlan_id(self):
        """Return True if vlan_id falls inside the 12-bit VLAN ID space."""
        return 0 <= int(self.vlan_id) <= 4095


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            link=dict(default=None, type='str'),
            vlan_id=dict(default=0, aliases=['vid']),
            temporary=dict(default=False, type='bool'),
            state=dict(default='present', choices=['absent', 'present']),
        ),
        required_if=[
            ['state', 'present', ['vlan_id', 'link', 'name']],
        ],
        supports_check_mode=True
    )

    vlan = VLAN(module)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = vlan.name
    result['link'] = vlan.link
    result['state'] = vlan.state
    result['temporary'] = vlan.temporary

    # vlan_id == 0 means "not specified"; only validate/report a real ID.
    if int(vlan.vlan_id) != 0:
        if not vlan.is_valid_vlan_id():
            module.fail_json(msg='Invalid VLAN id value',
                             name=vlan.name,
                             state=vlan.state,
                             link=vlan.link,
                             vlan_id=vlan.vlan_id)
        result['vlan_id'] = vlan.vlan_id

    if vlan.state == 'absent':
        if vlan.vlan_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = vlan.delete_vlan()
            if rc != 0:
                module.fail_json(name=vlan.name, msg=err, rc=rc)
    elif vlan.state == 'present':
        if not vlan.vlan_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = vlan.create_vlan()

        if rc is not None and rc != 0:
            module.fail_json(name=vlan.name, msg=err, rc=rc)

    # rc stays None when no dladm command had to run -> nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
Astron/yaml-cpp
test/gmock-1.7.0/gtest/test/gtest_output_test.py
1733
12005
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the text output of Google C++ Testing Framework.

SYNOPSIS
       gtest_output_test.py --build_dir=BUILD/DIR --gengolden
         # where BUILD/DIR contains the built gtest_output_test_ file.
       gtest_output_test.py --gengolden
       gtest_output_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gtest_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE_NAME:#: '.
  """

  return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)


def RemoveStackTraceDetails(output):
  """Removes all stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n',
                'Stack trace: (omitted)\n\n', output)


def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)


def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""

  return re.sub(r'\(\d+ ms', '(? ms', output)


def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """

  # some compilers output the name of type 'unsigned int' as 'unsigned'
  return re.sub(r'unsigned int', 'unsigned', test_output)


def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""

  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)

  return test_output


def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""

  output = re.sub(r'\d+ tests?, listed below',
                  '? tests, listed below', output)
  output = re.sub(r'\d+ FAILED TESTS',
                  '? FAILED TESTS', output)
  output = re.sub(r'\d+ tests? from \d+ test cases?',
                  '? tests from ? test cases', output)
  output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
                  r'? tests from \1', output)
  return re.sub(r'\d+ tests?\.', '? tests.', output)


def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """

  # NOTE(review): the bracketed banners below mirror gtest's console output
  # format ('[ RUN      ]', '[  FAILED  ]', '[       OK ]'); the exact inner
  # spacing matters for the match.
  test_output = re.sub(
      r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)


def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""

  output = ToUnixLineEnding(output)
  output = RemoveLocations(output)
  output = RemoveStackTraceDetails(output)
  output = RemoveTime(output)
  return output


def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """

  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)

  return p.output


def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd:  The shell command. A 2-tuple where element 0 is a dict of extra
              environment variables to set, and element 1 is a string with
              the command and any flags.
  """

  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))


def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""

  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))


# NOTE: the test binary is executed here, at import time, to discover which
# optional features it was compiled with; the SUPPORTS_* flags below drive
# how much of the golden file can be compared.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Stack-trace comparison is unconditionally disabled here, so traces are
# always stripped from both actual and golden output.
SUPPORTS_STACK_TRACES = False

CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)


class GTestOutputTest(gtest_test_utils.TestCase):

  def RemoveUnsupportedTests(self, test_output):
    # Strips from the golden output every test that this particular binary
    # was built without, so the comparison stays apples-to-apples.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)

    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads).  Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
mit
gtko/CouchPotatoServer
libs/git/commit.py
110
3498
# Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of organization nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .ref import Ref
from .files import ModifiedFile

# Full length of a hex-encoded SHA-1; shorter inputs are treated as
# abbreviated hashes and resolved via the repository.
SHA1_LENGTH = 40


class Commit(Ref):
    """A single commit in a git repository, identified by its SHA-1 hash."""

    def __init__(self, repo, sha):
        # Normalize to a full-length, lower-case hash; partial hashes are
        # resolved through the repository.
        sha = str(sha).lower()
        if len(sha) < SHA1_LENGTH:
            sha = repo._getCommitByPartialHash(sha).hash
        super(Commit, self).__init__(repo, sha)
        self.hash = sha

    def __repr__(self):
        return self.hash

    def __eq__(self, other):
        # NOTE(review): compares by hash string; a non-Commit Ref is resolved
        # to its head's hash first.  The original indentation was ambiguous in
        # this copy — confirm the else-branch binds to the outer isinstance
        # check (i.e. a Commit contributes its .hash) against upstream.
        if not isinstance(other, Commit):
            if isinstance(other, Ref):
                other = other.getHead().hash
        else:
            other = other.hash
        if other is None:
            return False
        if not isinstance(other, basestring):
            raise TypeError("Comparing %s and %s" % (type(self), type(other)))
        return (self.hash == other.lower())

    def getParents(self):
        # 'rev-list --parents -1' prints "<this-sha> <parent1> <parent2>...";
        # skip the first token (this commit itself).
        output = self.repo._getOutputAssertSuccess("rev-list %s --parents -1" % self)
        return [Commit(self.repo, sha.strip()) for sha in output.split()[1:]]

    def getChange(self):
        """Return the files modified by this commit as ModifiedFile objects."""
        returned = []
        for line in self.repo._getOutputAssertSuccess("show --pretty=format: --raw %s" % self).splitlines():
            line = line.strip()
            if not line:
                continue
            # Each --raw line ends with the affected path.
            filename = line.split()[-1]
            returned.append(ModifiedFile(filename))
        return returned
    getChangedFiles = getChange

    ############################ Misc. Commit attributes ###########################
    def _getCommitField(self, field):
        # 'field' is a git pretty-format placeholder such as %an or %at.
        return self.repo._executeGitCommandAssertSuccess("log -1 --pretty=format:%s %s" % (field, self)).stdout.read().strip()

    def getAuthorName(self):
        return self._getCommitField("%an")

    def getAuthorEmail(self):
        return self._getCommitField("%ae")

    def getDate(self):
        # Author timestamp as seconds since the epoch.
        return int(self._getCommitField("%at"))

    def getSubject(self):
        return self._getCommitField("%s")

    def getMessageBody(self):
        return self._getCommitField("%b")
gpl-3.0
aequitas/home-assistant
homeassistant/components/homekit/type_fans.py
10
6065
"""Class to hold all light accessories.""" import logging from pyhap.const import CATEGORY_FAN from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, ATTR_SPEED_LIST, DIRECTION_FORWARD, DIRECTION_REVERSE, DOMAIN, SERVICE_OSCILLATE, SERVICE_SET_DIRECTION, SERVICE_SET_SPEED, SUPPORT_DIRECTION, SUPPORT_OSCILLATE, SUPPORT_SET_SPEED) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON) from . import TYPES from .accessories import HomeAccessory, debounce from .const import ( CHAR_ACTIVE, CHAR_ROTATION_DIRECTION, CHAR_ROTATION_SPEED, CHAR_SWING_MODE, SERV_FANV2) from .util import HomeKitSpeedMapping _LOGGER = logging.getLogger(__name__) @TYPES.register('Fan') class Fan(HomeAccessory): """Generate a Fan accessory for a fan entity. Currently supports: state, speed, oscillate, direction. """ def __init__(self, *args): """Initialize a new Light accessory object.""" super().__init__(*args, category=CATEGORY_FAN) self._flag = {CHAR_ACTIVE: False, CHAR_ROTATION_DIRECTION: False, CHAR_SWING_MODE: False} self._state = 0 chars = [] features = self.hass.states.get(self.entity_id) \ .attributes.get(ATTR_SUPPORTED_FEATURES) if features & SUPPORT_DIRECTION: chars.append(CHAR_ROTATION_DIRECTION) if features & SUPPORT_OSCILLATE: chars.append(CHAR_SWING_MODE) if features & SUPPORT_SET_SPEED: speed_list = self.hass.states.get(self.entity_id) \ .attributes.get(ATTR_SPEED_LIST) self.speed_mapping = HomeKitSpeedMapping(speed_list) chars.append(CHAR_ROTATION_SPEED) serv_fan = self.add_preload_service(SERV_FANV2, chars) self.char_active = serv_fan.configure_char( CHAR_ACTIVE, value=0, setter_callback=self.set_state) self.char_direction = None self.char_speed = None self.char_swing = None if CHAR_ROTATION_DIRECTION in chars: self.char_direction = serv_fan.configure_char( CHAR_ROTATION_DIRECTION, value=0, setter_callback=self.set_direction) if CHAR_ROTATION_SPEED in chars: 
self.char_speed = serv_fan.configure_char( CHAR_ROTATION_SPEED, value=0, setter_callback=self.set_speed) if CHAR_SWING_MODE in chars: self.char_swing = serv_fan.configure_char( CHAR_SWING_MODE, value=0, setter_callback=self.set_oscillating) def set_state(self, value): """Set state if call came from HomeKit.""" _LOGGER.debug('%s: Set state to %d', self.entity_id, value) self._flag[CHAR_ACTIVE] = True service = SERVICE_TURN_ON if value == 1 else SERVICE_TURN_OFF params = {ATTR_ENTITY_ID: self.entity_id} self.call_service(DOMAIN, service, params) def set_direction(self, value): """Set state if call came from HomeKit.""" _LOGGER.debug('%s: Set direction to %d', self.entity_id, value) self._flag[CHAR_ROTATION_DIRECTION] = True direction = DIRECTION_REVERSE if value == 1 else DIRECTION_FORWARD params = {ATTR_ENTITY_ID: self.entity_id, ATTR_DIRECTION: direction} self.call_service(DOMAIN, SERVICE_SET_DIRECTION, params, direction) def set_oscillating(self, value): """Set state if call came from HomeKit.""" _LOGGER.debug('%s: Set oscillating to %d', self.entity_id, value) self._flag[CHAR_SWING_MODE] = True oscillating = value == 1 params = {ATTR_ENTITY_ID: self.entity_id, ATTR_OSCILLATING: oscillating} self.call_service(DOMAIN, SERVICE_OSCILLATE, params, oscillating) @debounce def set_speed(self, value): """Set state if call came from HomeKit.""" _LOGGER.debug('%s: Set speed to %d', self.entity_id, value) speed = self.speed_mapping.speed_to_states(value) params = {ATTR_ENTITY_ID: self.entity_id, ATTR_SPEED: speed} self.call_service(DOMAIN, SERVICE_SET_SPEED, params, speed) def update_state(self, new_state): """Update fan after state change.""" # Handle State state = new_state.state if state in (STATE_ON, STATE_OFF): self._state = 1 if state == STATE_ON else 0 if not self._flag[CHAR_ACTIVE] and \ self.char_active.value != self._state: self.char_active.set_value(self._state) self._flag[CHAR_ACTIVE] = False # Handle Direction if self.char_direction is not None: direction = 
new_state.attributes.get(ATTR_DIRECTION) if not self._flag[CHAR_ROTATION_DIRECTION] and \ direction in (DIRECTION_FORWARD, DIRECTION_REVERSE): hk_direction = 1 if direction == DIRECTION_REVERSE else 0 if self.char_direction.value != hk_direction: self.char_direction.set_value(hk_direction) self._flag[CHAR_ROTATION_DIRECTION] = False # Handle Speed if self.char_speed is not None: speed = new_state.attributes.get(ATTR_SPEED) hk_speed_value = self.speed_mapping.speed_to_homekit(speed) if hk_speed_value is not None and \ self.char_speed.value != hk_speed_value: self.char_speed.set_value(hk_speed_value) # Handle Oscillating if self.char_swing is not None: oscillating = new_state.attributes.get(ATTR_OSCILLATING) if not self._flag[CHAR_SWING_MODE] and \ oscillating in (True, False): hk_oscillating = 1 if oscillating else 0 if self.char_swing.value != hk_oscillating: self.char_swing.set_value(hk_oscillating) self._flag[CHAR_SWING_MODE] = False
apache-2.0
windyuuy/opera
chromium/src/third_party/trace-viewer/third_party/closure_linter/closure_linter/tokenutil.py
135
10976
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Token utility functions."""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

import copy

from closure_linter import javascripttokens
from closure_linter.common import tokens

# Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType


def GetFirstTokenInSameLine(token):
  """Returns the first token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the same line as token.
  """
  while not token.IsFirstInLine():
    token = token.previous
  return token


def GetFirstTokenInPreviousLine(token):
  """Returns the first token in the previous line as token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the previous line as token, or None if token is on the
    first line.
  """
  first_in_line = GetFirstTokenInSameLine(token)
  if first_in_line.previous:
    return GetFirstTokenInSameLine(first_in_line.previous)

  return None


def GetLastTokenInSameLine(token):
  """Returns the last token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The last token in the same line as token.
  """
  while not token.IsLastInLine():
    token = token.next
  return token


def GetAllTokensInSameLine(token):
  """Returns all tokens in the same line as the given token.

  Args:
    token: Any token in the line.

  Returns:
    All tokens on the same line as the given token.
  """
  first_token = GetFirstTokenInSameLine(token)
  last_token = GetLastTokenInSameLine(token)

  tokens_in_line = []
  while first_token != last_token:
    tokens_in_line.append(first_token)
    first_token = first_token.next
  tokens_in_line.append(last_token)

  return tokens_in_line


def CustomSearch(start_token, func, end_func=None, distance=None,
                 reverse=False):
  """Returns the first token where func is True within distance of this token.

  Args:
    start_token: The token to start searching from
    func: The function to call to test a token for applicability
    end_func: The function to call to test a token to determine whether to
        abort the search.
    distance: The number of tokens to look through before failing search.
        Must be positive.  If unspecified, will search until the end of the
        token chain
    reverse: When true, search the tokens before this one instead of the tokens
        after it

  Returns:
    The first token matching func within distance of this token, or None if no
    such token is found.
  """
  token = start_token
  if reverse:
    while token and (distance is None or distance > 0):
      previous = token.previous
      if previous:
        if func(previous):
          return previous
        if end_func and end_func(previous):
          return None

      token = previous
      if distance is not None:
        distance -= 1

  else:
    while token and (distance is None or distance > 0):
      next_token = token.next
      if next_token:
        if func(next_token):
          return next_token
        if end_func and end_func(next_token):
          return None

      token = next_token
      if distance is not None:
        distance -= 1

  return None


def Search(start_token, token_types, distance=None, reverse=False):
  """Returns the first token of type in token_types within distance.

  Args:
    start_token: The token to start searching from
    token_types: The allowable types of the token being searched for
    distance: The number of tokens to look through before failing search.
        Must be positive.  If unspecified, will search until the end of the
        token chain
    reverse: When true, search the tokens before this one instead of the tokens
        after it

  Returns:
    The first token of any type in token_types within distance of this token,
    or None if no such token is found.
  """
  return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
                      None, distance, reverse)


def SearchExcept(start_token, token_types, distance=None, reverse=False):
  """Returns the first token not of any type in token_types within distance.

  Args:
    start_token: The token to start searching from
    token_types: The unallowable types of the token being searched for
    distance: The number of tokens to look through before failing search.
        Must be positive.  If unspecified, will search until the end of the
        token chain
    reverse: When true, search the tokens before this one instead of the tokens
        after it

  Returns:
    The first token of any type in token_types within distance of this token,
    or None if no such token is found.
  """
  return CustomSearch(start_token,
                      lambda token: not token.IsAnyType(token_types),
                      None, distance, reverse)


def SearchUntil(start_token, token_types, end_types, distance=None,
                reverse=False):
  """Returns the first token of type in token_types before a token of end_type.

  Args:
    start_token: The token to start searching from.
    token_types: The allowable types of the token being searched for.
    end_types: Types of tokens to abort search if we find.
    distance: The number of tokens to look through before failing search.
        Must be positive.  If unspecified, will search until the end of the
        token chain
    reverse: When true, search the tokens before this one instead of the tokens
        after it

  Returns:
    The first token of any type in token_types within distance of this token
    before any tokens of type in end_type, or None if no such token is found.
  """
  return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
                      lambda token: token.IsAnyType(end_types),
                      distance, reverse)


def DeleteToken(token):
  """Deletes the given token from the linked list.

  Args:
    token: The token to delete
  """
  if token.previous:
    token.previous.next = token.next

  if token.next:
    token.next.previous = token.previous

    # Any later token whose metadata still points at the deleted token as its
    # last code token must be re-pointed at the deleted token's predecessor.
    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = token.metadata.last_code
      following_token = following_token.next


def DeleteTokens(token, token_count):
  """Deletes the given number of tokens starting with the given token.

  Args:
    token: The token to start deleting at.
    token_count: The total number of tokens to delete.
  """
  # Delete the successors first so 'token.next' stays valid until the end.
  for i in xrange(1, token_count):
    DeleteToken(token.next)
  DeleteToken(token)


def InsertTokenAfter(new_token, token):
  """Insert new_token after token.

  Args:
    new_token: A token to be added to the stream
    token: A token already in the stream
  """
  new_token.previous = token
  new_token.next = token.next

  new_token.metadata = copy.copy(token.metadata)

  if token.IsCode():
    new_token.metadata.last_code = token

  if new_token.IsCode():
    # The new token becomes the 'last code' of every following token that
    # previously pointed at the insertion point.
    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = new_token
      following_token = following_token.next

  token.next = new_token
  if new_token.next:
    new_token.next.previous = new_token

  if new_token.start_index is None:
    if new_token.line_number == token.line_number:
      new_token.start_index = token.start_index + len(token.string)
    else:
      new_token.start_index = 0

    # Shift the start indices of all later tokens on the same line.
    iterator = new_token.next
    while iterator and iterator.line_number == new_token.line_number:
      iterator.start_index += len(new_token.string)
      iterator = iterator.next


def InsertTokensAfter(new_tokens, token):
  """Insert multiple tokens after token.

  Args:
    new_tokens: An array of tokens to be added to the stream
    token: A token already in the stream
  """
  # TODO(user): It would be nicer to have InsertTokenAfter defer to here
  # instead of vice-versa.
  current_token = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, current_token)
    current_token = new_token


def InsertSpaceTokenAfter(token):
  """Inserts a space token after the given token.

  Args:
    token: The token to insert a space token after

  Returns:
    A single space token
  """
  space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
                                token.line_number)
  InsertTokenAfter(space_token, token)


def InsertBlankLineAfter(token):
  """Inserts a blank line after the given token.

  Args:
    token: The token to insert a blank line after

  Returns:
    A single space token
  """
  blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
                                token.line_number + 1)
  InsertLineAfter(token, [blank_token])


def InsertLineAfter(token, new_tokens):
  """Inserts a new line consisting of new_tokens after the given token.

  Args:
    token: The token to insert after.
    new_tokens: The tokens that will make up the new line.
  """
  insert_location = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, insert_location)
    insert_location = new_token

  # Update all subsequent line numbers.
  next_token = new_tokens[-1].next
  while next_token:
    next_token.line_number += 1
    next_token = next_token.next


def SplitToken(token, position):
  """Splits the token into two tokens at position.

  Args:
    token: The token to split
    position: The position to split at. Will be the beginning of second token.

  Returns:
    The new second token.
  """
  new_string = token.string[position:]
  token.string = token.string[:position]

  new_token = JavaScriptToken(new_string, token.type, token.line,
                              token.line_number)
  InsertTokenAfter(new_token, token)

  return new_token


def Compare(token1, token2):
  """Compares two tokens and determines their relative order.

  Args:
    token1: The first token to compare.
    token2: The second token to compare.

  Returns:
    A negative integer, zero, or a positive integer as the first token is
    before, equal, or after the second in the token stream.
  """
  if token2.line_number != token1.line_number:
    return token1.line_number - token2.line_number
  else:
    return token1.start_index - token2.start_index
bsd-3-clause
itbabu/django-filer
filer/fields/multistorage_file.py
12
2577
#-*- coding: utf-8 -*-
from django.core.files.base import File
from django.core.files.storage import Storage
from easy_thumbnails import fields as easy_thumbnails_fields, \
    files as easy_thumbnails_files
from filer import settings as filer_settings
from filer.utils.filer_easy_thumbnails import ThumbnailerNameMixin

# Storage backends keyed by file visibility ('public'/'private'), taken from
# the filer settings.
STORAGES = {
    'public': filer_settings.FILER_PUBLICMEDIA_STORAGE,
    'private': filer_settings.FILER_PRIVATEMEDIA_STORAGE,
}
THUMBNAIL_STORAGES = {
    'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_STORAGE,
    'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE,
}


def generate_filename_multistorage(instance, filename):
    """Compute the upload path for a filer file, honoring its visibility.

    Picks the public or private FILER_*_UPLOAD_TO setting based on
    instance.is_public; the setting may be a callable (called with
    (instance, filename)) or a plain path.
    """
    if instance.is_public:
        upload_to = filer_settings.FILER_PUBLICMEDIA_UPLOAD_TO
    else:
        upload_to = filer_settings.FILER_PRIVATEMEDIA_UPLOAD_TO
    if callable(upload_to):
        return upload_to(instance, filename)
    else:
        return upload_to


class MultiStorageFieldFile(ThumbnailerNameMixin,
                            easy_thumbnails_files.ThumbnailerFieldFile):
    """FieldFile that resolves its storage from the owner's visibility."""

    def __init__(self, instance, field, name):
        # Intentionally bypasses ThumbnailerFieldFile.__init__ and calls the
        # base File initializer directly, mirroring FieldFile's setup while
        # deferring storage selection to the properties below.
        File.__init__(self, None, name)
        self.instance = instance
        self.field = field
        self._committed = True
        self.storages = self.field.storages
        self.thumbnail_storages = self.field.thumbnail_storages

    @property
    def storage(self):
        # Resolved lazily: the owner's is_public flag may change over the
        # file's lifetime (e.g. when a file is made public/private).
        if self.instance.is_public:
            return self.storages['public']
        else:
            return self.storages['private']

    @property
    def source_storage(self):
        # NOTE(review): currently identical to `storage`; kept separate
        # presumably because easy_thumbnails distinguishes source vs.
        # thumbnail storages — confirm before merging the two.
        if self.instance.is_public:
            return self.storages['public']
        else:
            return self.storages['private']

    @property
    def thumbnail_storage(self):
        if self.instance.is_public:
            return self.thumbnail_storages['public']
        else:
            return self.thumbnail_storages['private']


class MultiStorageFileField(easy_thumbnails_fields.ThumbnailerField):
    """ThumbnailerField whose files switch storage on public/private state."""

    attr_class = MultiStorageFieldFile

    def __init__(self, verbose_name=None, name=None, upload_to_dict=None,
                 storages=None, thumbnail_storages=None, **kwargs):
        # Fall back to the module-level settings-driven storages when no
        # explicit mapping is supplied.
        self.storages = storages or STORAGES
        self.thumbnail_storages = thumbnail_storages or THUMBNAIL_STORAGES
        # Skips ThumbnailerField.__init__ on purpose (calls its parent
        # instead) so the per-instance storage dispatch above is used rather
        # than a single fixed storage.
        super(easy_thumbnails_fields.ThumbnailerField, self).__init__(
            verbose_name=verbose_name, name=name,
            upload_to=generate_filename_multistorage,
            storage=None, **kwargs)
mit
our-city-app/oca-backend
src/rogerthat_service_api_calls.py
1
1191
# -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ # Do not touch the indentation here from rogerthat.rpc.service import register_service_api_calls from rogerthat.service.api import app, communities, friends, messaging, qr, system, news, payments def register_all_service_api_calls(): register_service_api_calls(app) register_service_api_calls(communities) register_service_api_calls(friends) register_service_api_calls(messaging) register_service_api_calls(qr) register_service_api_calls(system) register_service_api_calls(news) register_service_api_calls(payments)
apache-2.0
fabianfreyer/libvirt
scripts/check-symsorting.py
7
3536
#!/usr/bin/env python3
# Copyright (C) 2012-2019 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.  If not, see
# <http://www.gnu.org/licenses/>.

import os.path
import re
import sys

if len(sys.argv) < 3:
    print("syntax: %s SRCDIR SYMFILE..." % sys.argv[0], file=sys.stderr)
    sys.exit(1)


def check_sorting(group, symfile, line, groupfile, lastgroup):
    """Validate one symbol block from a .syms file.

    Checks that the per-header blocks appear in case-insensitive order,
    that the header file named by the block exists under srcdir, and that
    the symbols inside the block are sorted. Returns True when any
    problem was reported.
    """
    sortedgroup = sorted(group, key=str.lower)
    issorted = True
    first = None
    last = None
    err = False

    # Check that groups are in order and groupfile exists
    if lastgroup is not None and lastgroup.lower() > groupfile.lower():
        print("Symbol block at %s:%s: block not sorted" %
              (symfile, line), file=sys.stderr)
        print("Move %s block before %s block" %
              (groupfile, lastgroup), file=sys.stderr)
        print("", file=sys.stderr)
        err = True

    if not os.path.exists(os.path.join(srcdir, groupfile)):
        print("Symbol block at %s:%s: %s not found" %
              (symfile, line, groupfile), file=sys.stderr)
        print("", file=sys.stderr)
        err = True

    # Check that symbols within a group are in order, remembering the
    # first and last out-of-place indices so only that span is reported.
    for i in range(len(group)):
        if sortedgroup[i] != group[i]:
            if first is None:
                first = i
            last = i
            issorted = False

    if not issorted:
        # BUGFIX: slicing is end-exclusive, so use last + 1; the previous
        # group[first:last] silently dropped the final mismatching symbol
        # from both the "actual" and "expected" listings.
        actual = group[first:last + 1]
        expect = sortedgroup[first:last + 1]
        print("Symbol block at %s:%s: symbols not sorted" %
              (symfile, line), file=sys.stderr)
        for g in actual:
            print("  %s" % g, file=sys.stderr)
        print("Correct ordering", file=sys.stderr)
        for g in expect:
            print("  %s" % g, file=sys.stderr)
        print("", file=sys.stderr)
        err = True

    return err


ret = 0
srcdir = sys.argv[1]
lastgroup = None
for symfile in sys.argv[2:]:
    with open(symfile, "r") as fh:
        lineno = 0
        groupfile = ""
        group = []
        thisline = 0
        for line in fh:
            thisline = thisline + 1
            line = line.strip()

            # A comment naming a header starts a new symbol block.
            filenamematch = re.search(r'''^#\s*((\w+\/)*(\w+\.h))\s*$''', line)
            if filenamematch is not None:
                groupfile = filenamematch.group(1)
            elif line == "":
                # Blank line terminates the current block: validate it.
                if len(group) > 0:
                    if check_sorting(group, symfile, lineno, groupfile,
                                     lastgroup):
                        ret = 1
                group = []
                lineno = thisline
                lastgroup = groupfile
            elif line[0] == '#':
                # Ignore comments
                pass
            else:
                line = line.strip(";")
                group.append(line)

        # Validate a trailing block not followed by a blank line.
        if len(group) > 0:
            if check_sorting(group, symfile, lineno, groupfile, lastgroup):
                ret = 1
    lastgroup = None

sys.exit(ret)
lgpl-2.1
brandond/ansible
lib/ansible/modules/network/nso/nso_show.py
69
3509
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'certified'
}

DOCUMENTATION = '''
---
module: nso_show
extends_documentation_fragment: nso
short_description: Displays data from Cisco NSO.
description:
  - This module provides support for displaying data from Cisco NSO.
requirements:
  - Cisco NSO version 3.4.12 or higher, 4.1.9 or higher, 4.2.6 or higher,
    4.3.7 or higher, 4.4.5 or higher, 4.5 or higher.
author: "Claes Nästén (@cnasten)"
options:
  path:
    description: Path to NSO data.
    required: true
  operational:
    description: >
      Controls whether or not operational data is included in the result.
    type: bool
    default: false
version_added: "2.5"
'''

EXAMPLES = '''
- name: Show devices including operational data
  nso_show:
    url: http://localhost:8080/jsonrpc
    username: username
    password: password
    path: /ncs:devices/device
    operational: true
'''

RETURN = '''
output:
  description: Configuration
  returned: success
  type: dict
'''

from ansible.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec
from ansible.module_utils.network.nso.nso import ModuleFailException, NsoException
from ansible.module_utils.basic import AnsibleModule


class NsoShow(object):
    """Thin wrapper around the NSO JSON-RPC show_config call."""

    # NSO releases known to support this module, newest first.
    REQUIRED_VERSIONS = [
        (4, 5),
        (4, 4, 5),
        (4, 3, 7),
        (4, 2, 6),
        (4, 1, 9),
        (3, 4, 12)
    ]

    def __init__(self, check_mode, client, path, operational):
        self._check_mode = check_mode
        self._client = client
        self._path = path
        self._operational = operational

    def main(self):
        """Fetch the configuration, or nothing at all in check mode."""
        if self._check_mode:
            return {}
        return self._client.show_config(self._path, self._operational)


def main():
    argument_spec = dict(
        path=dict(required=True, type='str'),
        operational=dict(required=False, type='bool', default=False)
    )
    argument_spec.update(nso_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    params = module.params

    client = connect(params)
    nso_show = NsoShow(
        module.check_mode, client,
        params['path'], params['operational'])
    try:
        verify_version(client, NsoShow.REQUIRED_VERSIONS)

        output = nso_show.main()
        client.logout()
        module.exit_json(changed=False, output=output)
    except (NsoException, ModuleFailException) as ex:
        # Both failure types are reported identically; always end the session.
        client.logout()
        module.fail_json(msg=ex.message)


if __name__ == '__main__':
    main()
gpl-3.0
gijs/inasafe
third_party/raven/utils/encoding.py
25
3510
""" raven.utils.encoding ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import warnings def force_unicode(s, encoding='utf-8', errors='strict'): """ Similar to smart_unicode, except that lazy instances are resolved to strings, rather than kept as lazy objects. Adapted from Django """ try: if not isinstance(s, basestring,): if hasattr(s, '__unicode__'): s = unicode(s) else: try: s = unicode(str(s), encoding, errors) except UnicodeEncodeError: if not isinstance(s, Exception): raise # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII data without special # handling to display as a string. We need to handle this # without raising a further exception. We do an # approximation to what the Exception's standard str() # output should be. s = ' '.join([force_unicode(arg, encoding, errors) for arg in s]) elif not isinstance(s, unicode): # Note: We use .decode() here, instead of unicode(s, encoding, # errors), so that if s is a SafeString, it ends up being a # SafeUnicode at the end. s = s.decode(encoding, errors) except UnicodeDecodeError, e: if not isinstance(s, Exception): raise UnicodeDecodeError(s, *e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. 
s = ' '.join([force_unicode(arg, encoding, errors) for arg in s]) return s def transform(value): from raven.utils.serializer import transform warnings.warn('You should switch to raven.utils.serializer.transform', DeprecationWarning) return transform(value) def to_unicode(value): try: value = unicode(force_unicode(value)) except (UnicodeEncodeError, UnicodeDecodeError): value = '(Error decoding value)' except Exception: # in some cases we get a different exception try: value = str(repr(type(value))) except Exception: value = '(Error decoding value)' return value def to_string(value): try: return str(value.decode('utf-8').encode('utf-8')) except: return to_unicode(value).encode('utf-8') def shorten(var, list_length=50, string_length=200): from raven.utils.serializer import transform var = transform(var) if isinstance(var, basestring) and len(var) > string_length: var = var[:string_length] + '...' elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length: # TODO: we should write a real API for storing some metadata with vars when # we get around to doing ref storage # TODO: when we finish the above, we should also implement this for dicts var = list(var)[:list_length] + ['...', '(%d more elements)' % (len(var) - list_length,)] return var
gpl-3.0
mattbasta/amo-validator
tests/compat/test_gecko10.py
1
2585
from helper import CompatTestCase
from validator.compat import FX10_DEFINITION


class TestFX10Compat(CompatTestCase):
    """Test that compatibility tests for Firefox 10 are properly executed."""

    VERSION = FX10_DEFINITION

    def _assert_flagged(self, script):
        """Run *script* and assert it is silent but raises a compat error."""
        self.run_script_for_compat(script)
        self.assert_silent()
        self.assert_compat_error(type_="error")

    def test_isSameNode(self):
        """Test that `isSameNode` is flagged in Gecko 10."""
        self._assert_flagged('alert(x.isSameNode(foo));')

    def test_replaceWholeText(self):
        """Test that `replaceWholeText` is flagged in Gecko 10."""
        self._assert_flagged('alert(x.replaceWholeText());')

    def test_isElementContentWhitespace(self):
        """Test that `isElementContentWhitespace` is flagged in Gecko 10."""
        self._assert_flagged('alert(x.isElementContentWhitespace);')

    def test_xml_docuemnt_properties(self):
        """
        Test that the `xmlEncoding`, `xmlVersion`, and `xmlStandalone`
        objects are dead for the document object in Gecko 10.
        """
        patterns = ["document.xmlEncoding", "document.xmlVersion",
                    "document.xmlStandalone", "content.document.xmlEncoding"]
        for pattern in patterns:
            self._assert_flagged("alert(%s);" % pattern)

    def test_xml_properties(self):
        """
        Test that the `xmlEncoding`, `xmlVersion`, and `xmlStandalone`
        objects are dead in Gecko 10.
        """
        patterns = ["foo.xmlEncoding", "foo.xmlVersion", "foo.xmlStandalone"]
        for pattern in patterns:
            self._assert_flagged("alert(%s);" % pattern)

    def test_interfaces(self):
        """Test that the removed XPCOM interfaces are flagged."""
        interfaces = ["nsIDOMNSHTMLFrameElement", "nsIDOMNSHTMLElement"]
        for interface in interfaces:
            self._assert_flagged("""
            var x = Components.classes[""].createInstance(
                        Components.interfaces.%s);
            """ % interface)

    def test_nsIBrowserHistory(self):
        """Test that `nsIBrowserHistory.lastPageVisited` is flagged."""
        for method in self.run_xpcom_for_compat(
                "nsIBrowserHistory", ["lastPageVisited"]):
            self.assert_silent()
            self.assert_compat_error(type_="error")
bsd-3-clause
wtodom/spotipi
spotipi/sandbox/sandbox.py
1
1256
from __future__ import print_function, unicode_literals import getpass import sys import threading import spotify if sys.argv[1:]: track_uri = sys.argv[1] else: track_uri = 'spotify:track:6xZtSE6xaBxmRozKA0F6TA' # Assuming a spotify_appkey.key in the current dir session = spotify.Session() # Process events in the background loop = spotify.EventLoop(session) loop.start() # Connect an audio sink audio = spotify.AlsaSink(session) # Events for coordination logged_in = threading.Event() end_of_track = threading.Event() def on_connection_state_updated(session): if session.connection.state is spotify.ConnectionState.LOGGED_IN: logged_in.set() def on_end_of_track(self): end_of_track.set() # Register event listeners session.on( spotify.SessionEvent.CONNECTION_STATE_UPDATED, on_connection_state_updated) session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track) user = raw_input("Username: ") password = getpass.getpass() session.login(user, password) logged_in.wait() # Play a track track = session.get_track(track_uri).load() session.player.load(track) session.player.play() # Wait for playback to complete or Ctrl+C try: while not end_of_track.wait(1): pass except KeyboardInterrupt: pass
mit
dawnbreak/hubzilla
library/blueimp_upload/server/gae-python/main.py
29
7574
# -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# https://opensource.org/licenses/MIT
#

from google.appengine.api import memcache, images
import json
import os
import re
import urllib
import webapp2

DEBUG = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
WEBSITE = 'https://blueimp.github.io/jQuery-File-Upload/'

MIN_FILE_SIZE = 1  # bytes
# Max file size is memcache limit (1MB) minus key size minus overhead:
MAX_FILE_SIZE = 999000  # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMB_MAX_WIDTH = 80
THUMB_MAX_HEIGHT = 80
THUMB_SUFFIX = '.' + str(THUMB_MAX_WIDTH) + 'x' + str(THUMB_MAX_HEIGHT) + '.png'
EXPIRATION_TIME = 300  # seconds
# If set to None, only allow redirects to the referer protocol+host.
# Set to a regexp for custom pattern matching against the redirect value:
REDIRECT_ALLOW_TARGET = None


class CORSHandler(webapp2.RequestHandler):
    """Base handler that attaches permissive CORS headers to every response."""

    def cors(self):
        headers = self.response.headers
        headers['Access-Control-Allow-Origin'] = '*'
        headers['Access-Control-Allow-Methods'] =\
            'OPTIONS, HEAD, GET, POST, DELETE'
        headers['Access-Control-Allow-Headers'] =\
            'Content-Type, Content-Range, Content-Disposition'

    def initialize(self, request, response):
        super(CORSHandler, self).initialize(request, response)
        self.cors()

    def json_stringify(self, obj):
        # Compact separators keep the payload small.
        return json.dumps(obj, separators=(',', ':'))

    def options(self, *args, **kwargs):
        # CORS preflight: headers are already set by initialize().
        pass


class UploadHandler(CORSHandler):
    """Accepts uploads, stores them in memcache and reports their URLs."""

    def validate(self, file):
        """Check size/type limits; on failure set file['error']."""
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def validate_redirect(self, redirect):
        """Allow a redirect target only if it matches the configured pattern
        or the referer's protocol+host (prevents open redirects)."""
        if redirect:
            if REDIRECT_ALLOW_TARGET:
                return REDIRECT_ALLOW_TARGET.match(redirect)
            referer = self.request.headers['referer']
            if referer:
                from urlparse import urlparse
                parts = urlparse(referer)
                redirect_allow_target = '^' + re.escape(
                    parts.scheme + '://' + parts.netloc + '/'
                )
                return re.match(redirect_allow_target, redirect)
        return False

    def get_file_size(self, file):
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        """Store the upload (and an optional thumbnail) in memcache.

        Returns (key, thumbnail_key); key is None when storing failed.
        """
        key = urllib.quote(info['type'].encode('utf-8'), '') +\
            '/' + str(hash(data)) +\
            '/' + urllib.quote(info['name'].encode('utf-8'), '')
        try:
            memcache.set(key, data, time=EXPIRATION_TIME)
        except Exception:
            # Failed to add to memcache (was a bare except; narrowed so
            # system-exiting exceptions are not swallowed).
            return (None, None)
        thumbnail_key = None
        if IMAGE_TYPES.match(info['type']):
            try:
                img = images.Image(image_data=data)
                img.resize(
                    width=THUMB_MAX_WIDTH,
                    height=THUMB_MAX_HEIGHT
                )
                thumbnail_data = img.execute_transforms()
                thumbnail_key = key + THUMB_SUFFIX
                memcache.set(
                    thumbnail_key,
                    thumbnail_data,
                    time=EXPIRATION_TIME
                )
            except Exception:
                # Failed to resize Image or add to memcache; the upload
                # itself is still served, just without a thumbnail.
                thumbnail_key = None
        return (key, thumbnail_key)

    def handle_upload(self):
        """Process every multipart field and return one result dict each."""
        results = []
        for name, fieldStorage in self.request.POST.items():
            if type(fieldStorage) is unicode:
                # Plain form fields (not files) arrive as unicode; skip them.
                continue
            result = {}
            result['name'] = urllib.unquote(fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                key, thumbnail_key = self.write_blob(
                    fieldStorage.value,
                    result
                )
                if key is not None:
                    result['url'] = self.request.host_url + '/' + key
                    result['deleteUrl'] = result['url']
                    result['deleteType'] = 'DELETE'
                    if thumbnail_key is not None:
                        result['thumbnailUrl'] = self.request.host_url +\
                            '/' + thumbnail_key
                else:
                    result['error'] = 'Failed to store uploaded file.'
            results.append(result)
        return results

    def head(self):
        pass

    def get(self):
        self.redirect(WEBSITE)

    def post(self):
        # Browsers that cannot send DELETE tunnel it through POST.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = self.json_stringify(result)
        redirect = self.request.get('redirect')
        if self.validate_redirect(redirect):
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        # BUGFIX: default '' avoids a TypeError ('in None') when the
        # client sends no Accept header.
        if 'application/json' in self.request.headers.get('Accept', ''):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)


class FileHandler(CORSHandler):
    """Serves and deletes previously uploaded files from memcache."""

    def normalize(self, str):
        return urllib.quote(urllib.unquote(str), '')

    def get(self, content_type, data_hash, file_name):
        content_type = self.normalize(content_type)
        file_name = self.normalize(file_name)
        key = content_type + '/' + data_hash + '/' + file_name
        data = memcache.get(key)
        if data is None:
            return self.error(404)
        # Prevent browsers from MIME-sniffing the content-type:
        self.response.headers['X-Content-Type-Options'] = 'nosniff'
        content_type = urllib.unquote(content_type)
        if not IMAGE_TYPES.match(content_type):
            # Force a download dialog for non-image types:
            content_type = 'application/octet-stream'
        elif file_name.endswith(THUMB_SUFFIX):
            content_type = 'image/png'
        self.response.headers['Content-Type'] = content_type
        # Cache for the expiration time:
        self.response.headers['Cache-Control'] = 'public,max-age=%d' \
            % EXPIRATION_TIME
        self.response.write(data)

    def delete(self, content_type, data_hash, file_name):
        content_type = self.normalize(content_type)
        file_name = self.normalize(file_name)
        key = content_type + '/' + data_hash + '/' + file_name
        result = {key: memcache.delete(key)}
        content_type = urllib.unquote(content_type)
        if IMAGE_TYPES.match(content_type):
            thumbnail_key = key + THUMB_SUFFIX
            result[thumbnail_key] = memcache.delete(thumbnail_key)
        # BUGFIX: default '' avoids a TypeError when Accept is missing.
        if 'application/json' in self.request.headers.get('Accept', ''):
            self.response.headers['Content-Type'] = 'application/json'
        s = self.json_stringify(result)
        self.response.write(s)


app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/(.+)/([^/]+)/([^/]+)', FileHandler)
    ],
    debug=DEBUG
)
mit
listamilton/supermilton.repository
plugin.video.sonypictures/cloudflare.py
221
2812
import sys,traceback,urllib2,re, urllib,xbmc

def createCookie(url,cj=None,agent='Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0'):
    """Solve a Cloudflare "I'm Under Attack" JavaScript challenge for *url*.

    Fetches the page, evaluates the obfuscated arithmetic challenge found in
    the HTML, submits the answer to /cdn-cgi/l/chk_jschl so the clearance
    cookie lands in *cj*, and returns the body of the FIRST (challenge) page.
    Any failure is printed and the best-effort page data returned instead.
    NOTE(review): assumes a Kodi runtime (xbmc.sleep) — confirm for reuse.
    """
    urlData=''
    try:
        import urlparse,cookielib,urllib2

        # Cloudflare answers the challenge page with a 503; disable redirect
        # and error processing so the body can still be read.
        class NoRedirection(urllib2.HTTPErrorProcessor):
            def http_response(self, request, response):
                return response

        def parseJSString(s):
            # Emulate Cloudflare's JS arithmetic: the obfuscated snippets are
            # built from !+[] / !![] / [] tokens, each mapping to 1/1/0; the
            # rewritten expression is then eval'd as Python.
            try:
                offset=1 if s[0]=='+' else 0
                val = int(eval(s.replace('!+[]','1').replace('!![]','1').replace('[]','0').replace('(','str(')[offset:]))
                return val
            except:
                pass

        #agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
        if cj==None:
            cj = cookielib.CookieJar()
        opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [('User-Agent', agent)]
        response = opener.open(url)
        result=urlData = response.read()
        response.close()
        # print result
        # print response.headers

        # Scrape the challenge parameters out of the HTML.
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        # Apply each JS mutation (+=, -=, *=...) to the running value; the
        # operator is the last character before '=' in each statement.
        lines = builder.split(';')
        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
        # print urlparse.urlparse(url).netloc

        # Cloudflare's final answer adds the hostname length to the result.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)

        u='/'.join(url.split('/')[:-1])
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (u, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval=re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (u,urllib.quote_plus(passval), jschl, answer)

        xbmc.sleep(4*1000)  ##sleep so that the call work
        # print query
        # import urllib2
        # opener = urllib2.build_opener(NoRedirection,urllib2.HTTPCookieProcessor(cj))
        # opener.addheaders = [('User-Agent', agent)]
        #print opener.headers
        response = opener.open(query)
        # print response.headers
        #cookie = str(response.headers.get('Set-Cookie'))
        #response = opener.open(url)
        #print cj
        # print response.read()
        response.close()
        return urlData
    except:
        traceback.print_exc(file=sys.stdout)
        return urlData
gpl-2.0
kalamaico/FootballManager
resources/random_generator.py
1
1176
#!python import random, time class RandomGenerator: def __init__(self): random.seed(time.clock()) def generate_uniform(self, min_v, max_v): return random.uniform(min_v, max_v) def generate_int(self, min_v, max_v): return random.randint(min_v, max_v) def generate_int_sequence_no_repetitions(self, length, min_v, max_v): ret = set() if length > max_v - min_v + 1: raise ValueError("Requested a length of " + str(length) + " for an interval of " + str(max_v - min_v + 1)) while len(ret) < length: val = ret.add(self.generate_int(min_v, max_v)) return sorted(ret) def generate_int_sequence(self, length, min_v, max_v): ret = list() while len(ret) < length: ret.append(self.generate_int(min_v, max_v)) return ret class RandomGeneratorDeterministic(RandomGenerator): def __init__(self): random.seed(0) def generate_uniform(self, min_v, max_v): return (max_v - min_v) /2 def generate_int(self, min_v, max_v): return 42
lgpl-3.0
lenstr/rethinkdb
external/v8_3.30.33.16/testing/gtest/test/gtest_output_test.py
496
12051
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the text output of Google C++ Testing Framework.

SYNOPSIS
       gtest_output_test.py --build_dir=BUILD/DIR --gengolden
         # where BUILD/DIR contains the built gtest_output_test_ file.
       gtest_output_test.py --gengolden
       gtest_output_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gtest_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each command is a (extra-environment, argv) pair run against the test
# binary; their combined output is compared against the golden file.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE_NAME:#: '.
  """

  return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)


def RemoveStackTraceDetails(output):
  """Removes all stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n',
                'Stack trace: (omitted)\n\n', output)


def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)


def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""

  return re.sub(r'\(\d+ ms', '(? ms', output)


def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """

  # some compilers output the name of type 'unsigned int' as 'unsigned'
  return re.sub(r'unsigned int', 'unsigned', test_output)


def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""

  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)

  return test_output


def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""

  output = re.sub(r'\d+ tests?, listed below',
                  '? tests, listed below', output)
  output = re.sub(r'\d+ FAILED TESTS',
                  '? FAILED TESTS', output)
  output = re.sub(r'\d+ tests? from \d+ test cases?',
                  '? tests from ? test cases', output)
  output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
                  r'? tests from \1', output)
  return re.sub(r'\d+ tests?\.', '? tests.', output)


def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """

  test_output = re.sub(
      r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)


def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""

  output = ToUnixLineEnding(output)
  output = RemoveLocations(output)
  output = RemoveStackTraceDetails(output)
  output = RemoveTime(output)
  return output


def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """

  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)

  return p.output


def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd:  The shell command. A 2-tuple where element 0 is a dict of extra
              environment variables to set, and element 1 is a string with
              the command and any flags.
  """

  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))


def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""

  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))


# Probe the test binary once to discover which optional features it was
# built with; the golden comparison is relaxed accordingly below.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False

CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            not IS_WINDOWS)


class GTestOutputTest(gtest_test_utils.TestCase):
  # Compares the normalized output of the test binary with the golden file.

  def RemoveUnsupportedTests(self, test_output):
    # Strip from the golden file the output of any test family the current
    # binary was built without, so the comparison stays meaningful.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)

    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # Regenerate the golden file instead of comparing against it.
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads).  Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
agpl-3.0
rofl0r/gdb
gdb/testsuite/gdb.python/py-section-script.py
6
1962
# Copyright (C) 2010, 2011 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This file is part of the GDB testsuite. import re class pp_ss: def __init__(self, val): self.val = val def to_string(self): return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">" def lookup_function (val): "Look-up and return a pretty-printer that can print val." # Get the type. type = val.type # If it points to a reference, get the reference. if type.code == gdb.TYPE_CODE_REF: type = type.target () # Get the unqualified type, stripped of typedefs. type = type.unqualified ().strip_typedefs () # Get the type name. typename = type.tag if typename == None: return None # Iterate over local dictionary of types to determine # if a printer is registered for that type. Return an # instantiation of the printer if found. for function in pretty_printers_dict: if function.match (typename): return pretty_printers_dict[function] (val) # Cannot find a pretty printer. Return None. return None def register_pretty_printers (): pretty_printers_dict[re.compile ('^ss$')] = pp_ss pretty_printers_dict = {} register_pretty_printers () gdb.current_progspace().pretty_printers.append (lookup_function)
gpl-2.0
chaomodus/pymetasurf
examples/shape.py
1
6757
# An example of using pymetasurf to create objects in Panda3d # Written by Cas Rusnov <rusnovn@gmail.com> import pymetasurf import pymetasurf.shapes as shapes from pandac.PandaModules import loadPrcFile configfiles = ["./Config.prc",] for prc in configfiles: loadPrcFile(prc) import sys from direct.showbase.ShowBase import ShowBase from direct.showbase import DirectObject from direct.task import Task from direct.actor.Actor import Actor from direct.interval.IntervalGlobal import Sequence from panda3d.core import Point3, PointLight, VBase4, AmbientLight, Spotlight, Vec4, DirectionalLight, Material from panda3d.core import Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, GeomNode, GeomTriangles from math import pi, sin, cos, tan, sqrt, exp, radians class spherical_coordinate(object): def __init__(self, rho=1, theta=0, phi=0): self.rho = rho self.theta = theta self.phi = phi @property def x(self): return self.rho * sin(radians(self.theta)) * cos(radians(self.phi)) @property def y(self): return self.rho * sin(radians(self.theta)) * sin(radians(self.phi)) @property def z(self): return self.rho * cos(radians(self.theta)) def __str__(self): return "<scoord %d %d %d>" % (self.rho, self.theta, self.phi) def sqr(x): return x**2 class meta_generic(pymetasurf.MetaSurface): def __init__(self, shapespec): pymetasurf.MetaSurface.__init__(self) self.set_inside(shapespec[0]) self.set_bounds(*shapespec[1]) self.set_resolution(*shapespec[2]) self.func = shapespec[3] def eval_callback(self, x, y, z): return self.func(x,y,z); class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) self.render.setShaderAuto() ambientLight = AmbientLight('ambientLight') ambientLight.setColor(Vec4(0.25, 0.25, 0.25, 1)) ambientLightNP = render.attachNewNode(ambientLight) self.render.setLight(ambientLightNP) # Directional light 01 directionalLight = DirectionalLight('directionalLight') directionalLight.setColor(Vec4(0.5, 0.5, 0.5, 1)) directionalLightNP = 
self.render.attachNewNode(directionalLight) self.dlnp = directionalLightNP # This light is facing backwards, towards the camera. directionalLightNP.setHpr(180, -20, 0) self.render.setLight(directionalLightNP) n = self.render.attachNewNode(um_GN) myMaterial = Material() myMaterial.setShininess(30.0) #Make this material shiny myMaterial.setAmbient(VBase4(1,1,1,1)) myMaterial.setDiffuse(VBase4(0.5,0.6,0.5,1)) myMaterial.setSpecular(VBase4(1,1,1,1)) myMaterial.setTwoside(True) n.setMaterial(myMaterial) n.reparentTo(self.render) # Add the spinCameraTask procedure to the task manager. self.taskMgr.add(self.spinCameraTask, "SpinCameraTask") class KeyMgr(DirectObject.DirectObject): def __init__(self): self.states = {'up':False,'down':False,'left':False,'right':False,'pgup':False,'pgdn':False, 'a':False} self.accept('arrow_up',self.state, ['up', True]) self.accept('arrow_up-up',self.state, ['up', False]) self.accept('arrow_down',self.state, ['down', True]) self.accept('arrow_down-up',self.state, ['down', False]) self.accept('arrow_left',self.state, ['left', True]) self.accept('arrow_left-up',self.state, ['left', False]) self.accept('arrow_right',self.state, ['right', True]) self.accept('arrow_right-up',self.state, ['right', False]) self.accept('page_up',self.state, ['pgup',True]) self.accept('page_up-up',self.state, ['pgup',False]) self.accept('page_down',self.state, ['pgdn',True]) self.accept('page_down-up',self.state, ['pgdn',False]) self.accept('a', self.impulse, ['a',]) self.accept('escape', sys.exit ) # exit on esc self.impulses = list() def state(self, key, st): self.states[key] = st def impulse(self, key): self.impulses.append(key) def __getitem__(self, key): return self.states.__getitem__(key) self.keymgr = KeyMgr() self.auto = False self.campos = spherical_coordinate(10,90,90) # Define a procedure to move the camera. 
def spinCameraTask(self, task): if self.keymgr['up']: self.campos.theta += 1 if self.keymgr['down']: self.campos.theta -= 1 if self.keymgr['left'] or self.auto: self.campos.phi -= 1 if self.keymgr['right']: self.campos.phi += 1 if self.keymgr['pgup']: self.campos.rho += 0.25 if self.keymgr['pgdn']: self.campos.rho -= 0.25 while (len(self.keymgr.impulses) > 0): k = self.keymgr.impulses.pop() if k == 'a': self.auto = not self.auto self.camera.setPos(self.campos.x, self.campos.y, self.campos.z) self.dlnp.setPos(self.campos.x, self.campos.y, self.campos.z) #self.camera.setHpr(angleDegrees, 0, 0) self.camera.lookAt(0,0,0) self.dlnp.lookAt(0,0,0) return Task.cont if __name__ == '__main__': if len(sys.argv) < 2: print "specify a shape. -l for list." sys.exit(-1) if sys.argv[1] == '-l': for sh in shapes.keys(): print sh sys.exit(0) shp = sys.argv[1] if not shapes.has_key(shp): print "cannot find shape, -l for list" sys.exit(-1) um = meta_generic(shapes[shp]) um.polygonize() format=GeomVertexFormat.getV3n3() vdata=GeomVertexData("vertices", format, Geom.UHStatic) vertexWriter=GeomVertexWriter(vdata, "vertex") normalWriter=GeomVertexWriter(vdata, "normal") print len(um.tri_list), "vertices" print len(um.tri_list) / 3.0, "tris" for p in um.tri_list: vertexWriter.addData3f(p[0],p[1],p[2]) for p in um.normal_list: normalWriter.addData3f(p[0],p[1],p[2]) for p in reversed(um.tri_list): vertexWriter.addData3f(p[0],p[1],p[2]) for p in reversed(um.normal_list): normalWriter.addData3f(-1 * p[0], -1 * p[1], -1 *p[2]) tris = GeomTriangles(Geom.UHStatic) tris.addConsecutiveVertices(0, len(um.tri_list)) tris.closePrimitive() um_geom = Geom(vdata) um_geom.addPrimitive(tris) um_GN = GeomNode(shp) um_GN.addGeom(um_geom) app = MyApp() app.run()
lgpl-2.1
Edraak/edx-platform
lms/djangoapps/lti_provider/users.py
63
5258
""" LTI user management functionality. This module reconciles the two identities that an individual has in the campus LMS platform and on edX. """ import string import random import uuid from django.conf import settings from django.contrib.auth import authenticate, login from django.contrib.auth.models import User from django.core.exceptions import PermissionDenied from django.db import IntegrityError, transaction from lti_provider.models import LtiUser from student.models import UserProfile def authenticate_lti_user(request, lti_user_id, lti_consumer): """ Determine whether the user specified by the LTI launch has an existing account. If not, create a new Django User model and associate it with an LtiUser object. If the currently logged-in user does not match the user specified by the LTI launch, log out the old user and log in the LTI identity. """ try: lti_user = LtiUser.objects.get( lti_user_id=lti_user_id, lti_consumer=lti_consumer ) except LtiUser.DoesNotExist: # This is the first time that the user has been here. Create an account. lti_user = create_lti_user(lti_user_id, lti_consumer) if not (request.user.is_authenticated() and request.user == lti_user.edx_user): # The user is not authenticated, or is logged in as somebody else. # Switch them to the LTI user switch_user(request, lti_user, lti_consumer) def create_lti_user(lti_user_id, lti_consumer): """ Generate a new user on the edX platform with a random username and password, and associates that account with the LTI identity. """ edx_password = str(uuid.uuid4()) created = False while not created: try: edx_user_id = generate_random_edx_username() edx_email = "{}@{}".format(edx_user_id, settings.LTI_USER_EMAIL_DOMAIN) with transaction.atomic(): edx_user = User.objects.create_user( username=edx_user_id, password=edx_password, email=edx_email, ) # A profile is required if PREVENT_CONCURRENT_LOGINS flag is set. 
# TODO: We could populate user information from the LTI launch here, # but it's not necessary for our current uses. edx_user_profile = UserProfile(user=edx_user) edx_user_profile.save() created = True except IntegrityError: # The random edx_user_id wasn't unique. Since 'created' is still # False, we will retry with a different random ID. pass lti_user = LtiUser( lti_consumer=lti_consumer, lti_user_id=lti_user_id, edx_user=edx_user ) lti_user.save() return lti_user def switch_user(request, lti_user, lti_consumer): """ Log out the current user, and log in using the edX identity associated with the LTI ID. """ edx_user = authenticate( username=lti_user.edx_user.username, lti_user_id=lti_user.lti_user_id, lti_consumer=lti_consumer ) if not edx_user: # This shouldn't happen, since we've created edX accounts for any LTI # users by this point, but just in case we can return a 403. raise PermissionDenied() login(request, edx_user) def generate_random_edx_username(): """ Create a valid random edX user ID. An ID is at most 30 characters long, and can contain upper and lowercase letters and numbers. :return: """ allowable_chars = string.ascii_letters + string.digits username = '' for _index in range(30): username = username + random.SystemRandom().choice(allowable_chars) return username class LtiBackend(object): """ A Django authentication backend that authenticates users via LTI. This backend will only return a User object if it is associated with an LTI identity (i.e. the user was created by the create_lti_user method above). """ def authenticate(self, username=None, lti_user_id=None, lti_consumer=None): """ Try to authenticate a user. This method will return a Django user object if a user with the corresponding username exists in the database, and if a record that links that user with an LTI user_id field exists in the LtiUser collection. If such a user is not found, the method returns None (in line with the authentication backend specification). 
""" try: edx_user = User.objects.get(username=username) except User.DoesNotExist: return None try: LtiUser.objects.get( edx_user_id=edx_user.id, lti_user_id=lti_user_id, lti_consumer=lti_consumer ) except LtiUser.DoesNotExist: return None return edx_user def get_user(self, user_id): """ Return the User object for a user that has already been authenticated by this backend. """ try: return User.objects.get(id=user_id) except User.DoesNotExist: return None
agpl-3.0
oleiade/Hurdles
hurdles/referee/importer.py
1
3834
# -*- coding: utf-8 -*- # Copyright (c) 2012 theo crevon # # See the file LICENSE for copying permission. # # Content of this module was extracted from python nose # project importer.py module. # # See : https://github.com/nose-devs/nose/blob/master/nose/importer.py # import os import sys import imp class Importer(object): """An importer class that does only path-specific imports. That is, the given module is not searched for on sys.path, but only at the path or in the directory specified. """ def importFromPath(self, path, fqname): """Import a dotted-name package whose tail is at path. In other words, given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then bar from path/to/foo/bar, returning bar. """ # find the base dir of the package path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep) name_parts = fqname.split('.') if path_parts[-1].startswith('__init__'): path_parts.pop() path_parts = path_parts[:-(len(name_parts))] dir_path = os.sep.join(path_parts) # then import fqname starting from that dir return self.importFromDir(dir_path, fqname) def importFromDir(self, dir, fqname): """Import a module *only* from path, ignoring sys.path and reloading if the version in sys.modules is not the one we want. """ dir = os.path.normpath(os.path.abspath(dir)) # log.debug("Import %s from %s", fqname, dir) # FIXME reimplement local per-dir cache? 
# special case for __main__ if fqname == '__main__': return sys.modules[fqname] path = [dir] parts = fqname.split('.') part_fqname = '' mod = parent = fh = None for part in parts: if part_fqname == '': part_fqname = part else: part_fqname = "%s.%s" % (part_fqname, part) try: imp.acquire_lock() fh, filename, desc = imp.find_module(part, path) old = sys.modules.get(part_fqname) if old is not None: # test modules frequently have name overlap; make sure # we get a fresh copy of anything we are trying to load # from a new path if (self.sameModule(old, filename) or (getattr(old, '__path__', None))): mod = old else: del sys.modules[part_fqname] mod = imp.load_module(part_fqname, fh, filename, desc) else: mod = imp.load_module(part_fqname, fh, filename, desc) finally: if fh: fh.close() imp.release_lock() if parent: setattr(parent, part, mod) if hasattr(mod, '__path__'): path = mod.__path__ parent = mod return mod def sameModule(self, mod, filename): mod_paths = [] if hasattr(mod, '__path__'): for path in mod.__path__: mod_paths.append(os.path.dirname( os.path.normpath( os.path.abspath(path)))) elif hasattr(mod, '__file__'): mod_paths.append(os.path.dirname( os.path.normpath( os.path.abspath(mod.__file__)))) else: # builtin or other module-like object that # doesn't have __file__; must be new return False new_path = os.path.dirname(os.path.normpath(filename)) for mod_path in mod_paths: if mod_path == new_path: return True return False
mit
packenx/PythonInstaller
app/src/main/assets/arm/static/python/lib/python2.7/site-packages/dpkt/tcp.py
15
3226
# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $ """Transmission Control Protocol.""" import dpkt # TCP control flags TH_FIN = 0x01 # end of data TH_SYN = 0x02 # synchronize sequence numbers TH_RST = 0x04 # reset connection TH_PUSH = 0x08 # push TH_ACK = 0x10 # acknowledgment number set TH_URG = 0x20 # urgent pointer set TH_ECE = 0x40 # ECN echo, RFC 3168 TH_CWR = 0x80 # congestion window reduced TCP_PORT_MAX = 65535 # maximum port TCP_WIN_MAX = 65535 # maximum (unscaled) window class TCP(dpkt.Packet): __hdr__ = ( ('sport', 'H', 0xdead), ('dport', 'H', 0), ('seq', 'I', 0xdeadbeefL), ('ack', 'I', 0), ('off_x2', 'B', ((5 << 4) | 0)), ('flags', 'B', TH_SYN), ('win', 'H', TCP_WIN_MAX), ('sum', 'H', 0), ('urp', 'H', 0) ) opts = '' def _get_off(self): return self.off_x2 >> 4 def _set_off(self, off): self.off_x2 = (off << 4) | (self.off_x2 & 0xf) off = property(_get_off, _set_off) def __len__(self): return self.__hdr_len__ + len(self.opts) + len(self.data) def __str__(self): return self.pack_hdr() + self.opts + str(self.data) def unpack(self, buf): dpkt.Packet.unpack(self, buf) ol = ((self.off_x2 >> 4) << 2) - self.__hdr_len__ if ol < 0: raise dpkt.UnpackError, 'invalid header length' self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol] self.data = buf[self.__hdr_len__ + ol:] # Options (opt_type) - http://www.iana.org/assignments/tcp-parameters TCP_OPT_EOL = 0 # end of option list TCP_OPT_NOP = 1 # no operation TCP_OPT_MSS = 2 # maximum segment size TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072 TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018 TCP_OPT_SACK = 5 # SACK, RFC 2018 TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072 TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072 TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323 TCP_OPT_POCONN = 9 # partial order conn, RFC 1693 TCP_OPT_POSVC = 10 # partial order service, RFC 1693 TCP_OPT_CC = 11 # connection count, RFC 1644 TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644 TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644 TCP_OPT_ALTSUM = 14 # 
alt checksum request, RFC 1146 TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146 TCP_OPT_SKEETER = 16 # Skeeter TCP_OPT_BUBBA = 17 # Bubba TCP_OPT_TRAILSUM = 18 # trailer checksum TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385 TCP_OPT_SCPS = 20 # SCPS capabilities TCP_OPT_SNACK = 21 # selective negative acks TCP_OPT_REC = 22 # record boundaries TCP_OPT_CORRUPT = 23 # corruption experienced TCP_OPT_SNAP = 24 # SNAP TCP_OPT_TCPCOMP = 26 # TCP compression filter TCP_OPT_MAX = 27 def parse_opts(buf): """Parse TCP option buffer into a list of (option, data) tuples.""" opts = [] while buf: o = ord(buf[0]) if o > TCP_OPT_NOP: try: l = ord(buf[1]) d, buf = buf[2:l], buf[l:] except ValueError: #print 'bad option', repr(str(buf)) opts.append(None) # XXX break else: d, buf = '', buf[1:] opts.append((o,d)) return opts
gpl-3.0
bvamanan/ns3
src/visualizer/visualizer/plugins/interface_statistics.py
182
6565
import gtk import ns.core import ns.network from visualizer.base import InformationWindow NODE_STATISTICS_MEMORY = 10 class StatisticsCollector(object): """ Collects interface statistics for all nodes. """ class NetDevStats(object): __slots__ = ['rxPackets', 'rxBytes', 'txPackets', 'txBytes', 'rxPacketRate', 'rxBitRate', 'txPacketRate', 'txBitRate'] def __init__(self, visualizer): self.node_statistics = {} # nodeid -> list(raw statistics) self.visualizer = visualizer def simulation_periodic_update(self, viz): nodes_statistics = viz.simulation.sim_helper.GetNodesStatistics() for stats in nodes_statistics: try: raw_stats_list = self.node_statistics[stats.nodeId] except KeyError: raw_stats_list = [] self.node_statistics[stats.nodeId] = raw_stats_list raw_stats_list.append(stats.statistics) while len(raw_stats_list) > NODE_STATISTICS_MEMORY: raw_stats_list.pop(0) def get_interface_statistics(self, nodeId): try: raw_stats_list = self.node_statistics[nodeId] except KeyError: return [] if len(raw_stats_list) < NODE_STATISTICS_MEMORY: return [] assert len(raw_stats_list) == NODE_STATISTICS_MEMORY tx_packets1 = [] # transmitted packets, one value per interface rx_packets1 = [] tx_bytes1 = [] rx_bytes1 = [] for iface, stats in enumerate(raw_stats_list[0]): tx_packets1.append(stats.transmittedPackets) tx_bytes1.append(stats.transmittedBytes) rx_packets1.append(stats.receivedPackets) rx_bytes1.append(stats.receivedBytes) retval = [] k = self.visualizer.sample_period*(NODE_STATISTICS_MEMORY-1) for iface, stats in enumerate(raw_stats_list[-1]): outStat = self.NetDevStats() outStat.txPackets = stats.transmittedPackets outStat.txBytes = stats.transmittedBytes outStat.rxPackets = stats.receivedPackets outStat.rxBytes = stats.receivedBytes outStat.txPacketRate = (stats.transmittedPackets - tx_packets1[iface])/k outStat.rxPacketRate = (stats.receivedPackets - rx_packets1[iface])/k outStat.txBitRate = (stats.transmittedBytes - tx_bytes1[iface])*8/k outStat.rxBitRate = 
(stats.receivedBytes - rx_bytes1[iface])*8/k retval.append(outStat) return retval class ShowInterfaceStatistics(InformationWindow): ( COLUMN_INTERFACE, COLUMN_TX_PACKETS, COLUMN_TX_BYTES, COLUMN_TX_PACKET_RATE, COLUMN_TX_BIT_RATE, COLUMN_RX_PACKETS, COLUMN_RX_BYTES, COLUMN_RX_PACKET_RATE, COLUMN_RX_BIT_RATE, ) = range(9) def __init__(self, visualizer, node_index, statistics_collector): InformationWindow.__init__(self) self.win = gtk.Dialog(parent=visualizer.window, flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR, buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)) self.win.connect("response", self._response_cb) self.win.set_title("Statistics for node %i" % node_index) self.visualizer = visualizer self.statistics_collector = statistics_collector self.node_index = node_index self.viz_node = visualizer.get_node(node_index) self.table_model = gtk.ListStore(*([str]*13)) treeview = gtk.TreeView(self.table_model) treeview.show() self.win.vbox.add(treeview) def add_column(descr, colid): column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid) treeview.append_column(column) add_column("Interface", self.COLUMN_INTERFACE) add_column("Tx Packets", self.COLUMN_TX_PACKETS) add_column("Tx Bytes", self.COLUMN_TX_BYTES) add_column("Tx pkt/1s", self.COLUMN_TX_PACKET_RATE) add_column("Tx bit/1s", self.COLUMN_TX_BIT_RATE) add_column("Rx Packets", self.COLUMN_RX_PACKETS) add_column("Rx Bytes", self.COLUMN_RX_BYTES) add_column("Rx pkt/1s", self.COLUMN_RX_PACKET_RATE) add_column("Rx bit/1s", self.COLUMN_RX_BIT_RATE) self.visualizer.add_information_window(self) self.win.show() def _response_cb(self, win, response): self.win.destroy() self.visualizer.remove_information_window(self) def update(self): node = ns.network.NodeList.GetNode(self.node_index) stats_list = self.statistics_collector.get_interface_statistics(self.node_index) self.table_model.clear() for iface, stats in enumerate(stats_list): tree_iter = self.table_model.append() netdevice = node.GetDevice(iface) 
interface_name = ns.core.Names.FindName(netdevice) if not interface_name: interface_name = "(interface %i)" % iface self.table_model.set(tree_iter, self.COLUMN_INTERFACE, interface_name, self.COLUMN_TX_PACKETS, str(stats.txPackets), self.COLUMN_TX_BYTES, str(stats.txBytes), self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate), self.COLUMN_TX_BIT_RATE, str(stats.txBitRate), self.COLUMN_RX_PACKETS, str(stats.rxPackets), self.COLUMN_RX_BYTES, str(stats.rxBytes), self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate), self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate) ) def populate_node_menu(viz, node, menu, statistics_collector): menu_item = gtk.MenuItem("Show Interface Statistics") menu_item.show() def _show_it(dummy_menu_item): ShowInterfaceStatistics(viz, node.node_index, statistics_collector) menu_item.connect("activate", _show_it) menu.add(menu_item) def register(viz): statistics_collector = StatisticsCollector(viz) viz.connect("populate-node-menu", populate_node_menu, statistics_collector) viz.connect("simulation-periodic-update", statistics_collector.simulation_periodic_update)
gpl-2.0
Bitcoinsulting/bitcoinxt
contrib/seeds/generate-seeds.py
115
4377
#!/usr/bin/python # Copyright (c) 2014 Wladmir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Script to generate list of seed nodes for chainparams.cpp. This script expects two text files in the directory that is passed as an argument: nodes_main.txt nodes_test.txt These files must consist of lines in the format <ip> <ip>:<port> [<ipv6>] [<ipv6>]:<port> <onion>.onion 0xDDBBCCAA (IPv4 little-endian old pnSeeds format) The output will be two data structures with the peers in binary format: static SeedSpec6 pnSeed6_main[]={ ... } static SeedSpec6 pnSeed6_test[]={ ... } These should be pasted into `src/chainparamsseeds.h`. ''' from __future__ import print_function, division from base64 import b32decode from binascii import a2b_hex import sys, os import re # ipv4 in ipv6 prefix pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff]) # tor-specific ipv6 prefix pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43]) def name_to_ipv6(addr): if len(addr)>6 and addr.endswith('.onion'): vchAddr = b32decode(addr[0:-6], True) if len(vchAddr) != 16-len(pchOnionCat): raise ValueError('Invalid onion %s' % s) return pchOnionCat + vchAddr elif '.' 
in addr: # IPv4 return pchIPv4 + bytearray((int(x) for x in addr.split('.'))) elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i,comp in enumerate(addr): if comp == '': if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) return bytearray(sub[0] + ([0] * nullbytes) + sub[1]) elif addr.startswith('0x'): # IPv4-in-little-endian return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:]))) else: raise ValueError('Could not parse address %s' % addr) def parse_spec(s, defaultport): match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s) if match: # ipv6 host = match.group(1) port = match.group(2) elif s.count(':') > 1: # ipv6, no port host = s port = '' else: (host,_,port) = s.partition(':') if not port: port = defaultport else: port = int(port) host = name_to_ipv6(host) return (host,port) def process_nodes(g, f, structname, defaultport): g.write('static SeedSpec6 %s[] = {\n' % structname) first = True for line in f: comment = line.find('#') if comment != -1: line = line[0:comment] line = line.strip() if not line: continue if not first: g.write(',\n') first = False (host,port) = parse_spec(line, defaultport) hoststr = ','.join(('0x%02x' % b) for b in host) g.write(' {{%s}, %i}' % (hoststr, port)) g.write('\n};\n') def main(): if len(sys.argv)<2: print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr) exit(1) g = sys.stdout indir = sys.argv[1] g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('/**\n') g.write(' * List of fixed seed nodes for the bitcoin network\n') g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n') g.write(' *\n') g.write(' * Each line contains a 16-byte 
IPv6 address and a port.\n') g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n') g.write(' */\n') with open(os.path.join(indir,'nodes_main.txt'),'r') as f: process_nodes(g, f, 'pnSeed6_main', 8333) g.write('\n') with open(os.path.join(indir,'nodes_test.txt'),'r') as f: process_nodes(g, f, 'pnSeed6_test', 18333) g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') if __name__ == '__main__': main()
mit
spolanski/WirnikApp
wirnikApp/calculix.py
1
6259
# -*- coding: utf-8 -*- """ W tym pliku następuje konwersja pliku wsadowego z programu GMSH na plik do programu Calculix. """ import numpy as np import os def zmianaElem(mesh, stMesh): """ Funkcja zmienia formę zapisu elementów na odpowiednia dla Calculix'a. """ np.set_printoptions(threshold='nan') quad = [] for i in stMesh: for wsk, j in enumerate(mesh): s = "ELSET=Surface" + str(i) if s in j: quad.append(wsk) for i in quad: mesh[i] = mesh[i].replace("type=CPS4", "type=S8") mesh[i] = mesh[i].split("\n",1) mesh[i][1] = mesh[i][1].replace("\n",", ") mesh[i][1] = mesh[i][1][:-1] ar = np.fromstring(string = mesh[i][1], dtype=int, sep=',') div = 10 if len(ar) % div != 0: raise ValueError("Blad w macierzy") else: row = len(ar)/div ar = np.reshape(ar, (row,div)) ar = np.delete(ar,9,1) elem = np.array2string(ar, separator=', ')[2:-2] elem = elem.replace("],","") elem = elem.replace(" [","") mesh[i] = mesh[i][0] + "\n" + elem + "\n" element = "".join(mesh) return element def zmianyWez(nodes): """ Funkcja zmienia formę zapisu węzłów na odpowiednia dla Calculix'a """ nodes = nodes.split("*NODE\n") nodes[1] = nodes[1].replace("\n",", ") nodes[1] = nodes[1].split(", ")[:-1] nodes[1] = [eval(i) for i in nodes[1]] def sprawdzZero(a): b = float(a) temp = round(b,4) if temp != 0.0000: return a else: return 0 st = "" wsk = 1 for node in nodes[1]: temp = str(sprawdzZero(node)) if wsk % 4 != 0: st += temp + ", " else: st += temp + "\n" wsk += 1 nodes[1] = "*NODE\n" + st nodes = nodes[0] + nodes[1] return nodes def konwertujSiatke(Goniec): """ Funkcja pozwala na przygotowanie pliku *.inp* stworzonego w programie GMSH, tak aby mógł zostać użyty w Calculixie. Funkcja była testowana na elementach typu SHELL. :param Goniec: obiekt Gońca zawierający informacje potrzebne do stworzenia pliku z siatką elementów skończonych. 
""" # Zdefiniuj zmienne wd = os.getcwd() nazwaPliku = wd + '/' + Goniec.Info['Obiekt']['nazwa'] indSet = Goniec.Info['indSet'] stMesh = Goniec.Info['structMesh'] # Wczytaj siatke z pliku with open(nazwaPliku+'.inp','r') as f: txt = f.read() # Podziel dane na czesc zawierajaca elementy i wezly nodes, els = txt.split("******* E L E M E N T S *************") # Przekonwertuj czesc odpowiedzialna za wezly nodes = zmianyWez(nodes) # Podziel czesc opisujaca elementy na elementy 1D oraz 2D oneD, twoD = els.split("*ELEMENT, type=CPS6, ELSET=Surface1",1) twoD = "*ELEMENT, type=CPS6, ELSET=Surface1" + twoD twoD, sets = twoD.split("*ELSET",1) # Zamien nazwe typu elementow z 'CPS6' na 'S6' twoD = twoD.replace("CPS6","S6") twoD = twoD.split("*ELEMENT")[1:] for i in range(len(twoD)): twoD[i] = "*ELEMENT" + twoD[i] # Przekonwertuj czesc odpowiedzialna za elementy twoD = zmianaElem(twoD, stMesh) # Czesc w ktorej przygotowywane zbiory elementow elSet, nSet = sets.split("*NSET",1) elSet = elSet.split("*ELSET") allSet = "" temp = indSet.values() temp.remove(indSet['wlot']) for num in temp: allInd = "PhysicalSurface" + str(num) for i in elSet: if allInd in i: allSet = allSet + "*ELSET" + i break # Czesc w ktorej przygotowywane zbiory wezlow nSet = nSet.split("*NSET") lineSet = "" lineInd = "PhysicalLine" + str(indSet['wlot']) for i in nSet: if lineInd in i: lineSet = "*NSET" + i break # Przygotuj caly tekst zawierajacy siatke elementow skonczonych text = nodes + twoD + allSet + lineSet # Zapisz plik zawierajacy siatke elementow skonczonych with open(nazwaPliku+'.inp','w') as f: f.write(text) def stworzPlikWsadowy(Goniec): """ Funkcja służąca tworzeniu pliku wsadowego do Calculixa. Również dzięki tej funkcji, wartości pobrane z GUI opisujące warunki brzegowe, oraz obciążenie zostają wyodrębnione z Gońca. :param Goniec: obiekt Gońca zawierający informacje potrzebne do stworzenia pliku z siatką elementów skończonych. 
""" Info = Goniec.Info inpFile = """ *INCLUDE, INPUT=NAZWA.inp *Material, name=Stal *Density GESTOSC, *Elastic MYOUNG,POISS *Shell Section, elset=PhysicalSurfaceG_WEW, material=Stal, offset=-0.5 1.5 *Shell Section, elset=PhysicalSurfaceG_ZEW, material=Stal, offset=0.5 1.5 *Shell Section, elset=PhysicalSurfacePODSTAWA, material=Stal, offset=-0.5 3. *Shell Section, elset=PhysicalSurfaceLOPATKI, material=Stal 1.5 *Boundary PhysicalLineWLOT, 1, 1 PhysicalLineWLOT, 2, 2 PhysicalLineWLOT, 3, 3 PhysicalLineWLOT, 4, 4 PhysicalLineWLOT, 5, 5 *Step *Static 0.5,1 *DLOAD PhysicalSurfaceALL, CENTRIF,OBROTY,0.,0.,0.,0.,0.,-1. *NODE FILE U, RF *EL FILE S, *End Step """ Info['obroty'] = str((float(Info['obroty']) *((2.0*np.pi)/60.))**2.) # Podmien wartosci w pliku wsadowym na wartosci pobrane z GUI, oraz GMSHa inpFile = inpFile.replace("GESTOSC",str(Info['gestosc'])) inpFile = inpFile.replace("MYOUNG",str(Info['myoung'])) inpFile = inpFile.replace("POISS",str(Info['poiss'])) inpFile = inpFile.replace("G_WEW",str(Info['indSet']['g_wew'])) inpFile = inpFile.replace("G_ZEW",str(Info['indSet']['g_zew'])) inpFile = inpFile.replace("PODSTAWA",str(Info['indSet']['podstawa'])) inpFile = inpFile.replace("LOPATKI",str(Info['indSet']['lopatki'])) inpFile = inpFile.replace("WLOT",str(Info['indSet']['wlot'])) inpFile = inpFile.replace("ALL",str(Info['indSet']['all'])) inpFile = inpFile.replace("OBROTY",str(Info['obroty'])) inpFile = inpFile.replace("NAZWA",str(Info['Obiekt']['nazwa'])) # Stworz plik wsadowy do Calculixa with open('ccxInp.inp','w') as f: f.write(inpFile)
gpl-3.0
andaviaco/tronido
src/syntax/statements/ifstat.py
1
1530
from lexer import lang from ..tree import Node class IfStat(Node): """docstring for IfStat.""" def __init__(self, exp, stats, else_stat, token): super().__init__(None, token) self.exp = exp or Node(None, token) self.stats = stats self.else_stat = else_stat def process_semantic(self, **cond): self.exp.process_semantic() if self.exp.datatype != lang.SEMANTIC_LOGIC_TYPE: Node.raise_error(f'Condition must be of type {lang.SEMANTIC_LOGIC_TYPE}. Line: {self.token.line_index} - Col: {self.token.col_index}') else: self.datatype = lang.SEMANTIC_VOID_TYPE Node.proccess_traversal_semantics(self.stats, **cond) Node.proccess_traversal_semantics(self.else_stat, **cond) def generate_code(self, **cond): self.exp.generate_code() false_label = Node.get_unique_label('false') end_label = Node.get_unique_label('endif') array, line = Node.assignated_array() Node.array_append(array, f'{line} JMC F, {false_label}') Node.cascade_code(self.stats, **cond) _, endstatement_line = Node.assignated_array() Node.array_append(array, f'{endstatement_line} JMP 0, {end_label}') endstatement_line += 1 Node.code_labels.append(f'{false_label},I,I,{endstatement_line},0,#,') Node.cascade_code(self.else_stat, **cond) _, end_else_line = Node.assignated_array() Node.code_labels.append(f'{end_label},I,I,{end_else_line},0,#,')
mit
dimaspivak/docker-py
tests/integration/api_container_test.py
1
49036
import os import signal import tempfile import docker from docker.constants import IS_WINDOWS_PLATFORM from docker.utils.socket import next_frame_size from docker.utils.socket import read_exactly import pytest import six from .base import BUSYBOX, BaseAPIIntegrationTest from .. import helpers from ..helpers import requires_api_version class ListContainersTest(BaseAPIIntegrationTest): def test_list_containers(self): res0 = self.client.containers(all=True) size = len(res0) res1 = self.client.create_container(BUSYBOX, 'true') self.assertIn('Id', res1) self.client.start(res1['Id']) self.tmp_containers.append(res1['Id']) res2 = self.client.containers(all=True) self.assertEqual(size + 1, len(res2)) retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])] self.assertEqual(len(retrieved), 1) retrieved = retrieved[0] self.assertIn('Command', retrieved) self.assertEqual(retrieved['Command'], six.text_type('true')) self.assertIn('Image', retrieved) self.assertRegex(retrieved['Image'], r'busybox:.*') self.assertIn('Status', retrieved) class CreateContainerTest(BaseAPIIntegrationTest): def test_create(self): res = self.client.create_container(BUSYBOX, 'true') self.assertIn('Id', res) self.tmp_containers.append(res['Id']) def test_create_with_host_pid_mode(self): ctnr = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config( pid_mode='host', network_mode='none' ) ) self.assertIn('Id', ctnr) self.tmp_containers.append(ctnr['Id']) self.client.start(ctnr) inspect = self.client.inspect_container(ctnr) self.assertIn('HostConfig', inspect) host_config = inspect['HostConfig'] self.assertIn('PidMode', host_config) self.assertEqual(host_config['PidMode'], 'host') def test_create_with_links(self): res0 = self.client.create_container( BUSYBOX, 'cat', detach=True, stdin_open=True, environment={'FOO': '1'}) container1_id = res0['Id'] self.tmp_containers.append(container1_id) self.client.start(container1_id) res1 = self.client.create_container( 
BUSYBOX, 'cat', detach=True, stdin_open=True, environment={'FOO': '1'}) container2_id = res1['Id'] self.tmp_containers.append(container2_id) self.client.start(container2_id) # we don't want the first / link_path1 = self.client.inspect_container(container1_id)['Name'][1:] link_alias1 = 'mylink1' link_env_prefix1 = link_alias1.upper() link_path2 = self.client.inspect_container(container2_id)['Name'][1:] link_alias2 = 'mylink2' link_env_prefix2 = link_alias2.upper() res2 = self.client.create_container( BUSYBOX, 'env', host_config=self.client.create_host_config( links={link_path1: link_alias1, link_path2: link_alias2}, network_mode='bridge' ) ) container3_id = res2['Id'] self.tmp_containers.append(container3_id) self.client.start(container3_id) self.assertEqual(self.client.wait(container3_id), 0) logs = self.client.logs(container3_id) if six.PY3: logs = logs.decode('utf-8') self.assertIn('{0}_NAME='.format(link_env_prefix1), logs) self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs) self.assertIn('{0}_NAME='.format(link_env_prefix2), logs) self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs) def test_create_with_restart_policy(self): container = self.client.create_container( BUSYBOX, ['sleep', '2'], host_config=self.client.create_host_config( restart_policy={"Name": "always", "MaximumRetryCount": 0}, network_mode='none' ) ) id = container['Id'] self.client.start(id) self.client.wait(id) with self.assertRaises(docker.errors.APIError) as exc: self.client.remove_container(id) err = exc.exception.explanation self.assertIn( 'You cannot remove ', err ) self.client.remove_container(id, force=True) def test_create_container_with_volumes_from(self): vol_names = ['foobar_vol0', 'foobar_vol1'] res0 = self.client.create_container( BUSYBOX, 'true', name=vol_names[0] ) container1_id = res0['Id'] self.tmp_containers.append(container1_id) self.client.start(container1_id) res1 = self.client.create_container( BUSYBOX, 'true', name=vol_names[1] ) container2_id = 
res1['Id'] self.tmp_containers.append(container2_id) self.client.start(container2_id) with self.assertRaises(docker.errors.DockerException): self.client.create_container( BUSYBOX, 'cat', detach=True, stdin_open=True, volumes_from=vol_names ) res2 = self.client.create_container( BUSYBOX, 'cat', detach=True, stdin_open=True, host_config=self.client.create_host_config( volumes_from=vol_names, network_mode='none' ) ) container3_id = res2['Id'] self.tmp_containers.append(container3_id) self.client.start(container3_id) info = self.client.inspect_container(res2['Id']) self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names) def create_container_readonly_fs(self): ctnr = self.client.create_container( BUSYBOX, ['mkdir', '/shrine'], host_config=self.client.create_host_config( read_only=True, network_mode='none' ) ) self.assertIn('Id', ctnr) self.tmp_containers.append(ctnr['Id']) self.client.start(ctnr) res = self.client.wait(ctnr) self.assertNotEqual(res, 0) def create_container_with_name(self): res = self.client.create_container(BUSYBOX, 'true', name='foobar') self.assertIn('Id', res) self.tmp_containers.append(res['Id']) inspect = self.client.inspect_container(res['Id']) self.assertIn('Name', inspect) self.assertEqual('/foobar', inspect['Name']) def create_container_privileged(self): res = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config( privileged=True, network_mode='none' ) ) self.assertIn('Id', res) self.tmp_containers.append(res['Id']) self.client.start(res['Id']) inspect = self.client.inspect_container(res['Id']) self.assertIn('Config', inspect) self.assertIn('Id', inspect) self.assertTrue(inspect['Id'].startswith(res['Id'])) self.assertIn('Image', inspect) self.assertIn('State', inspect) self.assertIn('Running', inspect['State']) if not inspect['State']['Running']: self.assertIn('ExitCode', inspect['State']) self.assertEqual(inspect['State']['ExitCode'], 0) # Since Nov 2013, the Privileged flag is no longer part of 
the # container's config exposed via the API (safety concerns?). # if 'Privileged' in inspect['Config']: self.assertEqual(inspect['Config']['Privileged'], True) def test_create_with_mac_address(self): mac_address_expected = "02:42:ac:11:00:0a" container = self.client.create_container( BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected) id = container['Id'] self.client.start(container) res = self.client.inspect_container(container['Id']) self.assertEqual(mac_address_expected, res['NetworkSettings']['MacAddress']) self.client.kill(id) @requires_api_version('1.20') def test_group_id_ints(self): container = self.client.create_container( BUSYBOX, 'id -G', host_config=self.client.create_host_config(group_add=[1000, 1001]) ) self.tmp_containers.append(container) self.client.start(container) self.client.wait(container) logs = self.client.logs(container) if six.PY3: logs = logs.decode('utf-8') groups = logs.strip().split(' ') self.assertIn('1000', groups) self.assertIn('1001', groups) @requires_api_version('1.20') def test_group_id_strings(self): container = self.client.create_container( BUSYBOX, 'id -G', host_config=self.client.create_host_config( group_add=['1000', '1001'] ) ) self.tmp_containers.append(container) self.client.start(container) self.client.wait(container) logs = self.client.logs(container) if six.PY3: logs = logs.decode('utf-8') groups = logs.strip().split(' ') self.assertIn('1000', groups) self.assertIn('1001', groups) def test_valid_log_driver_and_log_opt(self): log_config = docker.types.LogConfig( type='json-file', config={'max-file': '100'} ) container = self.client.create_container( BUSYBOX, ['true'], host_config=self.client.create_host_config(log_config=log_config) ) self.tmp_containers.append(container['Id']) self.client.start(container) info = self.client.inspect_container(container) container_log_config = info['HostConfig']['LogConfig'] self.assertEqual(container_log_config['Type'], log_config.type) 
self.assertEqual(container_log_config['Config'], log_config.config) def test_invalid_log_driver_raises_exception(self): log_config = docker.types.LogConfig( type='asdf-nope', config={} ) expected_msg = "logger: no log driver named 'asdf-nope' is registered" with pytest.raises(docker.errors.APIError) as excinfo: # raises an internal server error 500 container = self.client.create_container( BUSYBOX, ['true'], host_config=self.client.create_host_config( log_config=log_config ) ) self.client.start(container) assert excinfo.value.explanation == expected_msg def test_valid_no_log_driver_specified(self): log_config = docker.types.LogConfig( type="", config={'max-file': '100'} ) container = self.client.create_container( BUSYBOX, ['true'], host_config=self.client.create_host_config(log_config=log_config) ) self.tmp_containers.append(container['Id']) self.client.start(container) info = self.client.inspect_container(container) container_log_config = info['HostConfig']['LogConfig'] self.assertEqual(container_log_config['Type'], "json-file") self.assertEqual(container_log_config['Config'], log_config.config) def test_valid_no_config_specified(self): log_config = docker.types.LogConfig( type="json-file", config=None ) container = self.client.create_container( BUSYBOX, ['true'], host_config=self.client.create_host_config(log_config=log_config) ) self.tmp_containers.append(container['Id']) self.client.start(container) info = self.client.inspect_container(container) container_log_config = info['HostConfig']['LogConfig'] self.assertEqual(container_log_config['Type'], "json-file") self.assertEqual(container_log_config['Config'], {}) def test_create_with_memory_constraints_with_str(self): ctnr = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config( memswap_limit='1G', mem_limit='700M' ) ) self.assertIn('Id', ctnr) self.tmp_containers.append(ctnr['Id']) self.client.start(ctnr) inspect = self.client.inspect_container(ctnr) 
self.assertIn('HostConfig', inspect) host_config = inspect['HostConfig'] for limit in ['Memory', 'MemorySwap']: self.assertIn(limit, host_config) def test_create_with_memory_constraints_with_int(self): ctnr = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config(mem_swappiness=40) ) self.assertIn('Id', ctnr) self.tmp_containers.append(ctnr['Id']) self.client.start(ctnr) inspect = self.client.inspect_container(ctnr) self.assertIn('HostConfig', inspect) host_config = inspect['HostConfig'] self.assertIn('MemorySwappiness', host_config) def test_create_with_environment_variable_no_value(self): container = self.client.create_container( BUSYBOX, ['echo'], environment={'Foo': None, 'Other': 'one', 'Blank': ''}, ) self.tmp_containers.append(container['Id']) config = self.client.inspect_container(container['Id']) assert ( sorted(config['Config']['Env']) == sorted(['Foo', 'Other=one', 'Blank=']) ) @requires_api_version('1.22') def test_create_with_tmpfs(self): tmpfs = { '/tmp1': 'size=3M' } container = self.client.create_container( BUSYBOX, ['echo'], host_config=self.client.create_host_config( tmpfs=tmpfs)) self.tmp_containers.append(container['Id']) config = self.client.inspect_container(container) assert config['HostConfig']['Tmpfs'] == tmpfs @requires_api_version('1.24') def test_create_with_isolation(self): container = self.client.create_container( BUSYBOX, ['echo'], host_config=self.client.create_host_config( isolation='default' ) ) self.tmp_containers.append(container['Id']) config = self.client.inspect_container(container) assert config['HostConfig']['Isolation'] == 'default' @requires_api_version('1.25') def test_create_with_auto_remove(self): host_config = self.client.create_host_config( auto_remove=True ) container = self.client.create_container( BUSYBOX, ['echo', 'test'], host_config=host_config ) self.tmp_containers.append(container['Id']) config = self.client.inspect_container(container) assert 
config['HostConfig']['AutoRemove'] is True @requires_api_version('1.25') def test_create_with_stop_timeout(self): container = self.client.create_container( BUSYBOX, ['echo', 'test'], stop_timeout=25 ) self.tmp_containers.append(container['Id']) config = self.client.inspect_container(container) assert config['Config']['StopTimeout'] == 25 @requires_api_version('1.24') @pytest.mark.xfail(True, reason='Not supported on most drivers') def test_create_with_storage_opt(self): host_config = self.client.create_host_config( storage_opt={'size': '120G'} ) container = self.client.create_container( BUSYBOX, ['echo', 'test'], host_config=host_config ) self.tmp_containers.append(container) config = self.client.inspect_container(container) assert config['HostConfig']['StorageOpt'] == { 'size': '120G' } @requires_api_version('1.25') def test_create_with_init(self): ctnr = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config( init=True ) ) self.tmp_containers.append(ctnr['Id']) config = self.client.inspect_container(ctnr) assert config['HostConfig']['Init'] is True @pytest.mark.xfail(True, reason='init-path removed in 17.05.0') @requires_api_version('1.25') def test_create_with_init_path(self): ctnr = self.client.create_container( BUSYBOX, 'true', host_config=self.client.create_host_config( init_path="/usr/libexec/docker-init" ) ) self.tmp_containers.append(ctnr['Id']) config = self.client.inspect_container(ctnr) assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init" class VolumeBindTest(BaseAPIIntegrationTest): def setUp(self): super(VolumeBindTest, self).setUp() self.mount_dest = '/mnt' # Get a random pathname - we don't need it to exist locally self.mount_origin = tempfile.mkdtemp() self.filename = 'shared.txt' self.run_with_volume( False, BUSYBOX, ['touch', os.path.join(self.mount_dest, self.filename)], ) @pytest.mark.xfail( IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform' ) def 
test_create_with_binds_rw(self): container = self.run_with_volume( False, BUSYBOX, ['ls', self.mount_dest], ) logs = self.client.logs(container) if six.PY3: logs = logs.decode('utf-8') self.assertIn(self.filename, logs) inspect_data = self.client.inspect_container(container) self.check_container_data(inspect_data, True) @pytest.mark.xfail( IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform' ) def test_create_with_binds_ro(self): self.run_with_volume( False, BUSYBOX, ['touch', os.path.join(self.mount_dest, self.filename)], ) container = self.run_with_volume( True, BUSYBOX, ['ls', self.mount_dest], ) logs = self.client.logs(container) if six.PY3: logs = logs.decode('utf-8') self.assertIn(self.filename, logs) inspect_data = self.client.inspect_container(container) self.check_container_data(inspect_data, False) def check_container_data(self, inspect_data, rw): if docker.utils.compare_version('1.20', self.client._version) < 0: self.assertIn('Volumes', inspect_data) self.assertIn(self.mount_dest, inspect_data['Volumes']) self.assertEqual( self.mount_origin, inspect_data['Volumes'][self.mount_dest] ) self.assertIn(self.mount_dest, inspect_data['VolumesRW']) self.assertFalse(inspect_data['VolumesRW'][self.mount_dest]) else: self.assertIn('Mounts', inspect_data) filtered = list(filter( lambda x: x['Destination'] == self.mount_dest, inspect_data['Mounts'] )) self.assertEqual(len(filtered), 1) mount_data = filtered[0] self.assertEqual(mount_data['Source'], self.mount_origin) self.assertEqual(mount_data['RW'], rw) def run_with_volume(self, ro, *args, **kwargs): return self.run_container( *args, volumes={self.mount_dest: {}}, host_config=self.client.create_host_config( binds={ self.mount_origin: { 'bind': self.mount_dest, 'ro': ro, }, }, network_mode='none' ), **kwargs ) @requires_api_version('1.20') class ArchiveTest(BaseAPIIntegrationTest): def test_get_file_archive_from_container(self): data = 'The Maid and the Pocket Watch of Blood' ctnr = 
self.client.create_container( BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data), volumes=['/vol1'] ) self.tmp_containers.append(ctnr) self.client.start(ctnr) self.client.wait(ctnr) with tempfile.NamedTemporaryFile() as destination: strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') for d in strm: destination.write(d) destination.seek(0) retrieved_data = helpers.untar_file(destination, 'data.txt') if six.PY3: retrieved_data = retrieved_data.decode('utf-8') self.assertEqual(data, retrieved_data.strip()) def test_get_file_stat_from_container(self): data = 'The Maid and the Pocket Watch of Blood' ctnr = self.client.create_container( BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data), volumes=['/vol1'] ) self.tmp_containers.append(ctnr) self.client.start(ctnr) self.client.wait(ctnr) strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') self.assertIn('name', stat) self.assertEqual(stat['name'], 'data.txt') self.assertIn('size', stat) self.assertEqual(stat['size'], len(data)) def test_copy_file_to_container(self): data = b'Deaf To All But The Song' with tempfile.NamedTemporaryFile(delete=False) as test_file: test_file.write(data) test_file.seek(0) ctnr = self.client.create_container( BUSYBOX, 'cat {0}'.format( os.path.join('/vol1/', os.path.basename(test_file.name)) ), volumes=['/vol1'] ) self.tmp_containers.append(ctnr) with helpers.simple_tar(test_file.name) as test_tar: self.client.put_archive(ctnr, '/vol1', test_tar) self.client.start(ctnr) self.client.wait(ctnr) logs = self.client.logs(ctnr) if six.PY3: logs = logs.decode('utf-8') data = data.decode('utf-8') self.assertEqual(logs.strip(), data) def test_copy_directory_to_container(self): files = ['a.py', 'b.py', 'foo/b.py'] dirs = ['foo', 'bar'] base = helpers.make_tree(dirs, files) ctnr = self.client.create_container( BUSYBOX, 'ls -p /vol1', volumes=['/vol1'] ) self.tmp_containers.append(ctnr) with docker.utils.tar(base) as test_tar: self.client.put_archive(ctnr, '/vol1', test_tar) 
self.client.start(ctnr) self.client.wait(ctnr) logs = self.client.logs(ctnr) if six.PY3: logs = logs.decode('utf-8') results = logs.strip().split() self.assertIn('a.py', results) self.assertIn('b.py', results) self.assertIn('foo/', results) self.assertIn('bar/', results) class RenameContainerTest(BaseAPIIntegrationTest): def test_rename_container(self): version = self.client.version()['Version'] name = 'hong_meiling' res = self.client.create_container(BUSYBOX, 'true') self.assertIn('Id', res) self.tmp_containers.append(res['Id']) self.client.rename(res, name) inspect = self.client.inspect_container(res['Id']) self.assertIn('Name', inspect) if version == '1.5.0': self.assertEqual(name, inspect['Name']) else: self.assertEqual('/{0}'.format(name), inspect['Name']) class StartContainerTest(BaseAPIIntegrationTest): def test_start_container(self): res = self.client.create_container(BUSYBOX, 'true') self.assertIn('Id', res) self.tmp_containers.append(res['Id']) self.client.start(res['Id']) inspect = self.client.inspect_container(res['Id']) self.assertIn('Config', inspect) self.assertIn('Id', inspect) self.assertTrue(inspect['Id'].startswith(res['Id'])) self.assertIn('Image', inspect) self.assertIn('State', inspect) self.assertIn('Running', inspect['State']) if not inspect['State']['Running']: self.assertIn('ExitCode', inspect['State']) self.assertEqual(inspect['State']['ExitCode'], 0) def test_start_container_with_dict_instead_of_id(self): res = self.client.create_container(BUSYBOX, 'true') self.assertIn('Id', res) self.tmp_containers.append(res['Id']) self.client.start(res) inspect = self.client.inspect_container(res['Id']) self.assertIn('Config', inspect) self.assertIn('Id', inspect) self.assertTrue(inspect['Id'].startswith(res['Id'])) self.assertIn('Image', inspect) self.assertIn('State', inspect) self.assertIn('Running', inspect['State']) if not inspect['State']['Running']: self.assertIn('ExitCode', inspect['State']) self.assertEqual(inspect['State']['ExitCode'], 0) 
def test_run_shlex_commands(self): commands = [ 'true', 'echo "The Young Descendant of Tepes & Septette for the ' 'Dead Princess"', 'echo -n "The Young Descendant of Tepes & Septette for the ' 'Dead Princess"', '/bin/sh -c "echo Hello World"', '/bin/sh -c \'echo "Hello World"\'', 'echo "\"Night of Nights\""', 'true && echo "Night of Nights"' ] for cmd in commands: container = self.client.create_container(BUSYBOX, cmd) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0, msg=cmd) class WaitTest(BaseAPIIntegrationTest): def test_wait(self): res = self.client.create_container(BUSYBOX, ['sleep', '3']) id = res['Id'] self.tmp_containers.append(id) self.client.start(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) inspect = self.client.inspect_container(id) self.assertIn('Running', inspect['State']) self.assertEqual(inspect['State']['Running'], False) self.assertIn('ExitCode', inspect['State']) self.assertEqual(inspect['State']['ExitCode'], exitcode) def test_wait_with_dict_instead_of_id(self): res = self.client.create_container(BUSYBOX, ['sleep', '3']) id = res['Id'] self.tmp_containers.append(id) self.client.start(res) exitcode = self.client.wait(res) self.assertEqual(exitcode, 0) inspect = self.client.inspect_container(res) self.assertIn('Running', inspect['State']) self.assertEqual(inspect['State']['Running'], False) self.assertIn('ExitCode', inspect['State']) self.assertEqual(inspect['State']['ExitCode'], exitcode) class LogsTest(BaseAPIIntegrationTest): def test_logs(self): snippet = 'Flowering Nights (Sakuya Iyazoi)' container = self.client.create_container( BUSYBOX, 'echo {0}'.format(snippet) ) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) logs = self.client.logs(id) self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) def test_logs_tail_option(self): snippet = 
'''Line1 Line2''' container = self.client.create_container( BUSYBOX, 'echo "{0}"'.format(snippet) ) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) logs = self.client.logs(id, tail=1) self.assertEqual(logs, 'Line2\n'.encode(encoding='ascii')) def test_logs_streaming_and_follow(self): snippet = 'Flowering Nights (Sakuya Iyazoi)' container = self.client.create_container( BUSYBOX, 'echo {0}'.format(snippet) ) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) logs = six.binary_type() for chunk in self.client.logs(id, stream=True, follow=True): logs += chunk exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) def test_logs_with_dict_instead_of_id(self): snippet = 'Flowering Nights (Sakuya Iyazoi)' container = self.client.create_container( BUSYBOX, 'echo {0}'.format(snippet) ) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) logs = self.client.logs(container) self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) def test_logs_with_tail_0(self): snippet = 'Flowering Nights (Sakuya Iyazoi)' container = self.client.create_container( BUSYBOX, 'echo "{0}"'.format(snippet) ) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) logs = self.client.logs(id, tail=0) self.assertEqual(logs, ''.encode(encoding='ascii')) class DiffTest(BaseAPIIntegrationTest): def test_diff(self): container = self.client.create_container(BUSYBOX, ['touch', '/test']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) diff = self.client.diff(id) test_diff = [x for x in diff if x.get('Path', None) == '/test'] self.assertEqual(len(test_diff), 1) 
self.assertIn('Kind', test_diff[0]) self.assertEqual(test_diff[0]['Kind'], 1) def test_diff_with_dict_instead_of_id(self): container = self.client.create_container(BUSYBOX, ['touch', '/test']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) exitcode = self.client.wait(id) self.assertEqual(exitcode, 0) diff = self.client.diff(container) test_diff = [x for x in diff if x.get('Path', None) == '/test'] self.assertEqual(len(test_diff), 1) self.assertIn('Kind', test_diff[0]) self.assertEqual(test_diff[0]['Kind'], 1) class StopTest(BaseAPIIntegrationTest): def test_stop(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) self.client.stop(id, timeout=2) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('Running', state) self.assertEqual(state['Running'], False) def test_stop_with_dict_instead_of_id(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999']) self.assertIn('Id', container) id = container['Id'] self.client.start(container) self.tmp_containers.append(id) self.client.stop(container, timeout=2) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('Running', state) self.assertEqual(state['Running'], False) class KillTest(BaseAPIIntegrationTest): def test_kill(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) self.client.kill(id) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False) def test_kill_with_dict_instead_of_id(self): container = 
self.client.create_container(BUSYBOX, ['sleep', '9999']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) self.client.kill(container) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False) def test_kill_with_signal(self): id = self.client.create_container(BUSYBOX, ['sleep', '60']) self.tmp_containers.append(id) self.client.start(id) self.client.kill( id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9 ) exitcode = self.client.wait(id) self.assertNotEqual(exitcode, 0) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False, state) def test_kill_with_signal_name(self): id = self.client.create_container(BUSYBOX, ['sleep', '60']) self.client.start(id) self.tmp_containers.append(id) self.client.kill(id, signal='SIGKILL') exitcode = self.client.wait(id) self.assertNotEqual(exitcode, 0) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False, state) def test_kill_with_signal_integer(self): id = self.client.create_container(BUSYBOX, ['sleep', '60']) self.client.start(id) self.tmp_containers.append(id) self.client.kill(id, signal=9) exitcode = self.client.wait(id) self.assertNotEqual(exitcode, 0) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertNotEqual(state['ExitCode'], 0) 
self.assertIn('Running', state) self.assertEqual(state['Running'], False, state) class PortTest(BaseAPIIntegrationTest): def test_port(self): port_bindings = { '1111': ('127.0.0.1', '4567'), '2222': ('127.0.0.1', '4568') } container = self.client.create_container( BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()), host_config=self.client.create_host_config( port_bindings=port_bindings, network_mode='bridge' ) ) id = container['Id'] self.client.start(container) # Call the port function on each biding and compare expected vs actual for port in port_bindings: actual_bindings = self.client.port(container, port) port_binding = actual_bindings.pop() ip, host_port = port_binding['HostIp'], port_binding['HostPort'] self.assertEqual(ip, port_bindings[port][0]) self.assertEqual(host_port, port_bindings[port][1]) self.client.kill(id) class ContainerTopTest(BaseAPIIntegrationTest): def test_top(self): container = self.client.create_container( BUSYBOX, ['sleep', '60'] ) self.tmp_containers.append(container) self.client.start(container) res = self.client.top(container) if IS_WINDOWS_PLATFORM: assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND'] else: assert res['Titles'] == [ 'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD' ] assert len(res['Processes']) == 1 assert res['Processes'][0][-1] == 'sleep 60' self.client.kill(container) @pytest.mark.skipif( IS_WINDOWS_PLATFORM, reason='No psargs support on windows' ) def test_top_with_psargs(self): container = self.client.create_container( BUSYBOX, ['sleep', '60']) self.tmp_containers.append(container) self.client.start(container) res = self.client.top(container, 'waux') self.assertEqual( res['Titles'], ['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS', 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'], ) self.assertEqual(len(res['Processes']), 1) self.assertEqual(res['Processes'][0][10], 'sleep 60') class RestartContainerTest(BaseAPIIntegrationTest): def test_restart(self): container = self.client.create_container(BUSYBOX, 
['sleep', '9999']) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) info = self.client.inspect_container(id) self.assertIn('State', info) self.assertIn('StartedAt', info['State']) start_time1 = info['State']['StartedAt'] self.client.restart(id, timeout=2) info2 = self.client.inspect_container(id) self.assertIn('State', info2) self.assertIn('StartedAt', info2['State']) start_time2 = info2['State']['StartedAt'] self.assertNotEqual(start_time1, start_time2) self.assertIn('Running', info2['State']) self.assertEqual(info2['State']['Running'], True) self.client.kill(id) def test_restart_with_dict_instead_of_id(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999']) self.assertIn('Id', container) id = container['Id'] self.client.start(container) self.tmp_containers.append(id) info = self.client.inspect_container(id) self.assertIn('State', info) self.assertIn('StartedAt', info['State']) start_time1 = info['State']['StartedAt'] self.client.restart(container, timeout=2) info2 = self.client.inspect_container(id) self.assertIn('State', info2) self.assertIn('StartedAt', info2['State']) start_time2 = info2['State']['StartedAt'] self.assertNotEqual(start_time1, start_time2) self.assertIn('Running', info2['State']) self.assertEqual(info2['State']['Running'], True) self.client.kill(id) class RemoveContainerTest(BaseAPIIntegrationTest): def test_remove(self): container = self.client.create_container(BUSYBOX, ['true']) id = container['Id'] self.client.start(id) self.client.wait(id) self.client.remove_container(id) containers = self.client.containers(all=True) res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)] self.assertEqual(len(res), 0) def test_remove_with_dict_instead_of_id(self): container = self.client.create_container(BUSYBOX, ['true']) id = container['Id'] self.client.start(id) self.client.wait(id) self.client.remove_container(container) containers = self.client.containers(all=True) res = [x for x in containers 
if 'Id' in x and x['Id'].startswith(id)] self.assertEqual(len(res), 0) class AttachContainerTest(BaseAPIIntegrationTest): def test_run_container_streaming(self): container = self.client.create_container(BUSYBOX, '/bin/sh', detach=True, stdin_open=True) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) sock = self.client.attach_socket(container, ws=False) self.assertTrue(sock.fileno() > -1) def test_run_container_reading_socket(self): line = 'hi there and stuff and things, words!' # `echo` appends CRLF, `printf` doesn't command = "printf '{0}'".format(line) container = self.client.create_container(BUSYBOX, command, detach=True, tty=False) ident = container['Id'] self.tmp_containers.append(ident) opts = {"stdout": 1, "stream": 1, "logs": 1} pty_stdout = self.client.attach_socket(ident, opts) self.addCleanup(pty_stdout.close) self.client.start(ident) next_size = next_frame_size(pty_stdout) self.assertEqual(next_size, len(line)) data = read_exactly(pty_stdout, next_size) self.assertEqual(data.decode('utf-8'), line) class PauseTest(BaseAPIIntegrationTest): def test_pause_unpause(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999']) id = container['Id'] self.tmp_containers.append(id) self.client.start(container) self.client.pause(id) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], True) self.assertIn('Paused', state) self.assertEqual(state['Paused'], True) self.client.unpause(id) container_info = self.client.inspect_container(id) self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) self.assertEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], True) self.assertIn('Paused', state) self.assertEqual(state['Paused'], False) 
class PruneTest(BaseAPIIntegrationTest):
    """Integration tests for the container prune endpoint (API >= 1.25)."""

    @requires_api_version('1.25')
    def test_prune_containers(self):
        """Pruning removes only exited containers and reports reclaimed space.

        container1 exits after writing a file; container2 keeps running.
        """
        container1 = self.client.create_container(
            BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
        )
        container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
        self.client.start(container1)
        self.client.start(container2)
        # Wait for container1 to exit so it becomes eligible for pruning.
        self.client.wait(container1)
        result = self.client.prune_containers()
        assert container1['Id'] in result['ContainersDeleted']
        # container1 wrote /data.txt, so deleting it frees some space.
        assert result['SpaceReclaimed'] > 0
        # The still-running container must survive the prune.
        assert container2['Id'] not in result['ContainersDeleted']


class GetContainerStatsTest(BaseAPIIntegrationTest):
    """Integration tests for the container stats endpoint."""

    @requires_api_version('1.19')
    def test_get_container_stats_no_stream(self):
        """stats(stream=0) returns a single dict with the expected keys."""
        container = self.client.create_container(
            BUSYBOX, ['sleep', '60'],
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        response = self.client.stats(container, stream=0)
        self.client.kill(container)

        self.assertEqual(type(response), dict)
        # NOTE(review): key is 'networks' here but 'network' in the streaming
        # test below — presumably reflecting different API versions; confirm.
        for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
                    'memory_stats', 'blkio_stats']:
            self.assertIn(key, response)

    @requires_api_version('1.17')
    def test_get_container_stats_stream(self):
        """Streaming stats yields a sequence of dicts with the expected keys."""
        container = self.client.create_container(
            BUSYBOX, ['sleep', '60'],
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        stream = self.client.stats(container)
        for chunk in stream:
            self.assertEqual(type(chunk), dict)
            for key in ['read', 'network', 'precpu_stats', 'cpu_stats',
                        'memory_stats', 'blkio_stats']:
                self.assertIn(key, chunk)


class ContainerUpdateTest(BaseAPIIntegrationTest):
    """Integration tests for updating a running container's host config."""

    @requires_api_version('1.22')
    def test_update_container(self):
        """update_container can change the memory limit of a live container."""
        old_mem_limit = 400 * 1024 * 1024
        new_mem_limit = 300 * 1024 * 1024
        container = self.client.create_container(
            BUSYBOX, 'top', host_config=self.client.create_host_config(
                mem_limit=old_mem_limit
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        self.client.update_container(container, mem_limit=new_mem_limit)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)

    @requires_api_version('1.23')
    def test_restart_policy_update(self):
        """update_container can replace the restart policy of a container."""
        old_restart_policy = {
            'MaximumRetryCount': 0,
            'Name': 'always'
        }
        new_restart_policy = {
            'MaximumRetryCount': 42,
            'Name': 'on-failure'
        }
        container = self.client.create_container(
            BUSYBOX, ['sleep', '60'],
            host_config=self.client.create_host_config(
                restart_policy=old_restart_policy
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        self.client.update_container(container,
                                     restart_policy=new_restart_policy)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(
            inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'],
            new_restart_policy['MaximumRetryCount']
        )
        self.assertEqual(
            inspect_data['HostConfig']['RestartPolicy']['Name'],
            new_restart_policy['Name']
        )


class ContainerCPUTest(BaseAPIIntegrationTest):
    """Integration tests for CPU-related host config options."""

    @requires_api_version('1.18')
    def test_container_cpu_shares(self):
        """cpu_shares from create_host_config appears in the inspect output."""
        cpu_shares = 512
        container = self.client.create_container(
            BUSYBOX, 'ls', host_config=self.client.create_host_config(
                cpu_shares=cpu_shares
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        inspect_data = self.client.inspect_container(container)
        # 512 is the same value assigned to cpu_shares above.
        self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)

    @requires_api_version('1.18')
    def test_container_cpuset(self):
        """cpuset_cpus from create_host_config appears in the inspect output."""
        cpuset_cpus = "0,1"
        container = self.client.create_container(
            BUSYBOX, 'ls', host_config=self.client.create_host_config(
                cpuset_cpus=cpuset_cpus
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)

    @requires_api_version('1.25')
    def test_create_with_runtime(self):
        """The runtime keyword of create_container is stored in HostConfig."""
        container = self.client.create_container(
            BUSYBOX, ['echo', 'test'], runtime='runc'
        )
        self.tmp_containers.append(container['Id'])
        config = self.client.inspect_container(container)
        assert config['HostConfig']['Runtime'] == 'runc'
class LinkTest(BaseAPIIntegrationTest):
    """Integration test for removing container links."""

    def test_remove_link(self):
        """remove_container(..., link=True) deletes only the link.

        Both linked containers must still exist afterwards.
        """
        # Create containers
        container1 = self.client.create_container(
            BUSYBOX, 'cat', detach=True, stdin_open=True
        )
        container1_id = container1['Id']
        self.tmp_containers.append(container1_id)
        self.client.start(container1_id)

        # Create Link
        # we don't want the first / of the container name
        link_path = self.client.inspect_container(container1_id)['Name'][1:]
        link_alias = 'mylink'

        container2 = self.client.create_container(
            BUSYBOX, 'cat', host_config=self.client.create_host_config(
                links={link_path: link_alias}
            )
        )
        container2_id = container2['Id']
        self.tmp_containers.append(container2_id)
        self.client.start(container2_id)

        # Remove link: the link is addressed as "<linking container>/<alias>".
        linked_name = self.client.inspect_container(container2_id)['Name'][1:]
        link_name = '%s/%s' % (linked_name, link_alias)
        self.client.remove_container(link_name, link=True)

        # Link is gone
        containers = self.client.containers(all=True)
        retrieved = [x for x in containers if link_name in x['Names']]
        self.assertEqual(len(retrieved), 0)

        # Containers are still there
        retrieved = [
            x for x in containers if x['Id'].startswith(container1_id) or
            x['Id'].startswith(container2_id)
        ]
        self.assertEqual(len(retrieved), 2)
apache-2.0
sandeepklr/gridmap
examples/manual.py
5
3147
#!/usr/bin/env python

# Written (W) 2008-2012 Christian Widmer
# Written (W) 2008-2010 Cheng Soon Ong
# Written (W) 2012-2014 Daniel Blanchard, dblanchard@ets.org
# Copyright (C) 2008-2012 Max-Planck-Society, 2012-2014 ETS

# This file is part of GridMap.

# GridMap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# GridMap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GridMap.  If not, see <http://www.gnu.org/licenses/>.

"""
In addition to the high level map interface "grid_map", gridmap also allows
one to easily create a list of jobs (that potentially run different functions)
and execute them on the cluster as well.
"""

from __future__ import print_function, unicode_literals

import logging
from datetime import datetime

from gridmap import Job, process_jobs


def sleep_walk(secs):
    '''
    Pass the time by adding numbers until the specified number of seconds has
    elapsed. Intended as a replacement for ``time.sleep`` that doesn't leave
    the CPU idle (which will make the job seem like it's stalled).

    :param secs: approximate number of seconds to busy-wait
    '''
    start_time = datetime.now()
    num = 0
    # Deliberate busy loop: keeps the CPU occupied so the scheduler does not
    # consider the job stalled.  `num` exists only to give the loop a body.
    while (datetime.now() - start_time).seconds < secs:
        num = num + 1


def compute_factorial(n):
    """
    Compute the factorial of ``n`` (after busy-waiting for 10 seconds to
    simulate a long-running cluster job).

    :param n: non-negative integer
    :returns: n!
    """
    sleep_walk(10)
    ret = 1
    for i in range(n):
        ret = ret * (i + 1)
    return ret


def make_jobs():
    """
    Create a list of Job objects, which carry all information needed
    for a function to be executed on SGE:
    - function object
    - arguments
    - settings

    :returns: list of ``Job`` instances, one per argument vector
    """
    # set up list of arguments (one single-element arg list per job)
    inputvec = [[3], [5], [10], [20]]

    # create empty job vector
    jobs = []

    # create job objects
    for arg in inputvec:
        # The default queue used by the Job class is all.q. You must specify
        # the `queue` keyword argument if that is not the name of your queue.
        job = Job(compute_factorial, arg, queue='all.q')
        jobs.append(job)

    return jobs


def main():
    """
    Run a set of jobs on the cluster and print each job's result.
    """
    logging.captureWarnings(True)
    logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +
                                '%(message)s'), level=logging.INFO)

    print("=====================================")
    print("========   Submit and Wait   ========")
    print("=====================================")
    print("")

    functionJobs = make_jobs()

    print("sending function jobs to cluster")
    print("")

    # Blocks until all jobs have finished; returns their results in order.
    job_outputs = process_jobs(functionJobs, max_processes=4)

    print("results from each job")
    for (i, result) in enumerate(job_outputs):
        print("Job {0}- result: {1}".format(i, result))


if __name__ == "__main__":
    main()
gpl-3.0
dajhorn/ps2binutils
gdb/testsuite/gdb.python/py-pp-registration.py
32
2383
# Copyright (C) 2010-2015 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# This file is part of the GDB testsuite.  It tests python pretty
# printer registration.

import re

import gdb.types
import gdb.printing


def lookup_function_lookup_test(val):
    """Lookup function for type 'function_lookup_test'.

    Returns a printer object for values of that type, or None for any
    other type so the search continues with other registered printers.
    """

    class PrintFunctionLookup(object):
        """Pretty-printer yielding "x=<...> y=<...>" for the value's fields."""

        def __init__(self, val):
            self.val = val

        def to_string(self):
            return ("x=<" + str(self.val["x"]) +
                    "> y=<" + str(self.val["y"]) + ">")

    typename = gdb.types.get_basic_type(val.type).tag
    # Note: typename could be None.
    if typename == "function_lookup_test":
        return PrintFunctionLookup(val)
    return None


class pp_s1 (object):
    """Printer for 'struct s' registered by build_pretty_printer1."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # Fix: use the extracted fields instead of re-reading self.val
        # (the locals were previously assigned and then ignored).
        a = self.val["a"]
        b = self.val["b"]
        return "s1 a=<" + str(a) + "> b=<" + str(b) + ">"


class pp_s2 (object):
    """Printer for 'struct s' registered by build_pretty_printer2."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # Fix: use the extracted fields instead of re-reading self.val.
        a = self.val["a"]
        b = self.val["b"]
        return "s2 a=<" + str(a) + "> b=<" + str(b) + ">"


def build_pretty_printer1():
    """Build the first "pp-test" printer collection (uses pp_s1)."""
    pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-test")
    pp.add_printer('struct s', '^struct s$', pp_s1)
    pp.add_printer('s', '^s$', pp_s1)
    return pp


def build_pretty_printer2():
    # This intentionally has the same name as build_pretty_printer1.
    # It is used to test the "replace" functionality of
    # register_pretty_printer.
    pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-test")
    pp.add_printer('struct s', '^struct s$', pp_s2)
    pp.add_printer('s', '^s$', pp_s2)
    return pp

# Note: Registering the printers is done in the .exp file.
gpl-2.0
StealthMicro/OctoPi-Makerbot
env/Lib/site-packages/serial/rfc2217.py
10
58347
#! python # # Python Serial Port Extension for Win32, Linux, BSD, Jython # see __init__.py # # This module implements a RFC2217 compatible client. RF2217 descibes a # protocol to access serial ports over TCP/IP and allows setting the baud rate, # modem control lines etc. # # (C) 2001-2009 Chris Liechti <cliechti@gmx.net> # this is distributed under a free software license, see license.txt # TODO: # - setting control line -> answer is not checked (had problems with one of the # severs). consider implementing a compatibility mode flag to make check # conditional # - write timeout not implemented at all ############################################################################## # observations and issues with servers #============================================================================= # sredird V2.2.1 # - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz # - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding # [105 1] instead of the actual value. # - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger # numbers than 2**32? # - To get the signature [COM_PORT_OPTION 0] has to be sent. # - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done #============================================================================= # telnetcpcd (untested) # - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz # - To get the signature [COM_PORT_OPTION] w/o data has to be sent. #============================================================================= # ser2net # - does not negotiate BINARY or COM_PORT_OPTION for his side but at least # acknowledges that the client activates these options # - The configuration may be that the server prints a banner. As this client # implementation does a flushInput on connect, this banner is hidden from # the user application. # - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one # second. 
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
#   2000:telnet:0:/dev/ttyS0:9600 remctl banner
##############################################################################

# How to identify ports? pySerial might want to support other protocols in the
# future, so lets use an URL scheme.
# for RFC2217 compliant servers we will use this:
#    rfc2217://<host>:<port>[/option[/option...]]
#
# options:
# - "debug" print diagnostic messages
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
#   Without this option it expects that the server sends notifications
#   automatically on change (which most servers do and is according to the
#   RFC).
# the order of the options is not relevant

from serial.serialutil import *
import time
import struct
import socket
import threading
import Queue        # Python 2 module name; this file targets Python 2
import logging

# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP or including domain, whatever.
# port is 0...65535

# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
}

# telnet protocol characters
IAC = to_bytes([255])           # Interpret As Command
DONT = to_bytes([254])
DO = to_bytes([253])
WONT = to_bytes([252])
WILL = to_bytes([251])
IAC_DOUBLED = to_bytes([IAC, IAC])  # escaped IAC inside a data stream

SE = to_bytes([240])            # Subnegotiation End
NOP = to_bytes([241])           # No Operation
DM = to_bytes([242])            # Data Mark
BRK = to_bytes([243])           # Break
IP = to_bytes([244])            # Interrupt process
AO = to_bytes([245])            # Abort output
AYT = to_bytes([246])           # Are You There
EC = to_bytes([247])            # Erase Character
EL = to_bytes([248])            # Erase Line
GA = to_bytes([249])            # Go Ahead
SB = to_bytes([250])            # Subnegotiation Begin

# selected telnet options
BINARY = to_bytes([0])          # 8-bit data path
ECHO = to_bytes([1])            # echo
SGA = to_bytes([3])             # suppress go ahead

# RFC2217 telnet option
COM_PORT_OPTION = to_bytes([44])

# Client to Access Server commands (RFC 2217 sub-option codes)
SET_BAUDRATE = to_bytes([1])
SET_DATASIZE = to_bytes([2])
SET_PARITY = to_bytes([3])
SET_STOPSIZE = to_bytes([4])
SET_CONTROL = to_bytes([5])
NOTIFY_LINESTATE = to_bytes([6])
NOTIFY_MODEMSTATE = to_bytes([7])
FLOWCONTROL_SUSPEND = to_bytes([8])
FLOWCONTROL_RESUME = to_bytes([9])
SET_LINESTATE_MASK = to_bytes([10])
SET_MODEMSTATE_MASK = to_bytes([11])
PURGE_DATA = to_bytes([12])

# Access Server to Client counterparts (client code + 100)
SERVER_SET_BAUDRATE = to_bytes([101])
SERVER_SET_DATASIZE = to_bytes([102])
SERVER_SET_PARITY = to_bytes([103])
SERVER_SET_STOPSIZE = to_bytes([104])
SERVER_SET_CONTROL = to_bytes([105])
SERVER_NOTIFY_LINESTATE = to_bytes([106])
SERVER_NOTIFY_MODEMSTATE = to_bytes([107])
SERVER_FLOWCONTROL_SUSPEND = to_bytes([108])
SERVER_FLOWCONTROL_RESUME = to_bytes([109])
SERVER_SET_LINESTATE_MASK = to_bytes([110])
SERVER_SET_MODEMSTATE_MASK = to_bytes([111])
SERVER_PURGE_DATA = to_bytes([112])

# map each client command to the server answer that acknowledges it
RFC2217_ANSWER_MAP = {
    SET_BAUDRATE: SERVER_SET_BAUDRATE,
    SET_DATASIZE: SERVER_SET_DATASIZE,
    SET_PARITY: SERVER_SET_PARITY,
    SET_STOPSIZE: SERVER_SET_STOPSIZE,
    SET_CONTROL: SERVER_SET_CONTROL,
    NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
    NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
    FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
    FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
    SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
    SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
    PURGE_DATA: SERVER_PURGE_DATA,
}

# SET_CONTROL sub-command values
SET_CONTROL_REQ_FLOW_SETTING = to_bytes([0])        # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = to_bytes([1])     # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = to_bytes([2])     # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = to_bytes([3])     # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = to_bytes([4])         # Request BREAK State
SET_CONTROL_BREAK_ON = to_bytes([5])                # Set BREAK State ON
SET_CONTROL_BREAK_OFF = to_bytes([6])               # Set BREAK State OFF
SET_CONTROL_REQ_DTR = to_bytes([7])                 # Request DTR Signal State
SET_CONTROL_DTR_ON = to_bytes([8])                  # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = to_bytes([9])                 # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = to_bytes([10])                # Request RTS Signal State
SET_CONTROL_RTS_ON = to_bytes([11])                 # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = to_bytes([12])                # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = to_bytes([13])    # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = to_bytes([14])  # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTOL_IN = to_bytes([15])   # Use XON/XOFF Flow Control (inbound); name typo kept for compat
SET_CONTROL_USE_HW_FLOW_CONTOL_IN = to_bytes([16])   # Use HARDWARE Flow Control (inbound); name typo kept for compat
SET_CONTROL_USE_DCD_FLOW_CONTROL = to_bytes([17])   # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = to_bytes([18])   # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = to_bytes([19])   # Use DSR Flow Control (outbound/both)

# NOTIFY_LINESTATE bit masks
LINESTATE_MASK_TIMEOUT = 128                        # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64                  # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error LINESTATE_MASK_PARTIY_ERROR = 4 # Parity Error LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error LINESTATE_MASK_DATA_READY = 1 # Data Ready MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect) MODEMSTATE_MASK_RI = 64 # Ring Indicator MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send PURGE_RECEIVE_BUFFER = to_bytes([1]) # Purge access server receive data buffer PURGE_TRANSMIT_BUFFER = to_bytes([2]) # Purge access server transmit data buffer PURGE_BOTH_BUFFERS = to_bytes([3]) # Purge both the access server receive data buffer and the access server transmit data buffer RFC2217_PARITY_MAP = { PARITY_NONE: 1, PARITY_ODD: 2, PARITY_EVEN: 3, PARITY_MARK: 4, PARITY_SPACE: 5, } RFC2217_REVERSE_PARITY_MAP = dict((v,k) for k,v in RFC2217_PARITY_MAP.items()) RFC2217_STOPBIT_MAP = { STOPBITS_ONE: 1, STOPBITS_ONE_POINT_FIVE: 3, STOPBITS_TWO: 2, } RFC2217_REVERSE_STOPBIT_MAP = dict((v,k) for k,v in RFC2217_STOPBIT_MAP.items()) # Telnet filter states M_NORMAL = 0 M_IAC_SEEN = 1 M_NEGOTIATE = 2 # TelnetOption and TelnetSubnegotiation states REQUESTED = 'REQUESTED' ACTIVE = 'ACTIVE' INACTIVE = 'INACTIVE' REALLY_INACTIVE = 'REALLY_INACTIVE' class TelnetOption(object): """Manage a single telnet option, keeps track of DO/DONT WILL/WONT.""" def __init__(self, connection, name, option, send_yes, send_no, ack_yes, ack_no, initial_state, activation_callback=None): """Init option. 
:param connection: connection used to transmit answers :param name: a readable name for debug outputs :param send_yes: what to send when option is to be enabled. :param send_no: what to send when option is to be disabled. :param ack_yes: what to expect when remote agrees on option. :param ack_no: what to expect when remote disagrees on option. :param initial_state: options initialized with REQUESTED are tried to be enabled on startup. use INACTIVE for all others. """ self.connection = connection self.name = name self.option = option self.send_yes = send_yes self.send_no = send_no self.ack_yes = ack_yes self.ack_no = ack_no self.state = initial_state self.active = False self.activation_callback = activation_callback def __repr__(self): """String for debug outputs""" return "%s:%s(%s)" % (self.name, self.active, self.state) def process_incoming(self, command): """A DO/DONT/WILL/WONT was received for this option, update state and answer when needed.""" if command == self.ack_yes: if self.state is REQUESTED: self.state = ACTIVE self.active = True if self.activation_callback is not None: self.activation_callback() elif self.state is ACTIVE: pass elif self.state is INACTIVE: self.state = ACTIVE self.connection.telnetSendOption(self.send_yes, self.option) self.active = True if self.activation_callback is not None: self.activation_callback() elif self.state is REALLY_INACTIVE: self.connection.telnetSendOption(self.send_no, self.option) else: raise ValueError('option in illegal state %r' % self) elif command == self.ack_no: if self.state is REQUESTED: self.state = INACTIVE self.active = False elif self.state is ACTIVE: self.state = INACTIVE self.connection.telnetSendOption(self.send_no, self.option) self.active = False elif self.state is INACTIVE: pass elif self.state is REALLY_INACTIVE: pass else: raise ValueError('option in illegal state %r' % self) class TelnetSubnegotiation(object): """A object to handle subnegotiation of options. 
In this case actually sub-sub options for RFC 2217. It is used to track com port options.""" def __init__(self, connection, name, option, ack_option=None): if ack_option is None: ack_option = option self.connection = connection self.name = name self.option = option self.value = None self.ack_option = ack_option self.state = INACTIVE def __repr__(self): """String for debug outputs.""" return "%s:%s" % (self.name, self.state) def set(self, value): """request a change of the value. a request is sent to the server. if the client needs to know if the change is performed he has to check the state of this object.""" self.value = value self.state = REQUESTED self.connection.rfc2217SendSubnegotiation(self.option, self.value) if self.connection.logger: self.connection.logger.debug("SB Requesting %s -> %r" % (self.name, self.value)) def isReady(self): """check if answer from server has been received. when server rejects the change, raise a ValueError.""" if self.state == REALLY_INACTIVE: raise ValueError("remote rejected value for option %r" % (self.name)) return self.state == ACTIVE # add property to have a similar interface as TelnetOption active = property(isReady) def wait(self, timeout=3): """wait until the subnegotiation has been acknowledged or timeout. It can also throw a value error when the answer from the server does not match the value sent.""" timeout_time = time.time() + timeout while time.time() < timeout_time: time.sleep(0.05) # prevent 100% CPU load if self.isReady(): break else: raise SerialException("timeout while waiting for option %r" % (self.name)) def checkAnswer(self, suboption): """check an incoming subnegotiation block. 
the parameter already has cut off the header like sub option number and com port option value.""" if self.value == suboption[:len(self.value)]: self.state = ACTIVE else: # error propagation done in isReady self.state = REALLY_INACTIVE if self.connection.logger: self.connection.logger.debug("SB Answer %s -> %r -> %s" % (self.name, suboption, self.state)) class RFC2217Serial(SerialBase): """Serial port implementation for RFC 2217 remote serial ports.""" BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200) def open(self): """Open port with current settings. This may throw a SerialException if the port cannot be opened.""" self.logger = None self._ignore_set_control_answer = False self._poll_modem_state = False self._network_timeout = 3 if self._port is None: raise SerialException("Port must be configured before it can be used.") if self._isOpen: raise SerialException("Port is already open.") try: self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.connect(self.fromURL(self.portstr)) self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) except Exception, msg: self._socket = None raise SerialException("Could not open port %s: %s" % (self.portstr, msg)) self._socket.settimeout(5) # XXX good value? # use a thread save queue as buffer. 
it also simplifies implementing # the read timeout self._read_buffer = Queue.Queue() # to ensure that user writes does not interfere with internal # telnet/rfc2217 options establish a lock self._write_lock = threading.Lock() # name the following separately so that, below, a check can be easily done mandadory_options = [ TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE), TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED), ] # all supported telnet options self._telnet_options = [ TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED), TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED), TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED), TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE), TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED), ] + mandadory_options # RFC 2217 specific states # COM port settings self._rfc2217_port_settings = { 'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE), 'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE), 'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY), 'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE), } # There are more subnegotiation objects, combine all in one dictionary # for easy access self._rfc2217_options = { 'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA), 'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL), } self._rfc2217_options.update(self._rfc2217_port_settings) # cache for line and modem states that the server sends to us self._linestate = 0 self._modemstate = None self._modemstate_expires = 0 # RFC 2217 flow control between server and client self._remote_suspend_flow = False self._thread = threading.Thread(target=self._telnetReadLoop) self._thread.setDaemon(True) 
self._thread.setName('pySerial RFC 2217 reader thread for %s' % (self._port,)) self._thread.start() # negotiate Telnet/RFC 2217 -> send initial requests for option in self._telnet_options: if option.state is REQUESTED: self.telnetSendOption(option.send_yes, option.option) # now wait until important options are negotiated timeout_time = time.time() + self._network_timeout while time.time() < timeout_time: time.sleep(0.05) # prevent 100% CPU load if sum(o.active for o in mandadory_options) == len(mandadory_options): break else: raise SerialException("Remote does not seem to support RFC2217 or BINARY mode %r" % mandadory_options) if self.logger: self.logger.info("Negotiated options: %s" % self._telnet_options) # fine, go on, set RFC 2271 specific things self._reconfigurePort() # all things set up get, now a clean start self._isOpen = True if not self._rtscts: self.setRTS(True) self.setDTR(True) self.flushInput() self.flushOutput() def _reconfigurePort(self): """Set communication parameters on opened port.""" if self._socket is None: raise SerialException("Can only operate on open ports") # if self._timeout != 0 and self._interCharTimeout is not None: # XXX if self._writeTimeout is not None: raise NotImplementedError('writeTimeout is currently not supported') # XXX # Setup the connection # to get good performance, all parameter changes are sent first... 
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32: raise ValueError("invalid baudrate: %r" % (self._baudrate)) self._rfc2217_port_settings['baudrate'].set(struct.pack('!I', self._baudrate)) self._rfc2217_port_settings['datasize'].set(struct.pack('!B', self._bytesize)) self._rfc2217_port_settings['parity'].set(struct.pack('!B', RFC2217_PARITY_MAP[self._parity])) self._rfc2217_port_settings['stopsize'].set(struct.pack('!B', RFC2217_STOPBIT_MAP[self._stopbits])) # and now wait until parameters are active items = self._rfc2217_port_settings.values() if self.logger: self.logger.debug("Negotiating settings: %s" % (items,)) timeout_time = time.time() + self._network_timeout while time.time() < timeout_time: time.sleep(0.05) # prevent 100% CPU load if sum(o.active for o in items) == len(items): break else: raise SerialException("Remote does not accept parameter change (RFC2217): %r" % items) if self.logger: self.logger.info("Negotiated settings: %s" % (items,)) if self._rtscts and self._xonxoff: raise ValueError('xonxoff and rtscts together are not supported') elif self._rtscts: self.rfc2217SetControl(SET_CONTROL_USE_HW_FLOW_CONTROL) elif self._xonxoff: self.rfc2217SetControl(SET_CONTROL_USE_SW_FLOW_CONTROL) else: self.rfc2217SetControl(SET_CONTROL_USE_NO_FLOW_CONTROL) def close(self): """Close port""" if self._isOpen: if self._socket: try: self._socket.shutdown(socket.SHUT_RDWR) self._socket.close() except: # ignore errors. pass self._socket = None if self._thread: self._thread.join() self._isOpen = False # in case of quick reconnects, give the server some time time.sleep(0.3) def makeDeviceName(self, port): raise SerialException("there is no sensible way to turn numbers into URLs") def fromURL(self, url): """extract host and port from an URL string""" if url.lower().startswith("rfc2217://"): url = url[10:] try: # is there a "path" (our options)? 
if '/' in url: # cut away options url, options = url.split('/', 1) # process options now, directly altering self for option in options.split('/'): if '=' in option: option, value = option.split('=', 1) else: value = None if option == 'logging': logging.basicConfig() # XXX is that good to call it here? self.logger = logging.getLogger('pySerial.rfc2217') self.logger.setLevel(LOGGER_LEVELS[value]) self.logger.debug('enabled logging') elif option == 'ign_set_control': self._ignore_set_control_answer = True elif option == 'poll_modem': self._poll_modem_state = True elif option == 'timeout': self._network_timeout = float(value) else: raise ValueError('unknown option: %r' % (option,)) # get host and port host, port = url.split(':', 1) # may raise ValueError because of unpacking port = int(port) # and this if it's not a number if not 0 <= port < 65536: raise ValueError("port not in range 0...65535") except ValueError, e: raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e) return (host, port) # - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): """Return the number of characters currently in the input buffer.""" if not self._isOpen: raise portNotOpenError return self._read_buffer.qsize() def read(self, size=1): """Read size bytes from the serial port. If a timeout is set it may return less characters as requested. With no timeout it will block until the requested number of bytes is read.""" if not self._isOpen: raise portNotOpenError data = bytearray() try: while len(data) < size: if self._thread is None: raise SerialException('connection failed (reader thread died)') data.append(self._read_buffer.get(True, self._timeout)) except Queue.Empty: # -> timeout pass return bytes(data) def write(self, data): """Output the given string over the serial port. Can block if the connection is blocked. 
May raise SerialException if the connection is closed.""" if not self._isOpen: raise portNotOpenError self._write_lock.acquire() try: try: self._socket.sendall(data.replace(IAC, IAC_DOUBLED)) except socket.error, e: raise SerialException("connection failed (socket error): %s" % e) # XXX what exception if socket connection fails finally: self._write_lock.release() return len(data) def flushInput(self): """Clear input buffer, discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError self.rfc2217SendPurge(PURGE_RECEIVE_BUFFER) # empty read buffer while self._read_buffer.qsize(): self._read_buffer.get(False) def flushOutput(self): """Clear output buffer, aborting the current output and discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError self.rfc2217SendPurge(PURGE_TRANSMIT_BUFFER) def sendBreak(self, duration=0.25): """Send break condition. Timed, returns to idle state after given duration.""" if not self._isOpen: raise portNotOpenError self.setBreak(True) time.sleep(duration) self.setBreak(False) def setBreak(self, level=True): """Set break: Controls TXD. 
When active, to transmitting is possible.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('set BREAK to %s' % ('inactive', 'active')[bool(level)]) if level: self.rfc2217SetControl(SET_CONTROL_BREAK_ON) else: self.rfc2217SetControl(SET_CONTROL_BREAK_OFF) def setRTS(self, level=True): """Set terminal status line: Request To Send.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('set RTS to %s' % ('inactive', 'active')[bool(level)]) if level: self.rfc2217SetControl(SET_CONTROL_RTS_ON) else: self.rfc2217SetControl(SET_CONTROL_RTS_OFF) def setDTR(self, level=True): """Set terminal status line: Data Terminal Ready.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('set DTR to %s' % ('inactive', 'active')[bool(level)]) if level: self.rfc2217SetControl(SET_CONTROL_DTR_ON) else: self.rfc2217SetControl(SET_CONTROL_DTR_OFF) def getCTS(self): """Read terminal status line: Clear To Send.""" if not self._isOpen: raise portNotOpenError return bool(self.getModemState() & MODEMSTATE_MASK_CTS) def getDSR(self): """Read terminal status line: Data Set Ready.""" if not self._isOpen: raise portNotOpenError return bool(self.getModemState() & MODEMSTATE_MASK_DSR) def getRI(self): """Read terminal status line: Ring Indicator.""" if not self._isOpen: raise portNotOpenError return bool(self.getModemState() & MODEMSTATE_MASK_RI) def getCD(self): """Read terminal status line: Carrier Detect.""" if not self._isOpen: raise portNotOpenError return bool(self.getModemState() & MODEMSTATE_MASK_CD) # - - - platform specific - - - # None so far # - - - RFC2217 specific - - - def _telnetReadLoop(self): """read loop for the socket.""" mode = M_NORMAL suboption = None try: while self._socket is not None: try: data = self._socket.recv(1024) except socket.timeout: # just need to get out of recv form time to time to check if # still alive continue except socket.error, e: # connection fails -> terminate loop if 
self.logger: self.logger.debug("socket error in reader thread: %s" % (e,)) break if not data: break # lost connection for byte in data: if mode == M_NORMAL: # interpret as command or as data if byte == IAC: mode = M_IAC_SEEN else: # store data in read buffer or sub option buffer # depending on state if suboption is not None: suboption.append(byte) else: self._read_buffer.put(byte) elif mode == M_IAC_SEEN: if byte == IAC: # interpret as command doubled -> insert character # itself if suboption is not None: suboption.append(IAC) else: self._read_buffer.put(IAC) mode = M_NORMAL elif byte == SB: # sub option start suboption = bytearray() mode = M_NORMAL elif byte == SE: # sub option end -> process it now self._telnetProcessSubnegotiation(bytes(suboption)) suboption = None mode = M_NORMAL elif byte in (DO, DONT, WILL, WONT): # negotiation telnet_command = byte mode = M_NEGOTIATE else: # other telnet commands self._telnetProcessCommand(byte) mode = M_NORMAL elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following self._telnetNegotiateOption(telnet_command, byte) mode = M_NORMAL finally: self._thread = None if self.logger: self.logger.debug("read thread terminated") # - incoming telnet commands and options def _telnetProcessCommand(self, command): """Process commands other than DO, DONT, WILL, WONT.""" # Currently none. RFC2217 only uses negotiation and subnegotiation. if self.logger: self.logger.warning("ignoring Telnet command: %r" % (command,)) def _telnetNegotiateOption(self, command, option): """Process incoming DO, DONT, WILL, WONT.""" # check our registered telnet options and forward command to them # they know themselves if they have to answer or not known = False for item in self._telnet_options: # can have more than one match! 
as some options are duplicated for # 'us' and 'them' if item.option == option: item.process_incoming(command) known = True if not known: # handle unknown options # only answer to positive requests and deny them if command == WILL or command == DO: self.telnetSendOption((command == WILL and DONT or WONT), option) if self.logger: self.logger.warning("rejected Telnet option: %r" % (option,)) def _telnetProcessSubnegotiation(self, suboption): """Process subnegotiation, the data between IAC SB and IAC SE.""" if suboption[0:1] == COM_PORT_OPTION: if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3: self._linestate = ord(suboption[2:3]) # ensure it is a number if self.logger: self.logger.info("NOTIFY_LINESTATE: %s" % self._linestate) elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3: self._modemstate = ord(suboption[2:3]) # ensure it is a number if self.logger: self.logger.info("NOTIFY_MODEMSTATE: %s" % self._modemstate) # update time when we think that a poll would make sense self._modemstate_expires = time.time() + 0.3 elif suboption[1:2] == FLOWCONTROL_SUSPEND: self._remote_suspend_flow = True elif suboption[1:2] == FLOWCONTROL_RESUME: self._remote_suspend_flow = False else: for item in self._rfc2217_options.values(): if item.ack_option == suboption[1:2]: #~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:]) item.checkAnswer(bytes(suboption[2:])) break else: if self.logger: self.logger.warning("ignoring COM_PORT_OPTION: %r" % (suboption,)) else: if self.logger: self.logger.warning("ignoring subnegotiation: %r" % (suboption,)) # - outgoing telnet commands and options def _internal_raw_write(self, data): """internal socket write with no data escaping. 
used to send telnet stuff.""" self._write_lock.acquire() try: self._socket.sendall(data) finally: self._write_lock.release() def telnetSendOption(self, action, option): """Send DO, DONT, WILL, WONT.""" self._internal_raw_write(to_bytes([IAC, action, option])) def rfc2217SendSubnegotiation(self, option, value=''): """Subnegotiation of RFC2217 parameters.""" value = value.replace(IAC, IAC_DOUBLED) self._internal_raw_write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE])) def rfc2217SendPurge(self, value): item = self._rfc2217_options['purge'] item.set(value) # transmit desired purge type item.wait(self._network_timeout) # wait for acknowledge from the server def rfc2217SetControl(self, value): item = self._rfc2217_options['control'] item.set(value) # transmit desired control type if self._ignore_set_control_answer: # answers are ignored when option is set. compatibility mode for # servers that answer, but not the expected one... (or no answer # at all) i.e. sredird time.sleep(0.1) # this helps getting the unit tests passed else: item.wait(self._network_timeout) # wait for acknowledge from the server def rfc2217FlowServerReady(self): """check if server is ready to receive data. block for some time when not.""" #~ if self._remote_suspend_flow: #~ wait--- def getModemState(self): """get last modem state (cached value. if value is "old", request a new one. this cache helps that we don't issue to many requests when e.g. all status lines, one after the other is queried by te user (getCTS, getDSR etc.)""" # active modem state polling enabled? is the value fresh enough? 
if self._poll_modem_state and self._modemstate_expires < time.time(): if self.logger: self.logger.debug('polling modem state') # when it is older, request an update self.rfc2217SendSubnegotiation(NOTIFY_MODEMSTATE) timeout_time = time.time() + self._network_timeout while time.time() < timeout_time: time.sleep(0.05) # prevent 100% CPU load # when expiration time is updated, it means that there is a new # value if self._modemstate_expires > time.time(): if self.logger: self.logger.warning('poll for modem state failed') break # even when there is a timeout, do not generate an error just # return the last known value. this way we can support buggy # servers that do not respond to polls, but send automatic # updates. if self._modemstate is not None: if self.logger: self.logger.debug('using cached modem state') return self._modemstate else: # never received a notification from the server raise SerialException("remote sends no NOTIFY_MODEMSTATE") # assemble Serial class with the platform specific implementation and the base # for file-like behavior. for Python 2.6 and newer, that provide the new I/O # library, derive from io.RawIOBase try: import io except ImportError: # classic version with our own file-like emulation class Serial(RFC2217Serial, FileLike): pass else: # io library present class Serial(RFC2217Serial, io.RawIOBase): pass ############################################################################# # The following is code that helps implementing an RFC 2217 server. class PortManager(object): """This class manages the state of Telnet and RFC 2217. It needs a serial instance and a connection to work with. 
Connection is expected to implement a (thread safe) write function, that writes the string to the network.""" def __init__(self, serial_port, connection, logger=None): self.serial = serial_port self.connection = connection self.logger = logger self._client_is_rfc2217 = False # filter state machine self.mode = M_NORMAL self.suboption = None self.telnet_command = None # states for modem/line control events self.modemstate_mask = 255 self.last_modemstate = None self.linstate_mask = 0 # all supported telnet options self._telnet_options = [ TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED), TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED), TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE), TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE), TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED), TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok), TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok), ] # negotiate Telnet/RFC2217 -> send initial requests if self.logger: self.logger.debug("requesting initial Telnet/RFC 2217 options") for option in self._telnet_options: if option.state is REQUESTED: self.telnetSendOption(option.send_yes, option.option) # issue 1st modem state notification def _client_ok(self): """callback of telnet option. it gets called when option is activated. this one here is used to detect when the client agrees on RFC 2217. a flag is set so that other functions like check_modem_lines know if the client is ok.""" # The callback is used for we and they so if one party agrees, we're # already happy. it seems not all servers do the negotiation correctly # and i guess there are incorrect clients too.. so be happy if client # answers one or the other positively. 
self._client_is_rfc2217 = True if self.logger: self.logger.info("client accepts RFC 2217") # this is to ensure that the client gets a notification, even if there # was no change self.check_modem_lines(force_notification=True) # - outgoing telnet commands and options def telnetSendOption(self, action, option): """Send DO, DONT, WILL, WONT.""" self.connection.write(to_bytes([IAC, action, option])) def rfc2217SendSubnegotiation(self, option, value=''): """Subnegotiation of RFC 2217 parameters.""" value = value.replace(IAC, IAC_DOUBLED) self.connection.write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE])) # - check modem lines, needs to be called periodically from user to # establish polling def check_modem_lines(self, force_notification=False): modemstate = ( (self.serial.getCTS() and MODEMSTATE_MASK_CTS) | (self.serial.getDSR() and MODEMSTATE_MASK_DSR) | (self.serial.getRI() and MODEMSTATE_MASK_RI) | (self.serial.getCD() and MODEMSTATE_MASK_CD) ) # check what has changed deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0 if deltas & MODEMSTATE_MASK_CTS: modemstate |= MODEMSTATE_MASK_CTS_CHANGE if deltas & MODEMSTATE_MASK_DSR: modemstate |= MODEMSTATE_MASK_DSR_CHANGE if deltas & MODEMSTATE_MASK_RI: modemstate |= MODEMSTATE_MASK_RI_CHANGE if deltas & MODEMSTATE_MASK_CD: modemstate |= MODEMSTATE_MASK_CD_CHANGE # if new state is different and the mask allows this change, send # notification. suppress notifications when client is not rfc2217 if modemstate != self.last_modemstate or force_notification: if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification: self.rfc2217SendSubnegotiation( SERVER_NOTIFY_MODEMSTATE, to_bytes([modemstate & self.modemstate_mask]) ) if self.logger: self.logger.info("NOTIFY_MODEMSTATE: %s" % (modemstate,)) # save last state, but forget about deltas. 
# otherwise it would also notify about changing deltas which is # probably not very useful self.last_modemstate = modemstate & 0xf0 # - outgoing data escaping def escape(self, data): """this generator function is for the user. all outgoing data has to be properly escaped, so that no IAC character in the data stream messes up the Telnet state machine in the server. socket.sendall(escape(data)) """ for byte in data: if byte == IAC: yield IAC yield IAC else: yield byte # - incoming data filter def filter(self, data): """handle a bunch of incoming bytes. this is a generator. it will yield all characters not of interest for Telnet/RFC 2217. The idea is that the reader thread pushes data from the socket through this filter: for byte in filter(socket.recv(1024)): # do things like CR/LF conversion/whatever # and write data to the serial port serial.write(byte) (socket error handling code left as exercise for the reader) """ for byte in data: if self.mode == M_NORMAL: # interpret as command or as data if byte == IAC: self.mode = M_IAC_SEEN else: # store data in sub option buffer or pass it to our # consumer depending on state if self.suboption is not None: self.suboption.append(byte) else: yield byte elif self.mode == M_IAC_SEEN: if byte == IAC: # interpret as command doubled -> insert character # itself if self.suboption is not None: self.suboption.append(byte) else: yield byte self.mode = M_NORMAL elif byte == SB: # sub option start self.suboption = bytearray() self.mode = M_NORMAL elif byte == SE: # sub option end -> process it now self._telnetProcessSubnegotiation(bytes(self.suboption)) self.suboption = None self.mode = M_NORMAL elif byte in (DO, DONT, WILL, WONT): # negotiation self.telnet_command = byte self.mode = M_NEGOTIATE else: # other telnet commands self._telnetProcessCommand(byte) self.mode = M_NORMAL elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following self._telnetNegotiateOption(self.telnet_command, byte) self.mode = 
M_NORMAL # - incoming telnet commands and options def _telnetProcessCommand(self, command): """Process commands other than DO, DONT, WILL, WONT.""" # Currently none. RFC2217 only uses negotiation and subnegotiation. if self.logger: self.logger.warning("ignoring Telnet command: %r" % (command,)) def _telnetNegotiateOption(self, command, option): """Process incoming DO, DONT, WILL, WONT.""" # check our registered telnet options and forward command to them # they know themselves if they have to answer or not known = False for item in self._telnet_options: # can have more than one match! as some options are duplicated for # 'us' and 'them' if item.option == option: item.process_incoming(command) known = True if not known: # handle unknown options # only answer to positive requests and deny them if command == WILL or command == DO: self.telnetSendOption((command == WILL and DONT or WONT), option) if self.logger: self.logger.warning("rejected Telnet option: %r" % (option,)) def _telnetProcessSubnegotiation(self, suboption): """Process subnegotiation, the data between IAC SB and IAC SE.""" if suboption[0:1] == COM_PORT_OPTION: if self.logger: self.logger.debug('received COM_PORT_OPTION: %r' % (suboption,)) if suboption[1:2] == SET_BAUDRATE: backup = self.serial.baudrate try: (self.serial.baudrate,) = struct.unpack("!I", suboption[2:6]) except ValueError, e: if self.logger: self.logger.error("failed to set baud rate: %s" % (e,)) self.serial.baudrate = backup else: if self.logger: self.logger.info("changed baud rate: %s" % (self.serial.baudrate,)) self.rfc2217SendSubnegotiation(SERVER_SET_BAUDRATE, struct.pack("!I", self.serial.baudrate)) elif suboption[1:2] == SET_DATASIZE: backup = self.serial.bytesize try: (self.serial.bytesize,) = struct.unpack("!B", suboption[2:3]) except ValueError, e: if self.logger: self.logger.error("failed to set data size: %s" % (e,)) self.serial.bytesize = backup else: if self.logger: self.logger.info("changed data size: %s" % 
(self.serial.bytesize,)) self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize)) elif suboption[1:2] == SET_PARITY: backup = self.serial.parity try: self.serial.parity = RFC2217_REVERSE_PARITY_MAP[struct.unpack("!B", suboption[2:3])[0]] except ValueError, e: if self.logger: self.logger.error("failed to set parity: %s" % (e,)) self.serial.parity = backup else: if self.logger: self.logger.info("changed parity: %s" % (self.serial.parity,)) self.rfc2217SendSubnegotiation( SERVER_SET_PARITY, struct.pack("!B", RFC2217_PARITY_MAP[self.serial.parity]) ) elif suboption[1:2] == SET_STOPSIZE: backup = self.serial.stopbits try: self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[struct.unpack("!B", suboption[2:3])[0]] except ValueError, e: if self.logger: self.logger.error("failed to set stop bits: %s" % (e,)) self.serial.stopbits = backup else: if self.logger: self.logger.info("changed stop bits: %s" % (self.serial.stopbits,)) self.rfc2217SendSubnegotiation( SERVER_SET_STOPSIZE, struct.pack("!B", RFC2217_STOPBIT_MAP[self.serial.stopbits]) ) elif suboption[1:2] == SET_CONTROL: if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING: if self.serial.xonxoff: self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL) elif self.serial.rtscts: self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL) else: self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL) elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL: self.serial.xonxoff = False self.serial.rtscts = False if self.logger: self.logger.info("changed flow control to None") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL) elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL: self.serial.xonxoff = True if self.logger: self.logger.info("changed flow control to XON/XOFF") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL) elif suboption[2:3] == 
SET_CONTROL_USE_HW_FLOW_CONTROL: self.serial.rtscts = True if self.logger: self.logger.info("changed flow control to RTS/CTS") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL) elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE: if self.logger: self.logger.warning("requested break state - not implemented") pass # XXX needs cached value elif suboption[2:3] == SET_CONTROL_BREAK_ON: self.serial.setBreak(True) if self.logger: self.logger.info("changed BREAK to active") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON) elif suboption[2:3] == SET_CONTROL_BREAK_OFF: self.serial.setBreak(False) if self.logger: self.logger.info("changed BREAK to inactive") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF) elif suboption[2:3] == SET_CONTROL_REQ_DTR: if self.logger: self.logger.warning("requested DTR state - not implemented") pass # XXX needs cached value elif suboption[2:3] == SET_CONTROL_DTR_ON: self.serial.setDTR(True) if self.logger: self.logger.info("changed DTR to active") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON) elif suboption[2:3] == SET_CONTROL_DTR_OFF: self.serial.setDTR(False) if self.logger: self.logger.info("changed DTR to inactive") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF) elif suboption[2:3] == SET_CONTROL_REQ_RTS: if self.logger: self.logger.warning("requested RTS state - not implemented") pass # XXX needs cached value #~ self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON) elif suboption[2:3] == SET_CONTROL_RTS_ON: self.serial.setRTS(True) if self.logger: self.logger.info("changed RTS to active") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON) elif suboption[2:3] == SET_CONTROL_RTS_OFF: self.serial.setRTS(False) if self.logger: self.logger.info("changed RTS to inactive") self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF) #~ elif suboption[2:3] == 
SET_CONTROL_REQ_FLOW_SETTING_IN: #~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN: #~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN: #~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN: #~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL: #~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL: #~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL: elif suboption[1:2] == NOTIFY_LINESTATE: # client polls for current state self.rfc2217SendSubnegotiation( SERVER_NOTIFY_LINESTATE, to_bytes([0]) # sorry, nothing like that implemented ) elif suboption[1:2] == NOTIFY_MODEMSTATE: if self.logger: self.logger.info("request for modem state") # client polls for current state self.check_modem_lines(force_notification=True) elif suboption[1:2] == FLOWCONTROL_SUSPEND: if self.logger: self.logger.info("suspend") self._remote_suspend_flow = True elif suboption[1:2] == FLOWCONTROL_RESUME: if self.logger: self.logger.info("resume") self._remote_suspend_flow = False elif suboption[1:2] == SET_LINESTATE_MASK: self.linstate_mask = ord(suboption[2:3]) # ensure it is a number if self.logger: self.logger.info("line state mask: 0x%02x" % (self.linstate_mask,)) elif suboption[1:2] == SET_MODEMSTATE_MASK: self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number if self.logger: self.logger.info("modem state mask: 0x%02x" % (self.modemstate_mask,)) elif suboption[1:2] == PURGE_DATA: if suboption[2:3] == PURGE_RECEIVE_BUFFER: self.serial.flushInput() if self.logger: self.logger.info("purge in") self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER) elif suboption[2:3] == PURGE_TRANSMIT_BUFFER: self.serial.flushOutput() if self.logger: self.logger.info("purge out") self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER) elif suboption[2:3] == PURGE_BOTH_BUFFERS: self.serial.flushInput() self.serial.flushOutput() if self.logger: self.logger.info("purge both") self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, 
PURGE_BOTH_BUFFERS) else: if self.logger: self.logger.error("undefined PURGE_DATA: %r" % list(suboption[2:])) else: if self.logger: self.logger.error("undefined COM_PORT_OPTION: %r" % list(suboption[1:])) else: if self.logger: self.logger.warning("unknown subnegotiation: %r" % (suboption,)) # simple client test if __name__ == '__main__': import sys s = Serial('rfc2217://localhost:7000', 115200) sys.stdout.write('%s\n' % s) #~ s.baudrate = 1898 sys.stdout.write("write...\n") s.write("hello\n") s.flush() sys.stdout.write("read: %s\n" % s.read(5)) #~ s.baudrate = 19200 #~ s.databits = 7 s.close()
agpl-3.0
Lh4cKg/sl4a
python-build/python-libs/gdata/src/gdata/tlslite/utils/RSAKey.py
253
8575
"""Abstract class for RSA.""" from cryptomath import * class RSAKey: """This is an abstract base class for RSA keys. Particular implementations of RSA keys, such as L{OpenSSL_RSAKey.OpenSSL_RSAKey}, L{Python_RSAKey.Python_RSAKey}, and L{PyCrypto_RSAKey.PyCrypto_RSAKey}, inherit from this. To create or parse an RSA key, don't use one of these classes directly. Instead, use the factory functions in L{tlslite.utils.keyfactory}. """ def __init__(self, n=0, e=0): """Create a new RSA key. If n and e are passed in, the new key will be initialized. @type n: int @param n: RSA modulus. @type e: int @param e: RSA public exponent. """ raise NotImplementedError() def __len__(self): """Return the length of this key in bits. @rtype: int """ return numBits(self.n) def hasPrivateKey(self): """Return whether or not this key has a private component. @rtype: bool """ raise NotImplementedError() def hash(self): """Return the cryptoID <keyHash> value corresponding to this key. @rtype: str """ raise NotImplementedError() def getSigningAlgorithm(self): """Return the cryptoID sigAlgo value corresponding to this key. @rtype: str """ return "pkcs1-sha1" def hashAndSign(self, bytes): """Hash and sign the passed-in bytes. This requires the key to have a private component. It performs a PKCS1-SHA1 signature on the passed-in data. @type bytes: str or L{array.array} of unsigned bytes @param bytes: The value which will be hashed and signed. @rtype: L{array.array} of unsigned bytes. @return: A PKCS1-SHA1 signature on the passed-in data. """ if not isinstance(bytes, type("")): bytes = bytesToString(bytes) hashBytes = stringToBytes(sha.sha(bytes).digest()) prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) sigBytes = self.sign(prefixedHashBytes) return sigBytes def hashAndVerify(self, sigBytes, bytes): """Hash and verify the passed-in bytes with the signature. This verifies a PKCS1-SHA1 signature on the passed-in data. 
@type sigBytes: L{array.array} of unsigned bytes @param sigBytes: A PKCS1-SHA1 signature. @type bytes: str or L{array.array} of unsigned bytes @param bytes: The value which will be hashed and verified. @rtype: bool @return: Whether the signature matches the passed-in data. """ if not isinstance(bytes, type("")): bytes = bytesToString(bytes) hashBytes = stringToBytes(sha.sha(bytes).digest()) prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) return self.verify(sigBytes, prefixedHashBytes) def sign(self, bytes): """Sign the passed-in bytes. This requires the key to have a private component. It performs a PKCS1 signature on the passed-in data. @type bytes: L{array.array} of unsigned bytes @param bytes: The value which will be signed. @rtype: L{array.array} of unsigned bytes. @return: A PKCS1 signature on the passed-in data. """ if not self.hasPrivateKey(): raise AssertionError() paddedBytes = self._addPKCS1Padding(bytes, 1) m = bytesToNumber(paddedBytes) if m >= self.n: raise ValueError() c = self._rawPrivateKeyOp(m) sigBytes = numberToBytes(c) return sigBytes def verify(self, sigBytes, bytes): """Verify the passed-in bytes with the signature. This verifies a PKCS1 signature on the passed-in data. @type sigBytes: L{array.array} of unsigned bytes @param sigBytes: A PKCS1 signature. @type bytes: L{array.array} of unsigned bytes @param bytes: The value which will be verified. @rtype: bool @return: Whether the signature matches the passed-in data. """ paddedBytes = self._addPKCS1Padding(bytes, 1) c = bytesToNumber(sigBytes) if c >= self.n: return False m = self._rawPublicKeyOp(c) checkBytes = numberToBytes(m) return checkBytes == paddedBytes def encrypt(self, bytes): """Encrypt the passed-in bytes. This performs PKCS1 encryption of the passed-in data. @type bytes: L{array.array} of unsigned bytes @param bytes: The value which will be encrypted. @rtype: L{array.array} of unsigned bytes. @return: A PKCS1 encryption of the passed-in data. 
""" paddedBytes = self._addPKCS1Padding(bytes, 2) m = bytesToNumber(paddedBytes) if m >= self.n: raise ValueError() c = self._rawPublicKeyOp(m) encBytes = numberToBytes(c) return encBytes def decrypt(self, encBytes): """Decrypt the passed-in bytes. This requires the key to have a private component. It performs PKCS1 decryption of the passed-in data. @type encBytes: L{array.array} of unsigned bytes @param encBytes: The value which will be decrypted. @rtype: L{array.array} of unsigned bytes or None. @return: A PKCS1 decryption of the passed-in data or None if the data is not properly formatted. """ if not self.hasPrivateKey(): raise AssertionError() c = bytesToNumber(encBytes) if c >= self.n: return None m = self._rawPrivateKeyOp(c) decBytes = numberToBytes(m) if (len(decBytes) != numBytes(self.n)-1): #Check first byte return None if decBytes[0] != 2: #Check second byte return None for x in range(len(decBytes)-1): #Scan through for zero separator if decBytes[x]== 0: break else: return None return decBytes[x+1:] #Return everything after the separator def _rawPrivateKeyOp(self, m): raise NotImplementedError() def _rawPublicKeyOp(self, c): raise NotImplementedError() def acceptsPassword(self): """Return True if the write() method accepts a password for use in encrypting the private key. @rtype: bool """ raise NotImplementedError() def write(self, password=None): """Return a string containing the key. @rtype: str @return: A string describing the key, in whichever format (PEM or XML) is native to the implementation. """ raise NotImplementedError() def writeXMLPublicKey(self, indent=''): """Return a string containing the key. @rtype: str @return: A string describing the public key, in XML format. """ return Python_RSAKey(self.n, self.e).write(indent) def generate(bits): """Generate a new key with the specified bit length. 
@rtype: L{tlslite.utils.RSAKey.RSAKey} """ raise NotImplementedError() generate = staticmethod(generate) # ************************************************************************** # Helper Functions for RSA Keys # ************************************************************************** def _addPKCS1SHA1Prefix(self, bytes): prefixBytes = createByteArraySequence(\ [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20]) prefixedBytes = prefixBytes + bytes return prefixedBytes def _addPKCS1Padding(self, bytes, blockType): padLength = (numBytes(self.n) - (len(bytes)+3)) if blockType == 1: #Signature padding pad = [0xFF] * padLength elif blockType == 2: #Encryption padding pad = createByteArraySequence([]) while len(pad) < padLength: padBytes = getRandomBytes(padLength * 2) pad = [b for b in padBytes if b != 0] pad = pad[:padLength] else: raise AssertionError() #NOTE: To be proper, we should add [0,blockType]. However, #the zero is lost when the returned padding is converted #to a number, so we don't even bother with it. Also, #adding it would cause a misalignment in verify() padding = createByteArraySequence([blockType] + pad + [0]) paddedBytes = padding + bytes return paddedBytes
apache-2.0
waynesun09/tp-libvirt
libvirt/tests/src/libvirt_usb_hotplug_device.py
4
6819
import os
import shutil

from aexpect import ShellError
from aexpect import ShellTimeoutError

from autotest.client.shared import error

from avocado.utils import process

from virttest import data_dir
from virttest import virsh
from virttest import utils_misc
from virttest import utils_selinux
from virttest.remote import LoginError
from virttest.utils_test import libvirt
from virttest.virt_vm import VMError
from virttest.libvirt_xml.vm_xml import VMXML
from virttest.libvirt_xml.devices.controller import Controller
from virttest.libvirt_xml.devices.disk import Disk
from virttest.libvirt_xml.devices.input import Input


def run(test, params, env):
    """
    Test for hotplug usb device.

    Attaches one or more USB devices (keyboard/mouse/tablet input, or
    usb-storage disks) to a running guest, either through virsh
    attach-device/detach-device or through raw qemu monitor commands,
    then detaches them again and restores the original guest XML.

    :param test: avocado-vt test object.
    :param params: dict of test parameters selecting device type,
                   attach mechanism, count and expected outcome.
    :param env: test environment; provides the VM object.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        # Controller model/index only matter for storage hotplug.
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    # Snapshot the inactive guest XML so it can be restored in `finally`.
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        # Replace any existing usb controller at index 1 with the
        # requested controller model before the test starts.
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)

        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError), e:
        raise error.TestFail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        # Count 1 MB disks visible inside the guest; hotplug succeeded
        # when the count matches the number of attached images.
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError, detail:
            raise error.TestFail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                # Back every usb-storage device with a fresh 1M qcow2 image.
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M",
                                          disk_format="qcow2")
                os.chmod(path, 0666)
            if attach_type == "qemu_monitor":
                # Drive the hotplug through raw HMP monitor commands.
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s"
                                   % (i, path))
                    result = virsh.qemu_monitor_command(vm_name, attach_cmd,
                                                        options=opt)
                    # drive_add reports success as "OK" on stdout, not only
                    # via the exit status.
                    if result.exit_status or (result.stdout.find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s"
                                   % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type,
                                                                  usb_type, i)
                result = virsh.qemu_monitor_command(vm_name, attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                # Attach through virsh attach-device with a generated
                # device XML fragment.
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(
                        **{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2',
                                      "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(
                        **{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")
                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(
                        **{"attrs": attributes})
                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                raise error.TestFail("\nAttach device successfully in negative case."
                                     "\nExcept it fail when attach count exceed maximum."
                                     "\nDetail: %s" % result)

        # Unplug everything that was attached above, mirroring the attach
        # mechanism that was used.
        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"
                result = virsh.qemu_monitor_command(vm_name, attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError, e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                raise error.TestFail("failed to attach device.\nDetail: %s."
                                     % result)
    finally:
        # Always restore host selinux mode, guest XML and scratch files.
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
gpl-2.0
szhem/spark
examples/src/main/python/mllib/recommendation_example.py
128
2054
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Collaborative Filtering Classification Example. """ from __future__ import print_function from pyspark import SparkContext # $example on$ from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating # $example off$ if __name__ == "__main__": sc = SparkContext(appName="PythonCollaborativeFilteringExample") # $example on$ # Load and parse the data data = sc.textFile("data/mllib/als/test.data") ratings = data.map(lambda l: l.split(','))\ .map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2]))) # Build the recommendation model using Alternating Least Squares rank = 10 numIterations = 10 model = ALS.train(ratings, rank, numIterations) # Evaluate the model on training data testdata = ratings.map(lambda p: (p[0], p[1])) predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2])) ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions) MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() print("Mean Squared Error = " + str(MSE)) # Save and load model model.save(sc, "target/tmp/myCollaborativeFilter") sameModel = MatrixFactorizationModel.load(sc, "target/tmp/myCollaborativeFilter") # $example off$
apache-2.0
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/search/result_processor.py
5
5747
""" overridable result processor object to allow additional properties to be exposed """ import inspect from itertools import chain import json import logging import re import textwrap from django.conf import settings from django.core.serializers.json import DjangoJSONEncoder from .utils import _load_class DESIRED_EXCERPT_LENGTH = 100 ELLIPSIS = '<span class="search-results-ellipsis"></span>' # log appears to be standard name used for logger log = logging.getLogger(__name__) # pylint: disable=invalid-name class SearchResultProcessor(object): """ Class to post-process a search result from the search. Each @property defined herein will be exposed as a member in the json-results given to the end user Users of this search app will override this class and update setting for SEARCH_RESULT_PROCESSOR In particular, an application using this search app will want to: * override `should_remove`: - This is where an application can decide whether to deny access to the result provided * provide additional properties to be included - Mark a method as a property and it's returned value will be added into the resultset given """ _results_fields = {} _match_phrase = None def __init__(self, dictionary, match_phrase): self._results_fields = dictionary self._match_phrase = match_phrase @staticmethod def strings_in_dictionary(dictionary): """ Used by default implementation for finding excerpt """ strings = [value for value in dictionary.itervalues() if not isinstance(value, dict)] for child_dict in [dv for dv in dictionary.itervalues() if isinstance(dv, dict)]: strings.extend(SearchResultProcessor.strings_in_dictionary(child_dict)) return strings @staticmethod def find_matches(strings, words, length_hoped): """ Used by default property excerpt """ lower_words = [w.lower() for w in words] def has_match(string): """ Do any of the words match within the string """ lower_string = string.lower() for test_word in lower_words: if test_word in lower_string: return True return False 
shortened_strings = [textwrap.wrap(s) for s in strings] short_string_list = list(chain.from_iterable(shortened_strings)) matches = [ms for ms in short_string_list if has_match(ms)] cumulative_len = 0 break_at = None for idx, match in enumerate(matches): cumulative_len += len(match) if cumulative_len >= length_hoped: break_at = idx break return matches[0:break_at] @staticmethod def decorate_matches(match_in, match_word): """ decorate the matches within the excerpt """ matches = re.finditer(match_word, match_in, re.IGNORECASE) for matched_string in set([match.group() for match in matches]): match_in = match_in.replace( matched_string, getattr(settings, "SEARCH_MATCH_DECORATION", u"<b>{}</b>").format(matched_string) ) return match_in # disabling pylint violations because overriders will want to use these def should_remove(self, user): # pylint: disable=unused-argument, no-self-use """ Override this in a class in order to add in last-chance access checks to the search process Your application will want to make this decision """ return False def add_properties(self): """ Called during post processing of result Any properties defined in your subclass will get exposed as members of the result json from the search """ for property_name in [p[0] for p in inspect.getmembers(self.__class__) if isinstance(p[1], property)]: self._results_fields[property_name] = getattr(self, property_name, None) @classmethod def process_result(cls, dictionary, match_phrase, user): """ Called from within search handler. 
Finds desired subclass and decides if the result should be removed and adds properties derived from the result information """ result_processor = _load_class(getattr(settings, "SEARCH_RESULT_PROCESSOR", None), cls) srp = result_processor(dictionary, match_phrase) if srp.should_remove(user): return None try: srp.add_properties() # protect around any problems introduced by subclasses within their properties except Exception as ex: # pylint: disable=broad-except log.exception("error processing properties for %s - %s: will remove from results", json.dumps(dictionary, cls=DjangoJSONEncoder), ex.message) return None return dictionary @property def excerpt(self): """ Property to display a useful excerpt representing the matches within the results """ if "content" not in self._results_fields: return None match_words = [self._match_phrase] separate_words = self._match_phrase.split(' ') if len(separate_words) > 1: match_words.extend(self._match_phrase.split(' ')) matches = SearchResultProcessor.find_matches( SearchResultProcessor.strings_in_dictionary(self._results_fields["content"]), match_words, DESIRED_EXCERPT_LENGTH ) excerpt_text = ELLIPSIS.join(matches) for match_word in match_words: excerpt_text = SearchResultProcessor.decorate_matches(excerpt_text, match_word) return excerpt_text
agpl-3.0
Conan-Kudo/snapd
tests/lib/snaps/test-snapd-python-webserver/server.py
13
1271
#!/usr/bin/python3 import os import sys import urllib.request from http.server import HTTPServer, SimpleHTTPRequestHandler class XkcdRequestHandler(SimpleHTTPRequestHandler): XKCD_URL = "http://xkcd.com/" XKCD_IMG_URL = "http://imgs.xkcd.com/" def _mini_proxy(self, url): fp = urllib.request.urlopen(url) body = fp.read() info = fp.info() self.send_response(200, "ok") for k, v in info.items(): self.send_header(k, v) self.end_headers() self.wfile.write(body) def do_GET(self): if self.path.startswith("/xkcd/"): url = self.XKCD_URL + self.path[len("/xkcd/"):] return self._mini_proxy(url) elif self.path.startswith("/img/xkcd/"): url = self.XKCD_IMG_URL + self.path[len("/img/xkcd/"):] return self._mini_proxy(url) else: return super(XkcdRequestHandler, self).do_GET() if __name__ == "__main__": # we start in the snappy base directory, ensure we are in "www" os.chdir(os.path.dirname(__file__) + "/../www") if len(sys.argv) > 1: port = int(sys.argv[1]) else: port = 80 httpd = HTTPServer(('', port), XkcdRequestHandler) httpd.serve_forever()
gpl-3.0
pschmitt/home-assistant
homeassistant/components/denon/media_player.py
7
8655
"""Support for Denon Network Receivers.""" import logging import telnetlib import voluptuous as vol from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity from homeassistant.components.media_player.const import ( SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, ) from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Music station" SUPPORT_DENON = ( SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE ) SUPPORT_MEDIA_MODES = ( SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) NORMAL_INPUTS = { "Cd": "CD", "Dvd": "DVD", "Blue ray": "BD", "TV": "TV", "Satellite / Cable": "SAT/CBL", "Game": "GAME", "Game2": "GAME2", "Video Aux": "V.AUX", "Dock": "DOCK", } MEDIA_MODES = { "Tuner": "TUNER", "Media server": "SERVER", "Ipod dock": "IPOD", "Net/USB": "NET/USB", "Rapsody": "RHAPSODY", "Napster": "NAPSTER", "Pandora": "PANDORA", "LastFM": "LASTFM", "Flickr": "FLICKR", "Favorites": "FAVORITES", "Internet Radio": "IRADIO", "USB/IPOD": "USB/IPOD", } # Sub-modes of 'NET/USB' # {'USB': 'USB', 'iPod Direct': 'IPD', 'Internet Radio': 'IRP', # 'Favorites': 'FVP'} def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Denon platform.""" denon = DenonDevice(config[CONF_NAME], config[CONF_HOST]) if denon.update(): add_entities([denon]) class DenonDevice(MediaPlayerEntity): """Representation of a Denon device.""" def __init__(self, name, host): """Initialize the Denon device.""" self._name = name self._host = host self._pwstate = "PWSTANDBY" 
self._volume = 0 # Initial value 60dB, changed if we get a MVMAX self._volume_max = 60 self._source_list = NORMAL_INPUTS.copy() self._source_list.update(MEDIA_MODES) self._muted = False self._mediasource = "" self._mediainfo = "" self._should_setup_sources = True def _setup_sources(self, telnet): # NSFRN - Network name nsfrn = self.telnet_request(telnet, "NSFRN ?")[len("NSFRN ") :] if nsfrn: self._name = nsfrn # SSFUN - Configured sources with names self._source_list = {} for line in self.telnet_request(telnet, "SSFUN ?", all_lines=True): source, configured_name = line[len("SSFUN") :].split(" ", 1) self._source_list[configured_name] = source # SSSOD - Deleted sources for line in self.telnet_request(telnet, "SSSOD ?", all_lines=True): source, status = line[len("SSSOD") :].split(" ", 1) if status == "DEL": for pretty_name, name in self._source_list.items(): if source == name: del self._source_list[pretty_name] break @classmethod def telnet_request(cls, telnet, command, all_lines=False): """Execute `command` and return the response.""" _LOGGER.debug("Sending: %s", command) telnet.write(command.encode("ASCII") + b"\r") lines = [] while True: line = telnet.read_until(b"\r", timeout=0.2) if not line: break lines.append(line.decode("ASCII").strip()) _LOGGER.debug("Received: %s", line) if all_lines: return lines return lines[0] if lines else "" def telnet_command(self, command): """Establish a telnet connection and sends `command`.""" telnet = telnetlib.Telnet(self._host) _LOGGER.debug("Sending: %s", command) telnet.write(command.encode("ASCII") + b"\r") telnet.read_very_eager() # skip response telnet.close() def update(self): """Get the latest details from the device.""" try: telnet = telnetlib.Telnet(self._host) except OSError: return False if self._should_setup_sources: self._setup_sources(telnet) self._should_setup_sources = False self._pwstate = self.telnet_request(telnet, "PW?") for line in self.telnet_request(telnet, "MV?", all_lines=True): if line.startswith("MVMAX 
"): # only grab two digit max, don't care about any half digit self._volume_max = int(line[len("MVMAX ") : len("MVMAX XX")]) continue if line.startswith("MV"): self._volume = int(line[len("MV") :]) self._muted = self.telnet_request(telnet, "MU?") == "MUON" self._mediasource = self.telnet_request(telnet, "SI?")[len("SI") :] if self._mediasource in MEDIA_MODES.values(): self._mediainfo = "" answer_codes = [ "NSE0", "NSE1X", "NSE2X", "NSE3X", "NSE4", "NSE5", "NSE6", "NSE7", "NSE8", ] for line in self.telnet_request(telnet, "NSE", all_lines=True): self._mediainfo += f"{line[len(answer_codes.pop(0)) :]}\n" else: self._mediainfo = self.source telnet.close() return True @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" if self._pwstate == "PWSTANDBY": return STATE_OFF if self._pwstate == "PWON": return STATE_ON return None @property def volume_level(self): """Volume level of the media player (0..1).""" return self._volume / self._volume_max @property def is_volume_muted(self): """Return boolean if volume is currently muted.""" return self._muted @property def source_list(self): """Return the list of available input sources.""" return sorted(list(self._source_list.keys())) @property def media_title(self): """Return the current media info.""" return self._mediainfo @property def supported_features(self): """Flag media player features that are supported.""" if self._mediasource in MEDIA_MODES.values(): return SUPPORT_DENON | SUPPORT_MEDIA_MODES return SUPPORT_DENON @property def source(self): """Return the current input source.""" for pretty_name, name in self._source_list.items(): if self._mediasource == name: return pretty_name def turn_off(self): """Turn off media player.""" self.telnet_command("PWSTANDBY") def volume_up(self): """Volume up media player.""" self.telnet_command("MVUP") def volume_down(self): """Volume down media player.""" self.telnet_command("MVDOWN") def 
set_volume_level(self, volume): """Set volume level, range 0..1.""" self.telnet_command(f"MV{round(volume * self._volume_max):02}") def mute_volume(self, mute): """Mute (true) or unmute (false) media player.""" mute_status = "ON" if mute else "OFF" self.telnet_command(f"MU{mute_status})") def media_play(self): """Play media player.""" self.telnet_command("NS9A") def media_pause(self): """Pause media player.""" self.telnet_command("NS9B") def media_stop(self): """Pause media player.""" self.telnet_command("NS9C") def media_next_track(self): """Send the next track command.""" self.telnet_command("NS9D") def media_previous_track(self): """Send the previous track command.""" self.telnet_command("NS9E") def turn_on(self): """Turn the media player on.""" self.telnet_command("PWON") def select_source(self, source): """Select input source.""" self.telnet_command(f"SI{self._source_list.get(source)}")
apache-2.0
sgiavasis/nipype
nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py
10
2351
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ....testing import assert_equal from ..dti import TrackBedpostxDeter def test_TrackBedpostxDeter_inputs(): input_map = dict(anisfile=dict(argstr='-anisfile %s', ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), bedpostxdir=dict(argstr='-bedpostxdir %s', mandatory=True, ), curveinterval=dict(argstr='-curveinterval %f', requires=['curvethresh'], ), curvethresh=dict(argstr='-curvethresh %f', ), data_dims=dict(argstr='-datadims %s', units='voxels', ), environ=dict(nohash=True, usedefault=True, ), gzip=dict(argstr='-gzip', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), inputmodel=dict(argstr='-inputmodel %s', usedefault=True, ), interpolator=dict(argstr='-interpolator %s', ), ipthresh=dict(argstr='-ipthresh %f', ), maxcomponents=dict(argstr='-maxcomponents %d', units='NA', ), min_vol_frac=dict(argstr='-bedpostxminf %d', units='NA', ), numpds=dict(argstr='-numpds %d', units='NA', ), out_file=dict(argstr='-outputfile %s', genfile=True, position=-1, ), output_root=dict(argstr='-outputroot %s', position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict(argstr='-seedfile %s', position=2, ), stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), terminal_output=dict(nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', ), ) inputs = TrackBedpostxDeter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_TrackBedpostxDeter_outputs(): output_map = dict(tracked=dict(), ) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
forcedotcom/todomvc
labs/architecture-examples/backbone.xmpp/server/bootstrap.py
27
4119
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Bootstrap a buildout-based project Simply run this script in a directory containing a buildout.cfg. The script accepts buildout command-line options, so you can use the -c option to specify an alternate configuration file. $Id$ """ import os, shutil, sys, tempfile, urllib2 from optparse import OptionParser tmpeggs = tempfile.mkdtemp() is_jython = sys.platform.startswith('java') # parsing arguments parser = OptionParser( 'This is a custom version of the zc.buildout %prog script. It is ' 'intended to meet a temporary need if you encounter problems with ' 'the zc.buildout 1.5 release.') parser.add_option("-v", "--version", dest="version", default='1.5.2', help='Use a specific zc.buildout version. 
*This ' 'bootstrap script defaults to ' '1.5.2, unlike usual buildout bootstrap scripts.*') parser.add_option("-d", "--distribute", action="store_true", dest="distribute", default=True, help="Use Disribute rather than Setuptools.") parser.add_option("-c", None, action="store", dest="config_file", help=("Specify the path to the buildout configuration " "file to be used.")) options, args = parser.parse_args() # if -c was provided, we push it back into args for buildout' main function if options.config_file is not None: args += ['-c', options.config_file] if options.version is not None: VERSION = '==%s' % options.version else: VERSION = '' USE_DISTRIBUTE = options.distribute args = args + ['bootstrap'] to_reload = False try: import pkg_resources if not hasattr(pkg_resources, '_distribute'): to_reload = True raise ImportError except ImportError: ez = {} if USE_DISTRIBUTE: exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py' ).read() in ez ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True) else: exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py' ).read() in ez ez['use_setuptools'](to_dir=tmpeggs, download_delay=0) if to_reload: reload(pkg_resources) else: import pkg_resources if sys.platform == 'win32': def quote(c): if ' ' in c: return '"%s"' % c # work around spawn lamosity on windows else: return c else: def quote (c): return c ws = pkg_resources.working_set if USE_DISTRIBUTE: requirement = 'distribute' else: requirement = 'setuptools' env = dict(os.environ, PYTHONPATH= ws.find(pkg_resources.Requirement.parse(requirement)).location ) cmd = [quote(sys.executable), '-c', quote('from setuptools.command.easy_install import main; main()'), '-mqNxd', quote(tmpeggs)] if 'bootstrap-testing-find-links' in os.environ: cmd.extend(['-f', os.environ['bootstrap-testing-find-links']]) cmd.append('zc.buildout' + VERSION) if is_jython: import subprocess exitcode = subprocess.Popen(cmd, env=env).wait() else: # Windows prefers 
this, apparently; otherwise we would prefer subprocess exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env])) assert exitcode == 0 ws.add_entry(tmpeggs) ws.require('zc.buildout' + VERSION) import zc.buildout.buildout zc.buildout.buildout.main(args) shutil.rmtree(tmpeggs)
mit
sandeepkoduri/GAE-html-to-pdf
libs/html5lib/treewalkers/__init__.py
499
5766
"""A collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. """ from __future__ import absolute_import, division, unicode_literals __all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree", "pulldom"] import sys from .. import constants from ..utils import default_etree treeWalkerCache = {} def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - The xml.dom.minidom DOM implementation "pulldom" - The xml.dom.pulldom event stream "etree" - A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). "lxml" - Optimized walker for lxml.etree "genshi" - a Genshi stream implementation - (Currently applies to the "etree" tree type only). A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree.""" treeType = treeType.lower() if treeType not in treeWalkerCache: if treeType in ("dom", "pulldom"): name = "%s.%s" % (__name__, treeType) __import__(name) mod = sys.modules[name] treeWalkerCache[treeType] = mod.TreeWalker elif treeType == "genshi": from . import genshistream treeWalkerCache[treeType] = genshistream.TreeWalker elif treeType == "lxml": from . import lxmletree treeWalkerCache[treeType] = lxmletree.TreeWalker elif treeType == "etree": from . 
import etree if implementation is None: implementation = default_etree # XXX: NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeWalker return treeWalkerCache.get(treeType) def concatenateCharacterTokens(tokens): pendingCharacters = [] for token in tokens: type = token["type"] if type in ("Characters", "SpaceCharacters"): pendingCharacters.append(token["data"]) else: if pendingCharacters: yield {"type": "Characters", "data": "".join(pendingCharacters)} pendingCharacters = [] yield token if pendingCharacters: yield {"type": "Characters", "data": "".join(pendingCharacters)} def pprint(walker): """Pretty printer for tree walkers""" output = [] indent = 0 for token in concatenateCharacterTokens(walker): type = token["type"] if type in ("StartTag", "EmptyTag"): # tag name if token["namespace"] and token["namespace"] != constants.namespaces["html"]: if token["namespace"] in constants.prefixes: ns = constants.prefixes[token["namespace"]] else: ns = token["namespace"] name = "%s %s" % (ns, token["name"]) else: name = token["name"] output.append("%s<%s>" % (" " * indent, name)) indent += 2 # attributes (sorted for consistent ordering) attrs = token["data"] for (namespace, localname), value in sorted(attrs.items()): if namespace: if namespace in constants.prefixes: ns = constants.prefixes[namespace] else: ns = namespace name = "%s %s" % (ns, localname) else: name = localname output.append("%s%s=\"%s\"" % (" " * indent, name, value)) # self-closing if type == "EmptyTag": indent -= 2 elif type == "EndTag": indent -= 2 elif type == "Comment": output.append("%s<!-- %s -->" % (" " * indent, token["data"])) elif type == "Doctype": if token["name"]: if token["publicId"]: output.append("""%s<!DOCTYPE %s "%s" "%s">""" % (" " * indent, token["name"], token["publicId"], token["systemId"] if token["systemId"] else "")) elif token["systemId"]: output.append("""%s<!DOCTYPE %s "" "%s">""" % (" " * indent, token["name"], 
token["systemId"])) else: output.append("%s<!DOCTYPE %s>" % (" " * indent, token["name"])) else: output.append("%s<!DOCTYPE >" % (" " * indent,)) elif type == "Characters": output.append("%s\"%s\"" % (" " * indent, token["data"])) elif type == "SpaceCharacters": assert False, "concatenateCharacterTokens should have got rid of all Space tokens" else: raise ValueError("Unknown token type, %s" % type) return "\n".join(output)
mit
tensorflow/model-analysis
tensorflow_model_analysis/eval_saved_model/exporter_test.py
1
6887
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for exporters. Note that we actually train and export models within these tests. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf from tensorflow_model_analysis.eval_saved_model import exporter from tensorflow_model_analysis.eval_saved_model import load from tensorflow_model_analysis.eval_saved_model import testutil from tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator class ExporterTest(testutil.TensorflowModelAnalysisTest): def _getEvalExportDir(self): return os.path.join(self._getTempDir(), 'eval_export_dir') def runTestForExporter(self, exporter_class): estimator_metadata = ( fixed_prediction_estimator .get_simple_fixed_prediction_estimator_and_metadata()) exporter_name = 'TFMA' temp_eval_export_dir = self._getEvalExportDir() exporter_instance = exporter_class( name=exporter_name, eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'], serving_input_receiver_fn=estimator_metadata[ 'serving_input_receiver_fn']) self.assertEqual(exporter_name, exporter_instance.name) estimator_metadata['estimator'].train( input_fn=estimator_metadata['train_input_fn'], steps=100) eval_export_dir = exporter_instance.export( estimator=estimator_metadata['estimator'], export_path=temp_eval_export_dir, checkpoint_path=None, eval_result=None, 
is_the_final_export=True) # Check the eval graph. eval_saved_model = load.EvalSavedModel(eval_export_dir) example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString() eval_saved_model.metrics_reset_update_get(example1) metric_values = eval_saved_model.get_metric_values() self.assertDictElementsAlmostEqual(metric_values, {'average_loss': 0.81}) # Check the serving graph. # TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default. if hasattr(tf, 'compat.v2'): imported = tf.compat.v2.saved_model.load( eval_export_dir, tags=tf.saved_model.SERVING) predictions = imported.signatures[ tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]( inputs=tf.constant([example1.SerializeToString()])) self.assertAllClose(predictions['outputs'], np.array([[0.9]])) def testFinalExporter(self): self.runTestForExporter(exporter.FinalExporter) def testLatestExporter(self): self.runTestForExporter(exporter.LatestExporter) def testAdaptToRemoveMetricsRemoveList(self): estimator_metadata = ( fixed_prediction_estimator .get_simple_fixed_prediction_estimator_and_metadata()) exporter_name = 'TFMA' temp_eval_export_dir = self._getEvalExportDir() exporter_instance = exporter.FinalExporter( name=exporter_name, eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'], serving_input_receiver_fn=estimator_metadata[ 'serving_input_receiver_fn']) exporter_instance = exporter.adapt_to_remove_metrics( exporter_instance, ['average_loss']) self.assertEqual(exporter_name, exporter_instance.name) estimator_metadata['estimator'].train( input_fn=estimator_metadata['train_input_fn'], steps=100) eval_export_dir = exporter_instance.export( estimator=estimator_metadata['estimator'], export_path=temp_eval_export_dir, checkpoint_path=None, eval_result=None, is_the_final_export=True) # Check the eval graph. 
eval_saved_model = load.EvalSavedModel(eval_export_dir) example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString() eval_saved_model.metrics_reset_update_get(example1) metric_values = eval_saved_model.get_metric_values() self.assertNotIn('average_loss', metric_values) # Check the serving graph. # TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default. if hasattr(tf, 'compat.v2'): imported = tf.compat.v2.saved_model.load( eval_export_dir, tags=tf.saved_model.SERVING) predictions = imported.signatures[ tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]( inputs=tf.constant([example1.SerializeToString()])) self.assertAllClose(predictions['outputs'], np.array([[0.9]])) def testAdaptToRemoveMetricsRemoveFn(self): estimator_metadata = ( fixed_prediction_estimator .get_simple_fixed_prediction_estimator_and_metadata()) exporter_name = 'TFMA' temp_eval_export_dir = self._getEvalExportDir() exporter_instance = exporter.FinalExporter( name=exporter_name, eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'], serving_input_receiver_fn=estimator_metadata[ 'serving_input_receiver_fn']) exporter_instance = exporter.adapt_to_remove_metrics( exporter_instance, lambda key: key.endswith('loss')) self.assertEqual(exporter_name, exporter_instance.name) estimator_metadata['estimator'].train( input_fn=estimator_metadata['train_input_fn'], steps=100) eval_export_dir = exporter_instance.export( estimator=estimator_metadata['estimator'], export_path=temp_eval_export_dir, checkpoint_path=None, eval_result=None, is_the_final_export=True) # Check the eval graph. eval_saved_model = load.EvalSavedModel(eval_export_dir) example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString() eval_saved_model.metrics_reset_update_get(example1) metric_values = eval_saved_model.get_metric_values() self.assertNotIn('average_loss', metric_values) # Check the serving graph. # TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default. 
if hasattr(tf, 'compat.v2'): imported = tf.compat.v2.saved_model.load( eval_export_dir, tags=tf.saved_model.SERVING) predictions = imported.signatures[ tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]( inputs=tf.constant([example1.SerializeToString()])) self.assertAllClose(predictions['outputs'], np.array([[0.9]])) if __name__ == '__main__': tf.test.main()
apache-2.0
caesar2164/edx-platform
lms/djangoapps/course_api/forms.py
33
2327
""" Course API forms """ from collections import namedtuple from django.core.exceptions import ValidationError from django.forms import Form, CharField from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from openedx.core.djangoapps.util.forms import ExtendedNullBooleanField class UsernameValidatorMixin(object): """ Mixin class for validating the username parameter. """ def clean_username(self): """ Ensures the username is provided unless the request is made as an anonymous user. """ username = self.cleaned_data.get('username') return username or '' class CourseDetailGetForm(UsernameValidatorMixin, Form): """ A form to validate query parameters in the course detail endpoint """ username = CharField(required=False) course_key = CharField(required=True) def clean_course_key(self): """ Ensure a valid `course_key` was provided. """ course_key_string = self.cleaned_data['course_key'] try: return CourseKey.from_string(course_key_string) except InvalidKeyError: raise ValidationError("'{}' is not a valid course key.".format(unicode(course_key_string))) class CourseListGetForm(UsernameValidatorMixin, Form): """ A form to validate query parameters in the course list retrieval endpoint """ username = CharField(required=False) org = CharField(required=False) # white list of all supported filter fields filter_type = namedtuple('filter_type', ['param_name', 'field_name']) supported_filters = [ filter_type(param_name='mobile', field_name='mobile_available'), ] mobile = ExtendedNullBooleanField(required=False) def clean(self): """ Return cleaned data, including additional filters. """ cleaned_data = super(CourseListGetForm, self).clean() # create a filter for all supported filter fields filter_ = dict() for supported_filter in self.supported_filters: if cleaned_data.get(supported_filter.param_name) is not None: filter_[supported_filter.field_name] = cleaned_data[supported_filter.param_name] cleaned_data['filter_'] = filter_ or None return cleaned_data
agpl-3.0
puttarajubr/commcare-hq
corehq/apps/es/tests.py
1
9490
"""Tests for the HQESQuery builder, ESQuerySet wrapper and facet helpers."""
import json
from unittest import TestCase

from corehq.elastic import ESError, SIZE_LIMIT

from .es_query import HQESQuery, ESQuerySet
from . import facets
from . import filters
from . import forms, users


class ElasticTestMixin(object):
    """Mixin providing a helper for comparing generated ES query JSON."""

    def checkQuery(self, query, json_output):
        """Asserts that query.raw_query equals the expected json_output."""
        msg = "Expected Query:\n{}\nGenerated Query:\n{}".format(
            json.dumps(json_output, indent=4),
            query.dumps(pretty=True),
        )
        # NOTE: This method thinks [a, b, c] != [b, c, a]
        self.assertEqual(query.raw_query, json_output, msg=msg)


class TestESQuery(ElasticTestMixin, TestCase):
    maxDiff = 1000

    def test_basic_query(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"match_all": {}}
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "size": SIZE_LIMIT
        }
        self.checkQuery(HQESQuery('forms'), json_output)

    def test_query_size(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"match_all": {}}
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "size": 0
        }

        # use `is not None`; 0 or 1000000 == 1000000
        self.checkQuery(HQESQuery('forms').size(0), json_output)
        json_output['size'] = 123
        self.checkQuery(HQESQuery('forms').size(123), json_output)

    def test_form_query(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"term": {"doc_type": "xforminstance"}},
                            {"not": {"missing": {"field": "xmlns"}}},
                            {"not": {"missing": {"field": "form.meta.userID"}}},
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "size": SIZE_LIMIT
        }
        query = forms.FormES()
        self.checkQuery(query, json_output)

    def test_user_query(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"term": {"is_active": True}},
                            {"term": {"base_doc": "couchuser"}},
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "size": SIZE_LIMIT
        }
        query = users.UserES()
        self.checkQuery(query, json_output)

    def test_filtered_forms(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"term": {"domain.exact": "zombocom"}},
                            {"term": {"xmlns.exact": "banana"}},
                            {"term": {"doc_type": "xforminstance"}},
                            {"not": {"missing": {"field": "xmlns"}}},
                            {"not": {"missing": {"field": "form.meta.userID"}}},
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "size": SIZE_LIMIT
        }
        query = forms.FormES()\
            .filter(filters.domain("zombocom"))\
            .xmlns('banana')
        self.checkQuery(query, json_output)


class TestESQuerySet(TestCase):
    # A captured example of a raw Elasticsearch response, used as a fixture.
    example_response = {
        u'_shards': {u'failed': 0, u'successful': 5, u'total': 5},
        u'hits': {u'hits': [
            {
                u'_id': u'8063dff5-460b-46f2-b4d0-5871abfd97d4',
                u'_index': u'xforms_1cce1f049a1b4d864c9c25dc42648a45',
                u'_score': 1.0,
                u'_type': u'xform',
                u'fields': {
                    u'app_id': u'fe8481a39c3738749e6a4766fca99efd',
                    u'doc_type': u'xforminstance',
                    u'domain': u'mikesproject',
                    u'xmlns': u'http://openrosa.org/formdesigner/3a7cc07c-551c-4651-ab1a-d60be3017485'
                }
            },
            {
                u'_id': u'dc1376cd-0869-4c13-a267-365dfc2fa754',
                u'_index': u'xforms_1cce1f049a1b4d864c9c25dc42648a45',
                u'_score': 1.0,
                u'_type': u'xform',
                u'fields': {
                    u'app_id': u'3d622620ca00d7709625220751a7b1f9',
                    u'doc_type': u'xforminstance',
                    u'domain': u'mikesproject',
                    u'xmlns': u'http://openrosa.org/formdesigner/54db1962-b938-4e2b-b00e-08414163ead4'
                }
            }
        ],
            u'max_score': 1.0,
            u'total': 5247
        },
        u'timed_out': False,
        u'took': 4
    }
    # A captured example of an ES error response.
    example_error = {u'error': u'IndexMissingException[[xforms_123jlajlaf] missing]',
                     u'status': 404}

    def test_response(self):
        hits = [
            {
                u'app_id': u'fe8481a39c3738749e6a4766fca99efd',
                u'doc_type': u'xforminstance',
                u'domain': u'mikesproject',
                u'xmlns': u'http://openrosa.org/formdesigner/3a7cc07c-551c-4651-ab1a-d60be3017485'
            },
            {
                u'app_id': u'3d622620ca00d7709625220751a7b1f9',
                u'doc_type': u'xforminstance',
                u'domain': u'mikesproject',
                u'xmlns': u'http://openrosa.org/formdesigner/54db1962-b938-4e2b-b00e-08414163ead4'
            }
        ]
        fields = [u'app_id', u'doc_type', u'domain', u'xmlns']
        response = ESQuerySet(
            self.example_response,
            HQESQuery('forms').fields(fields)
        )
        # assertEquals is a deprecated alias; use the canonical assertEqual.
        self.assertEqual(response.total, 5247)
        self.assertEqual(response.hits, hits)

    def test_error(self):
        with self.assertRaises(ESError):
            ESQuerySet(self.example_error, HQESQuery('forms'))


class TestESFacet(ElasticTestMixin, TestCase):

    def test_terms_facet(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"match_all": {}}
                        ]
                    },
                    "query": {"match_all": {}}
                }
            },
            "facets": {
                "babies_saved": {
                    "terms": {
                        "field": "babies.count",
                        "size": 10,
                        "shard_size": SIZE_LIMIT,
                    }
                }
            },
            "size": SIZE_LIMIT,
        }
        query = HQESQuery('forms')\
            .terms_facet('babies.count', 'babies_saved', size=10)
        self.checkQuery(query, json_output)

    def test_facet_response(self):
        example_response = {
            "hits": {},
            "shards": {},
            "facets": {
                "user": {
                    "_type": "terms",
                    "missing": 0,
                    "total": 3406,
                    "other": 619,
                    "terms": [
                        {
                            "term": "92647b9eafd9ea5ace2d1470114dbddd",
                            "count": 579
                        },
                        {
                            "term": "df5123010b24fc35260a84547148de93",
                            "count": 310
                        },
                        {
                            "term": "df5123010b24fc35260a84547148d47e",
                            "count": 303
                        },
                        {
                            "term": "7334d1ab1cd8847c69fba75043ed43d3",
                            "count": 298
                        }
                    ]
                }
            }
        }
        expected_output = {
            "92647b9eafd9ea5ace2d1470114dbddd": 579,
            "df5123010b24fc35260a84547148de93": 310,
            "df5123010b24fc35260a84547148d47e": 303,
            "7334d1ab1cd8847c69fba75043ed43d3": 298,
        }
        query = HQESQuery('forms')\
            .terms_facet('form.meta.userID', 'user', size=10)
        res = ESQuerySet(example_response, query)
        output = res.facets.user.counts_by_term()
        self.assertEqual(output, expected_output)

    def test_bad_facet_name(self):
        # Facet names that collide with the field path are rejected.
        with self.assertRaises(AssertionError):
            HQESQuery('forms')\
                .terms_facet('form.meta.userID', 'form.meta.userID', size=10)

    def test_query(self):
        json_output = {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            {"match_all": {}}
                        ]
                    },
                    "query": {"fancy_query": {"foo": "bar"}}
                }
            },
            "size": SIZE_LIMIT
        }
        query = HQESQuery('forms').set_query({"fancy_query": {"foo": "bar"}})
        self.checkQuery(query, json_output)
bsd-3-clause
lesserwhirls/scipy-cwt
scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py
11
1437
from scipy import * from scipy.sparse.linalg import lobpcg from symeig import symeig from pylab import plot, show, legend, xlabel, ylabel set_printoptions(precision=3,linewidth=90) import time def test(n): x = arange(1,n+1) B = diag(1./x) y = arange(n-1,0,-1) z = arange(2*n-1,0,-2) A = diag(z)-diag(y,-1)-diag(y,1) return A,B def as2d( ar ): if ar.ndim == 2: return ar else: # Assume 1! aux = nm.array( ar, copy = False ) aux.shape = (ar.shape[0], 1) return aux def precond(x): y= linalg.cho_solve((LorU, lower),x) return as2d(y) m = 10 # Blocksize N = array(([128,256,512,1024,2048])) # Increasing matrix size data1=[] data2=[] for n in N: print '******', n A,B = test(n) # Mikota pair X = rand(n,m) X = linalg.orth(X) tt = time.clock() (LorU, lower) = linalg.cho_factor(A, lower=0, overwrite_a=0) eigs,vecs = lobpcg.lobpcg(X,A,B,operatorT = precond, residualTolerance = 1e-4, maxIterations = 40) data1.append(time.clock()-tt) eigs = sort(eigs) print print 'Results by LOBPCG' print print n,eigs tt = time.clock() w,v=symeig(A,B,range=(1,m)) data2.append(time.clock()-tt) print print 'Results by symeig' print print n, w xlabel(r'Size $n$') ylabel(r'Elapsed time $t$') plot(N,data1,label='LOBPCG') plot(N,data2,label='SYMEIG') legend() show()
bsd-3-clause
wwj718/ANALYSE
common/lib/xmodule/xmodule/modulestore/tests/sample_courses.py
29
9611
# encoding: utf-8
"""
The data type and use of it for declaratively creating test courses.
"""
# used to create course subtrees in ModuleStoreTestCase.create_test_course
# adds to self properties w/ the given block_id which hold the UsageKey for easy retrieval.
# fields is a dictionary of keys and values. sub_tree is a collection of BlockInfo
from collections import namedtuple
import datetime

# Declarative description of one block in a course tree:
# (block_id, category, fields dict, list of child BlockInfo).
BlockInfo = namedtuple('BlockInfo', 'block_id, category, fields, sub_tree')  # pylint: disable=invalid-name

# Default two-chapter course tree used when a test doesn't supply its own.
default_block_info_tree = [  # pylint: disable=invalid-name
    BlockInfo(
        'chapter_x', 'chapter', {}, [
            BlockInfo(
                'sequential_x1', 'sequential', {}, [
                    BlockInfo(
                        'vertical_x1a', 'vertical', {}, [
                            BlockInfo('problem_x1a_1', 'problem', {}, []),
                            BlockInfo('problem_x1a_2', 'problem', {}, []),
                            BlockInfo('problem_x1a_3', 'problem', {}, []),
                            BlockInfo('html_x1a_1', 'html', {}, []),
                        ]
                    )
                ]
            )
        ]
    ),
    BlockInfo(
        'chapter_y', 'chapter', {}, [
            BlockInfo(
                'sequential_y1', 'sequential', {}, [
                    BlockInfo(
                        'vertical_y1a', 'vertical', {}, [
                            BlockInfo('problem_y1a_1', 'problem', {}, []),
                            BlockInfo('problem_y1a_2', 'problem', {}, []),
                            BlockInfo('problem_y1a_3', 'problem', {}, []),
                        ]
                    )
                ]
            )
        ]
    )
]

# equivalent to toy course in xml
# NOTE: the string data below is fixture content reproduced verbatim
# (including the merge-conflict markers inside the "secret:toylab" html,
# which are part of the test data) -- do not "clean it up".
TOY_BLOCK_INFO_TREE = [
    BlockInfo(
        'Overview', "chapter", {"display_name": "Overview"}, [
            BlockInfo(
                "Toy_Videos", "videosequence", {
                    "xml_attributes": {"filename": ["", None]},
                    "display_name": "Toy Videos",
                    "format": "Lecture Sequence"
                }, [
                    BlockInfo(
                        "secret:toylab", "html", {
                            "data": "<b>Lab 2A: Superposition Experiment</b>\n\n<<<<<<< Updated upstream\n<p>Isn't the toy course great?</p>\n\n<p>Let's add some markup that uses non-ascii characters.\nFor example, we should be able to write words like encyclop&aelig;dia, or foreign words like fran&ccedil;ais.\nLooking beyond latin-1, we should handle math symbols: &pi;r&sup2 &le; &#8734.\nAnd it shouldn't matter if we use entities or numeric codes &mdash; &Omega; &ne; &pi; &equiv; &#937; &#8800; &#960;.\n</p>\n=======\n<p>Isn't the toy course great? — &le;</p>\n>>>>>>> Stashed changes\n",
                            "xml_attributes": {"filename": ["html/secret/toylab.xml", "html/secret/toylab.xml"]},
                            "display_name": "Toy lab"
                        }, []
                    ),
                    BlockInfo(
                        "toyjumpto", "html", {
                            "data": "<a href=\"/jump_to_id/vertical_test\">This is a link to another page and some Chinese 四節比分和七年前</a> <p>Some more Chinese 四節比分和七年前</p>\n",
                            "xml_attributes": {"filename": ["html/toyjumpto.xml", "html/toyjumpto.xml"]}
                        }, []),
                    BlockInfo(
                        "toyhtml", "html", {
                            "data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>",
                            "xml_attributes": {"filename": ["html/toyhtml.xml", "html/toyhtml.xml"]}
                        }, []),
                    BlockInfo(
                        "nonportable", "html", {
                            "data": "<a href=\"/static/foo.jpg\">link</a>\n",
                            "xml_attributes": {"filename": ["html/nonportable.xml", "html/nonportable.xml"]}
                        }, []),
                    BlockInfo(
                        "nonportable_link", "html", {
                            "data": "<a href=\"/jump_to_id/nonportable_link\">link</a>\n\n",
                            "xml_attributes": {"filename": ["html/nonportable_link.xml", "html/nonportable_link.xml"]}
                        }, []),
                    BlockInfo(
                        "badlink", "html", {
                            "data": "<img src=\"/static//file.jpg\" />\n",
                            "xml_attributes": {"filename": ["html/badlink.xml", "html/badlink.xml"]}
                        }, []),
                    BlockInfo(
                        "with_styling", "html", {
                            "data": "<p style=\"font:italic bold 72px/30px Georgia, serif; color: red; \">Red text here</p>",
                            "xml_attributes": {"filename": ["html/with_styling.xml", "html/with_styling.xml"]}
                        }, []),
                    BlockInfo(
                        "just_img", "html", {
                            "data": "<img src=\"/static/foo_bar.jpg\" />",
                            "xml_attributes": {"filename": ["html/just_img.xml", "html/just_img.xml"]}
                        }, []),
                    BlockInfo(
                        "Video_Resources", "video", {
                            "youtube_id_1_0": "1bK-WdDi6Qw",
                            "display_name": "Video Resources"
                        }, []),
                ]),
            BlockInfo(
                "Welcome", "video", {"data": "", "youtube_id_1_0": "p2Q6BrNhdh8", "display_name": "Welcome"}, []
            ),
            BlockInfo(
                "video_123456789012", "video", {"data": "", "youtube_id_1_0": "p2Q6BrNhdh8", "display_name": "Test Video"}, []
            ),
            BlockInfo(
                "video_4f66f493ac8f", "video", {"youtube_id_1_0": "p2Q6BrNhdh8"}, []
            )
        ]
    ),
    BlockInfo(
        "secret:magic", "chapter", {
            "xml_attributes": {"filename": ["chapter/secret/magic.xml", "chapter/secret/magic.xml"]}
        }, [
            BlockInfo(
                "toyvideo", "video", {"youtube_id_1_0": "OEoXaMPEzfMA", "display_name": "toyvideo"}, []
            )
        ]
    ),
    BlockInfo(
        "poll_test", "chapter", {}, [
            BlockInfo(
                "T1_changemind_poll_foo", "poll_question", {
                    "question": "<p>Have you changed your mind? ’</p>",
                    "answers": [{"text": "Yes", "id": "yes"}, {"text": "No", "id": "no"}],
                    "xml_attributes": {"reset": "false", "filename": ["", None]},
                    "display_name": "Change your answer"
                }, [])]
    ),
    BlockInfo(
        "vertical_container", "chapter", {
            "xml_attributes": {"filename": ["chapter/vertical_container.xml", "chapter/vertical_container.xml"]}
        }, [
            BlockInfo("vertical_sequential", "sequential", {}, [
                BlockInfo("vertical_test", "vertical", {
                    "xml_attributes": {"filename": ["vertical/vertical_test.xml", "vertical_test"]}
                }, [
                    BlockInfo(
                        "sample_video", "video", {
                            "youtube_id_1_25": "AKqURZnYqpk",
                            "youtube_id_0_75": "JMD_ifUUfsU",
                            "youtube_id_1_0": "OEoXaMPEzfM",
                            "display_name": "default",
                            "youtube_id_1_5": "DYpADpL7jAY"
                        }, []),
                    BlockInfo(
                        "separate_file_video", "video", {
                            "youtube_id_1_25": "AKqURZnYqpk",
                            "youtube_id_0_75": "JMD_ifUUfsU",
                            "youtube_id_1_0": "OEoXaMPEzfM",
                            "display_name": "default",
                            "youtube_id_1_5": "DYpADpL7jAY"
                        }, []),
                    BlockInfo(
                        "video_with_end_time", "video", {
                            "youtube_id_1_25": "AKqURZnYqpk",
                            "display_name": "default",
                            "youtube_id_1_0": "OEoXaMPEzfM",
                            "end_time": datetime.timedelta(seconds=10),
                            "youtube_id_1_5": "DYpADpL7jAY",
                            "youtube_id_0_75": "JMD_ifUUfsU"
                        }, []),
                    BlockInfo(
                        "T1_changemind_poll_foo_2", "poll_question", {
                            "question": "<p>Have you changed your mind?</p>",
                            "answers": [{"text": "Yes", "id": "yes"}, {"text": "No", "id": "no"}],
                            "xml_attributes": {"reset": "false", "filename": ["", None]},
                            "display_name": "Change your answer"
                        }, []),
                ]),
                BlockInfo("unicode", "html", {
                    "data": "…",
                    "xml_attributes": {"filename": ["", None]}
                }, [])
            ]),
        ]
    ),
    BlockInfo(
        "handout_container", "chapter", {
            "xml_attributes": {"filename": ["chapter/handout_container.xml", "chapter/handout_container.xml"]}
        }, [
            BlockInfo(
                "html_7e5578f25f79", "html", {
                    "data": "<a href=\"/static/handouts/sample_handout.txt\"> handouts</a>",
                    "xml_attributes": {"filename": ["", None]}
                }, []
            ),
        ]
    )
]
agpl-3.0
CMDann/filesync-server
src/backends/db/schemas/fsync_shard/patch_11.py
6
1270
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server

"""Create the ObjectsToDelete table for the dead-nodes cleanup scripts.

The table stands in until temporary tables can be used via pgbouncer or
by talking to the shards directly.
"""

# DDL statements run, in order, when this patch is applied.
SQL = [
    "CREATE TABLE ObjectsToDelete (id uuid, content_hash BYTEA)",
    "CREATE INDEX objectstodelete_idx ON ObjectsToDelete(id)",
    "GRANT SELECT,INSERT,DELETE,UPDATE,TRUNCATE ON TABLE ObjectsToDelete TO"
    " storage, webapp;",
]


def apply(store):
    """Apply the patch by executing each statement against the store."""
    for statement in SQL:
        store.execute(statement)
agpl-3.0
betterlife/psi
tests/utils/format_util_test.py
2
1502
# coding=utf-8
"""Unit tests for psi.app.utils.format_util helpers."""
import unittest

from psi.app.utils import format_util


class TestFormatUtil(unittest.TestCase):

    def test_decimal_to_percent(self):
        self.assertEqual("33.33%", format_util.decimal_to_percent(0.3333))
        self.assertEqual("200.00%", format_util.decimal_to_percent(2))
        self.assertEqual("0.01%", format_util.decimal_to_percent(0.0001))

    def test_format_decimal(self):
        # NOTE: assertEquals is a deprecated alias of assertEqual; use the
        # canonical name consistently throughout this class.
        self.assertEqual('20.00', str(format_util.format_decimal(20.0005)))
        self.assertEqual('0.01', str(format_util.format_decimal(0.009)))
        self.assertEqual("2.25", str(format_util.format_decimal(2.249)))
        self.assertEqual("2.24", str(format_util.format_decimal(2.244)))

    def test_pinyin_first_letter(self):
        # `input` renamed to `name` to avoid shadowing the builtin.
        name = u'张三'
        self.assertEqual('zs', format_util.get_pinyin_first_letters(name))

    def test_pinyin_first_letter_multiple(self):
        # Polyphonic characters yield all candidate spellings joined by '|'.
        name = u'朝小宇'
        self.assertEqual('cxy|zxy', format_util.get_pinyin_first_letters(name))

    def test_pinyin_last_letter_multiple(self):
        name = u'毛小调'
        self.assertEqual('mxd|mxt', format_util.get_pinyin_first_letters(name))

    def test_pinyin_middle_last_letter_multiple(self):
        name = u'毛朝调'
        self.assertEqual('mcd|mzd|mct|mzt',
                         format_util.get_pinyin_first_letters(name))

    def test_get_pinyin_for_english(self):
        # BUG FIX: this method was named get_pinyin_for_english, so the
        # unittest runner never collected it and the assertion never ran.
        # NOTE(review): assumes non-Chinese input is passed through
        # unchanged -- confirm against format_util if this now fails.
        name = u'David Allen'
        self.assertEqual('David Allen',
                         format_util.get_pinyin_first_letters(name))
mit