repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
aarondewindt/paparazzi_torrap
sw/ground_segment/python/udp_link/datalink_to_w5100.py
89
1441
#!/usr/bin/python import os import sys import socket import struct from optparse import OptionParser sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/lib/python") parser = OptionParser() parser.add_option("-d", "--destip", dest="dest_addr", help="Destination IP for messages picked up from local socket", default="192.168.25.47") parser.add_option("-p", "--destport", dest="dest_port", default=1234, help="Destination UDP port to send messages to") parser.add_option("-l", "--localport", dest="local_port", default=4243, help="Local port to listen to for UDP messages") (options, args) = parser.parse_args() msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) msock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) msock.bind(("", int(options.local_port))) # mreq = struct.pack("4sl", socket.inet_aton(telemip), socket.INADDR_ANY) # msock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) # initialize a socket, think of it as a cable # SOCK_DGRAM specifies that this is UDP destsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) while( 1 ): data = None try: data, addr = msock.recvfrom(1024) format = 'B' * (len(data)) strdata = struct.unpack( format, data ) print len( strdata ), ":", strdata # send the command destsock.sendto( data, (options.dest_addr, options.dest_port) ) except socket.error, e: print 'Exception', e
gpl-2.0
52ai/django-ccsds
tests/template_tests/filter_tests/test_cut.py
521
2269
from django.template.defaultfilters import cut from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class CutTests(SimpleTestCase): @setup({'cut01': '{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}'}) def test_cut01(self): output = self.engine.render_to_string('cut01', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "&y &y") @setup({'cut02': '{{ a|cut:"x" }} {{ b|cut:"x" }}'}) def test_cut02(self): output = self.engine.render_to_string('cut02', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "&y &y") @setup({'cut03': '{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}'}) def test_cut03(self): output = self.engine.render_to_string('cut03', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "xy xamp;y") @setup({'cut04': '{{ a|cut:"&" }} {{ b|cut:"&" }}'}) def test_cut04(self): output = self.engine.render_to_string('cut04', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "xy xamp;y") # Passing ';' to cut can break existing HTML entities, so those strings # are auto-escaped. 
@setup({'cut05': '{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}'}) def test_cut05(self): output = self.engine.render_to_string('cut05', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "x&y x&ampy") @setup({'cut06': '{{ a|cut:";" }} {{ b|cut:";" }}'}) def test_cut06(self): output = self.engine.render_to_string('cut06', {"a": "x&y", "b": mark_safe("x&y")}) self.assertEqual(output, "x&y x&ampy") class FunctionTests(SimpleTestCase): def test_character(self): self.assertEqual(cut('a string to be mangled', 'a'), ' string to be mngled') def test_characters(self): self.assertEqual(cut('a string to be mangled', 'ng'), 'a stri to be maled') def test_non_matching_string(self): self.assertEqual(cut('a string to be mangled', 'strings'), 'a string to be mangled') def test_non_string_input(self): self.assertEqual(cut(123, '2'), '13')
bsd-3-clause
scaramallion/pynetdicom
pynetdicom/tests/encoded_dimse_n_msg.py
2
15444
"""Encoding DIMSE-N messages for use in testing.""" n_er_rq_cmd = ( # Message control header byte: command set, last fragment b'\x03' # Command Group Length, length 4, value 110 b'\x00\x00\x00\x00\x04\x00\x00\x00\x6e\x00\x00\x00' # Affected SOP Class UID, length 26, value 1.2.840.10008.5.1.4.1.1.2 b'\x00\x00\x02\x00\x1a\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x00' # Command Field | length 2 | value 0x0100 b'\x00\x00\x00\x01\x02\x00\x00\x00\x00\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Affected SOP Instance UID, length 28, value 1.2.392.200036.9116.2.6.1.48 b'\x00\x00\x00\x10\x1c\x00\x00\x00\x31\x2e\x32\x2e\x33\x39\x32\x2e\x32' b'\x30\x30\x30\x33\x36\x2e\x39\x31\x31\x36\x2e\x32\x2e\x36\x2e\x31\x2e' b'\x34\x38' # Event Type ID | length 2 | value 2 b'\x00\x00\x02\x10\x02\x00\x00\x00\x02\x00' ) n_er_rq_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_er_rsp_cmd = ( b'\x03' # Command Group Length, length 4, value 76 b'\x00\x00\x00\x00\x04\x00\x00\x00\x56\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8100 b'\x00\x00\x00\x01\x02\x00\x00\x00\x00\x81' # Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Status | length 2 | value 0x0000 b'\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 
b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' # Event Type ID | length 2 | value 2 b'\x00\x00\x02\x10\x02\x00\x00\x00\x02\x00' ) n_er_rsp_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_get_rq_cmd = ( # Message control header byte: command set, last fragment b'\x03' # Command Group Length, length 4, value 120 b'\x00\x00\x00\x00\x04\x00\x00\x00\x78\x00\x00\x00' # Requested SOP Class UID, length 26, value 1.2.840.10008.5.1.4.1.1.2 b'\x00\x00\x03\x00\x1a\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x00' # Command Field | length 2 | value 0x0110 b'\x00\x00\x00\x01\x02\x00\x00\x00\x10\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0101 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01' # Requested SOP Instance UID, length 28, value 1.2.392.200036.9116.2.6.1.48 b'\x00\x00\x01\x10\x1c\x00\x00\x00\x31\x2e\x32\x2e\x33\x39\x32\x2e\x32' b'\x30\x30\x30\x33\x36\x2e\x39\x31\x31\x36\x2e\x32\x2e\x36\x2e\x31\x2e' b'\x34\x38' # Attribute Identifier List, length 12, value 0x7fe0 0x0010 0x0000 0x0000 0xffff 0xffff b'\x00\x00\x05\x10\x0c\x00\x00\x00\xe0\x7f\x10\x00\x00\x00\x00\x00' b'\xff\xff\xff\xff' ) n_get_rsp_cmd = ( b'\x03' # Command Group Length, length 4, value 76 b'\x00\x00\x00\x00\x04\x00\x00\x00\x4c\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8110 b'\x00\x00\x00\x01\x02\x00\x00\x00\x10\x81' # Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | 
value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Status | length 2 | value 0x0000 b'\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' ) n_get_rsp_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_delete_rq_cmd = ( # Message control header byte: command set, last fragment b'\x03' # Command Group Length, length 4, value 58 b'\x00\x00\x00\x00\x04\x00\x00\x00\x3a\x00\x00\x00' # Requested SOP Class UID, length 6, value 1.2.3 b'\x00\x00\x03\x00\x06\x00\x00\x00\x31\x2e\x32\x2e\x33\x00' # Command Field | length 2 | value 0x0150 b'\x00\x00\x00\x01\x02\x00\x00\x00\x50\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0101 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01' # Requested SOP Instance UID, length 6, value 1.2.30 b'\x00\x00\x01\x10\x06\x00\x00\x00\x31\x2e\x32\x2e\x33\x30' ) n_delete_rsp_cmd = ( b'\x03' # Command Group Length, length 4, value 76 b'\x00\x00\x00\x00\x04\x00\x00\x00\x4c\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8150 b'\x00\x00\x00\x01\x02\x00\x00\x00\x50\x81' # Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | value 0x0101 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01' # Status | length 2 | value 0xC201 b'\x00\x00\x00\x09\x02\x00\x00\x00\x01\xC2' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' ) 
n_set_rq_cmd = ( b'\x03' # Command Group Length, length 4, value 120 b'\x00\x00\x00\x00\x04\x00\x00\x00\x64\x00\x00\x00' # Requested SOP Class UID, length 26, value 1.2.840.10008.5.1.4.1.1.2 b'\x00\x00\x03\x00\x1a\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x00' # Command Field | length 2 | value 0x0120 b'\x00\x00\x00\x01\x02\x00\x00\x00\x20\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Requested SOP Instance UID, length 28, value 1.2.392.200036.9116.2.6.1.48 b'\x00\x00\x01\x10\x1c\x00\x00\x00\x31\x2e\x32\x2e\x33\x39\x32\x2e\x32' b'\x30\x30\x30\x33\x36\x2e\x39\x31\x31\x36\x2e\x32\x2e\x36\x2e\x31\x2e' b'\x34\x38' ) n_set_rq_cmd_empty = ( b'\x03' # Command Group Length, length 4, value 46 b'\x00\x00\x00\x00\x04\x00\x00\x00\x2e\x00\x00\x00' # Requested SOP Class UID, length 0 b'\x00\x00\x03\x00\x00\x00\x00\x00' # Command Field | length 2 | value 0x0120 b'\x00\x00\x00\x01\x02\x00\x00\x00\x20\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0101 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01' # Requested SOP Instance UID, length 0 b'\x00\x00\x01\x10\x00\x00\x00\x00' ) n_set_rq_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_set_rsp_cmd = ( b'\x03' # Command Group Length, length 4, value 76 b'\x00\x00\x00\x00\x04\x00\x00\x00\x4c\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8120 b'\x00\x00\x00\x01\x02\x00\x00\x00\x20\x81' # 
Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Status | length 2 | value 0x0000 b'\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' ) n_set_rsp_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_action_rq_cmd = ( b'\x03' # Command Group Length, length 4, value 110 b'\x00\x00\x00\x00\x04\x00\x00\x00\x6e\x00\x00\x00' # Requested SOP Class UID, length 26, value 1.2.840.10008.5.1.4.1.1.2 b'\x00\x00\x03\x00\x1a\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x00' # Command Field | length 2 | value 0x0130 b'\x00\x00\x00\x01\x02\x00\x00\x00\x30\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Requested SOP Instance UID, length 28, value 1.2.392.200036.9116.2.6.1.48 b'\x00\x00\x01\x10\x1c\x00\x00\x00\x31\x2e\x32\x2e\x33\x39\x32\x2e\x32' b'\x30\x30\x30\x33\x36\x2e\x39\x31\x31\x36\x2e\x32\x2e\x36\x2e\x31\x2e' b'\x34\x38' # Action Type ID | length 2 | value 1 b'\x00\x00\x08\x10\x02\x00\x00\x00\x01\x00' ) n_action_rq_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_action_rsp_cmd = ( b'\x03' # Command Group Length, 
length 4, value 86 b'\x00\x00\x00\x00\x04\x00\x00\x00\x56\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8130 b'\x00\x00\x00\x01\x02\x00\x00\x00\x30\x81' # Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Status | length 2 | value 0x0000 b'\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' # Action Type ID | length 2 | value 1 b'\x00\x00\x08\x10\x02\x00\x00\x00\x01\x00' ) n_action_rsp_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_create_rq_cmd = ( # Message control header byte: command set, last fragment b'\x03' # Command Group Length, length 4, value 100 b'\x00\x00\x00\x00\x04\x00\x00\x00\x64\x00\x00\x00' # Affected SOP Class UID, length 26, value 1.2.840.10008.5.1.4.1.1.2 b'\x00\x00\x02\x00\x1a\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32\x00' # Command Field | length 2 | value 0x0140 b'\x00\x00\x00\x01\x02\x00\x00\x00\x40\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Affected SOP Instance UID, length 28, value 1.2.392.200036.9116.2.6.1.48 b'\x00\x00\x00\x10\x1c\x00\x00\x00\x31\x2e\x32\x2e\x33\x39\x32\x2e\x32' b'\x30\x30\x30\x33\x36\x2e\x39\x31\x31\x36\x2e\x32\x2e\x36\x2e\x31\x2e' b'\x34\x38' ) n_create_rq_cmd_empty = ( # 
Message control header byte: command set, last fragment b'\x03' # Command Group Length, length 4, value b'\x00\x00\x00\x00\x04\x00\x00\x00\x36\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.3.4 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x33\x2e\x34\x00' # Command Field | length 2 | value 0x0140 b'\x00\x00\x00\x01\x02\x00\x00\x00\x40\x01' # Message ID | length 2 | value 7 b'\x00\x00\x10\x01\x02\x00\x00\x00\x07\x00' # Command Data Set Type, length 2 | value 0x0101 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01' # Affected SOP Instance UID, length 0 b'\x00\x00\x00\x10\x00\x00\x00\x00' ) n_create_rq_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' ) n_create_rsp_cmd = ( b'\x03' # Command Group Length, length 4, value 66 b'\x00\x00\x00\x00\x04\x00\x00\x00\x4c\x00\x00\x00' # Affected SOP Class UID, length 8, value 1.2.4.10 b'\x00\x00\x02\x00\x08\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x31\x30' # Command Field | length 2 | value 0x8140 b'\x00\x00\x00\x01\x02\x00\x00\x00\x40\x81' # Message ID Being Responded To, length 2 | value 5 b'\x00\x00\x20\x01\x02\x00\x00\x00\x05\x00' # Command Data Set Type, length 2 | value 0x0001 b'\x00\x00\x00\x08\x02\x00\x00\x00\x01\x00' # Status | length 2 | value 0x0000 b'\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00' # Affected SOP Instance UID, length 12, value 1.2.4.5.7.8 b'\x00\x00\x00\x10\x0c\x00\x00\x00\x31\x2e\x32\x2e\x34\x2e\x35\x2e\x37' b'\x2e\x38\x00' ) n_create_rsp_ds = ( # Message control header byte: dataset, last fragment b'\x02' # (0010, 0020) PatientName = "Tube HeNe" b'\x10\x00\x10\x00\x0a\x00\x00\x00\x54\x75\x62\x65\x20\x48\x65\x4e\x65' b'\x20' # (0010, 0010) PatientID = 'Test1101' b'\x10\x00\x20\x00\x08\x00\x00\x00\x54\x65\x73\x74\x31\x31\x30\x31' )
mit
getcircle/protobuf-to-dict
src/tests/sample_proto3_pb2.py
1
5497
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tests/sample-proto3.proto from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='tests/sample-proto3.proto', package='tests.proto3', syntax='proto3', serialized_pb=b'\n\x19tests/sample-proto3.proto\x12\x0ctests.proto3\"\xb7\x01\n\x0bSomeMessage\x12\x38\n\x08some_map\x18\x01 \x03(\x0b\x32&.tests.proto3.SomeMessage.SomeMapEntry\x12*\n\nenum_field\x18\x02 \x01(\x0e\x32\x16.tests.proto3.SomeEnum\x12\x12\n\nbool_field\x18\x03 \x01(\x08\x1a.\n\x0cSomeMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*%\n\x08SomeEnum\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0c\n\x08OPTION_1\x10\x01\x62\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _SOMEENUM = _descriptor.EnumDescriptor( name='SomeEnum', full_name='tests.proto3.SomeEnum', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='OPTION_1', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=229, serialized_end=266, ) _sym_db.RegisterEnumDescriptor(_SOMEENUM) SomeEnum = enum_type_wrapper.EnumTypeWrapper(_SOMEENUM) DEFAULT = 0 OPTION_1 = 1 _SOMEMESSAGE_SOMEMAPENTRY = _descriptor.Descriptor( name='SomeMapEntry', full_name='tests.proto3.SomeMessage.SomeMapEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='tests.proto3.SomeMessage.SomeMapEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='tests.proto3.SomeMessage.SomeMapEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=181, serialized_end=227, ) _SOMEMESSAGE = _descriptor.Descriptor( name='SomeMessage', full_name='tests.proto3.SomeMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='some_map', full_name='tests.proto3.SomeMessage.some_map', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='enum_field', full_name='tests.proto3.SomeMessage.enum_field', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bool_field', full_name='tests.proto3.SomeMessage.bool_field', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_SOMEMESSAGE_SOMEMAPENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=44, 
serialized_end=227, ) _SOMEMESSAGE_SOMEMAPENTRY.containing_type = _SOMEMESSAGE _SOMEMESSAGE.fields_by_name['some_map'].message_type = _SOMEMESSAGE_SOMEMAPENTRY _SOMEMESSAGE.fields_by_name['enum_field'].enum_type = _SOMEENUM DESCRIPTOR.message_types_by_name['SomeMessage'] = _SOMEMESSAGE DESCRIPTOR.enum_types_by_name['SomeEnum'] = _SOMEENUM SomeMessage = _reflection.GeneratedProtocolMessageType('SomeMessage', (_message.Message,), dict( SomeMapEntry = _reflection.GeneratedProtocolMessageType('SomeMapEntry', (_message.Message,), dict( DESCRIPTOR = _SOMEMESSAGE_SOMEMAPENTRY, __module__ = 'tests.sample_proto3_pb2' # @@protoc_insertion_point(class_scope:tests.proto3.SomeMessage.SomeMapEntry) )) , DESCRIPTOR = _SOMEMESSAGE, __module__ = 'tests.sample_proto3_pb2' # @@protoc_insertion_point(class_scope:tests.proto3.SomeMessage) )) _sym_db.RegisterMessage(SomeMessage) _sym_db.RegisterMessage(SomeMessage.SomeMapEntry) _SOMEMESSAGE_SOMEMAPENTRY.has_options = True _SOMEMESSAGE_SOMEMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') # @@protoc_insertion_point(module_scope)
unlicense
samithaj/headphones
lib/unidecode/x017.py
252
4190
data = ( '[?]', # 0x00 '[?]', # 0x01 '[?]', # 0x02 '[?]', # 0x03 '[?]', # 0x04 '[?]', # 0x05 '[?]', # 0x06 '[?]', # 0x07 '[?]', # 0x08 '[?]', # 0x09 '[?]', # 0x0a '[?]', # 0x0b '[?]', # 0x0c '[?]', # 0x0d '[?]', # 0x0e '[?]', # 0x0f '[?]', # 0x10 '[?]', # 0x11 '[?]', # 0x12 '[?]', # 0x13 '[?]', # 0x14 '[?]', # 0x15 '[?]', # 0x16 '[?]', # 0x17 '[?]', # 0x18 '[?]', # 0x19 '[?]', # 0x1a '[?]', # 0x1b '[?]', # 0x1c '[?]', # 0x1d '[?]', # 0x1e '[?]', # 0x1f '[?]', # 0x20 '[?]', # 0x21 '[?]', # 0x22 '[?]', # 0x23 '[?]', # 0x24 '[?]', # 0x25 '[?]', # 0x26 '[?]', # 0x27 '[?]', # 0x28 '[?]', # 0x29 '[?]', # 0x2a '[?]', # 0x2b '[?]', # 0x2c '[?]', # 0x2d '[?]', # 0x2e '[?]', # 0x2f '[?]', # 0x30 '[?]', # 0x31 '[?]', # 0x32 '[?]', # 0x33 '[?]', # 0x34 '[?]', # 0x35 '[?]', # 0x36 '[?]', # 0x37 '[?]', # 0x38 '[?]', # 0x39 '[?]', # 0x3a '[?]', # 0x3b '[?]', # 0x3c '[?]', # 0x3d '[?]', # 0x3e '[?]', # 0x3f '[?]', # 0x40 '[?]', # 0x41 '[?]', # 0x42 '[?]', # 0x43 '[?]', # 0x44 '[?]', # 0x45 '[?]', # 0x46 '[?]', # 0x47 '[?]', # 0x48 '[?]', # 0x49 '[?]', # 0x4a '[?]', # 0x4b '[?]', # 0x4c '[?]', # 0x4d '[?]', # 0x4e '[?]', # 0x4f '[?]', # 0x50 '[?]', # 0x51 '[?]', # 0x52 '[?]', # 0x53 '[?]', # 0x54 '[?]', # 0x55 '[?]', # 0x56 '[?]', # 0x57 '[?]', # 0x58 '[?]', # 0x59 '[?]', # 0x5a '[?]', # 0x5b '[?]', # 0x5c '[?]', # 0x5d '[?]', # 0x5e '[?]', # 0x5f '[?]', # 0x60 '[?]', # 0x61 '[?]', # 0x62 '[?]', # 0x63 '[?]', # 0x64 '[?]', # 0x65 '[?]', # 0x66 '[?]', # 0x67 '[?]', # 0x68 '[?]', # 0x69 '[?]', # 0x6a '[?]', # 0x6b '[?]', # 0x6c '[?]', # 0x6d '[?]', # 0x6e '[?]', # 0x6f '[?]', # 0x70 '[?]', # 0x71 '[?]', # 0x72 '[?]', # 0x73 '[?]', # 0x74 '[?]', # 0x75 '[?]', # 0x76 '[?]', # 0x77 '[?]', # 0x78 '[?]', # 0x79 '[?]', # 0x7a '[?]', # 0x7b '[?]', # 0x7c '[?]', # 0x7d '[?]', # 0x7e '[?]', # 0x7f 'k', # 0x80 'kh', # 0x81 'g', # 0x82 'gh', # 0x83 'ng', # 0x84 'c', # 0x85 'ch', # 0x86 'j', # 0x87 'jh', # 0x88 'ny', # 0x89 't', # 0x8a 'tth', # 0x8b 'd', # 0x8c 'ddh', # 0x8d 'nn', # 0x8e 't', # 
0x8f 'th', # 0x90 'd', # 0x91 'dh', # 0x92 'n', # 0x93 'p', # 0x94 'ph', # 0x95 'b', # 0x96 'bh', # 0x97 'm', # 0x98 'y', # 0x99 'r', # 0x9a 'l', # 0x9b 'v', # 0x9c 'sh', # 0x9d 'ss', # 0x9e 's', # 0x9f 'h', # 0xa0 'l', # 0xa1 'q', # 0xa2 'a', # 0xa3 'aa', # 0xa4 'i', # 0xa5 'ii', # 0xa6 'u', # 0xa7 'uk', # 0xa8 'uu', # 0xa9 'uuv', # 0xaa 'ry', # 0xab 'ryy', # 0xac 'ly', # 0xad 'lyy', # 0xae 'e', # 0xaf 'ai', # 0xb0 'oo', # 0xb1 'oo', # 0xb2 'au', # 0xb3 'a', # 0xb4 'aa', # 0xb5 'aa', # 0xb6 'i', # 0xb7 'ii', # 0xb8 'y', # 0xb9 'yy', # 0xba 'u', # 0xbb 'uu', # 0xbc 'ua', # 0xbd 'oe', # 0xbe 'ya', # 0xbf 'ie', # 0xc0 'e', # 0xc1 'ae', # 0xc2 'ai', # 0xc3 'oo', # 0xc4 'au', # 0xc5 'M', # 0xc6 'H', # 0xc7 'a`', # 0xc8 '', # 0xc9 '', # 0xca '', # 0xcb 'r', # 0xcc '', # 0xcd '!', # 0xce '', # 0xcf '', # 0xd0 '', # 0xd1 '', # 0xd2 '', # 0xd3 '.', # 0xd4 ' // ', # 0xd5 ':', # 0xd6 '+', # 0xd7 '++', # 0xd8 ' * ', # 0xd9 ' /// ', # 0xda 'KR', # 0xdb '\'', # 0xdc '[?]', # 0xdd '[?]', # 0xde '[?]', # 0xdf '0', # 0xe0 '1', # 0xe1 '2', # 0xe2 '3', # 0xe3 '4', # 0xe4 '5', # 0xe5 '6', # 0xe6 '7', # 0xe7 '8', # 0xe8 '9', # 0xe9 '[?]', # 0xea '[?]', # 0xeb '[?]', # 0xec '[?]', # 0xed '[?]', # 0xee '[?]', # 0xef '[?]', # 0xf0 '[?]', # 0xf1 '[?]', # 0xf2 '[?]', # 0xf3 '[?]', # 0xf4 '[?]', # 0xf5 '[?]', # 0xf6 '[?]', # 0xf7 '[?]', # 0xf8 '[?]', # 0xf9 '[?]', # 0xfa '[?]', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
gpl-3.0
vollov/net-audit
src/werkzeug/testsuite/datastructures.py
76
19939
# -*- coding: utf-8 -*- """ werkzeug.testsuite.datastructures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests the functionality of the provided Werkzeug datastructures. TODO: - FileMultiDict - convert to proper asserts - Immutable types undertested - Split up dict tests :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import unittest import pickle from copy import copy from werkzeug.testsuite import WerkzeugTestCase from werkzeug import datastructures from werkzeug.exceptions import BadRequestKeyError class MutableMultiDictBaseTestCase(WerkzeugTestCase): storage_class = None def test_pickle(self): cls = self.storage_class for protocol in xrange(pickle.HIGHEST_PROTOCOL + 1): d = cls() d.setlist('foo', [1, 2, 3, 4]) d.setlist('bar', 'foo bar baz'.split()) s = pickle.dumps(d, protocol) ud = pickle.loads(s) self.assert_equal(type(ud), type(d)) self.assert_equal(ud, d) self.assert_equal(pickle.loads( s.replace('werkzeug.datastructures', 'werkzeug')), d) ud['newkey'] = 'bla' self.assert_not_equal(ud, d) def test_basic_interface(self): md = self.storage_class() assert isinstance(md, dict) mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3), ('a', 1), ('a', 3), ('d', 4), ('c', 3)] md = self.storage_class(mapping) # simple getitem gives the first value assert md['a'] == 1 assert md['c'] == 3 with self.assert_raises(KeyError): md['e'] assert md.get('a') == 1 # list getitem assert md.getlist('a') == [1, 2, 1, 3] assert md.getlist('d') == [3, 4] # do not raise if key not found assert md.getlist('x') == [] # simple setitem overwrites all values md['a'] = 42 assert md.getlist('a') == [42] # list setitem md.setlist('a', [1, 2, 3]) assert md['a'] == 1 assert md.getlist('a') == [1, 2, 3] # verify that it does not change original lists l1 = [1, 2, 3] md.setlist('a', l1) del l1[:] assert md['a'] == 1 # setdefault, setlistdefault assert md.setdefault('u', 23) == 23 assert md.getlist('u') == [23] del md['u'] 
md.setlist('u', [-1, -2]) # delitem del md['u'] with self.assert_raises(KeyError): md['u'] del md['d'] assert md.getlist('d') == [] # keys, values, items, lists assert list(sorted(md.keys())) == ['a', 'b', 'c'] assert list(sorted(md.iterkeys())) == ['a', 'b', 'c'] assert list(sorted(md.values())) == [1, 2, 3] assert list(sorted(md.itervalues())) == [1, 2, 3] assert list(sorted(md.items())) == [('a', 1), ('b', 2), ('c', 3)] assert list(sorted(md.items(multi=True))) == \ [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)] assert list(sorted(md.iteritems())) == [('a', 1), ('b', 2), ('c', 3)] assert list(sorted(md.iteritems(multi=True))) == \ [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)] assert list(sorted(md.lists())) == [('a', [1, 2, 3]), ('b', [2]), ('c', [3])] assert list(sorted(md.iterlists())) == [('a', [1, 2, 3]), ('b', [2]), ('c', [3])] # copy method c = md.copy() assert c['a'] == 1 assert c.getlist('a') == [1, 2, 3] # copy method 2 c = copy(md) assert c['a'] == 1 assert c.getlist('a') == [1, 2, 3] # update with a multidict od = self.storage_class([('a', 4), ('a', 5), ('y', 0)]) md.update(od) assert md.getlist('a') == [1, 2, 3, 4, 5] assert md.getlist('y') == [0] # update with a regular dict md = c od = {'a': 4, 'y': 0} md.update(od) assert md.getlist('a') == [1, 2, 3, 4] assert md.getlist('y') == [0] # pop, poplist, popitem, popitemlist assert md.pop('y') == 0 assert 'y' not in md assert md.poplist('a') == [1, 2, 3, 4] assert 'a' not in md assert md.poplist('missing') == [] # remaining: b=2, c=3 popped = md.popitem() assert popped in [('b', 2), ('c', 3)] popped = md.popitemlist() assert popped in [('b', [2]), ('c', [3])] # type conversion md = self.storage_class({'a': '4', 'b': ['2', '3']}) assert md.get('a', type=int) == 4 assert md.getlist('b', type=int) == [2, 3] # repr md = self.storage_class([('a', 1), ('a', 2), ('b', 3)]) assert "('a', 1)" in repr(md) assert "('a', 2)" in repr(md) assert "('b', 3)" in repr(md) # add and getlist md.add('c', '42') 
md.add('c', '23') assert md.getlist('c') == ['42', '23'] md.add('c', 'blah') assert md.getlist('c', type=int) == [42, 23] # setdefault md = self.storage_class() md.setdefault('x', []).append(42) md.setdefault('x', []).append(23) assert md['x'] == [42, 23] # to dict md = self.storage_class() md['foo'] = 42 md.add('bar', 1) md.add('bar', 2) assert md.to_dict() == {'foo': 42, 'bar': 1} assert md.to_dict(flat=False) == {'foo': [42], 'bar': [1, 2]} # popitem from empty dict with self.assert_raises(KeyError): self.storage_class().popitem() with self.assert_raises(KeyError): self.storage_class().popitemlist() # key errors are of a special type with self.assert_raises(BadRequestKeyError): self.storage_class()[42] # setlist works md = self.storage_class() md['foo'] = 42 md.setlist('foo', [1, 2]) assert md.getlist('foo') == [1, 2] class ImmutableDictBaseTestCase(WerkzeugTestCase): storage_class = None def test_follows_dict_interface(self): cls = self.storage_class data = {'foo': 1, 'bar': 2, 'baz': 3} d = cls(data) self.assert_equal(d['foo'], 1) self.assert_equal(d['bar'], 2) self.assert_equal(d['baz'], 3) self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo']) self.assert_('foo' in d) self.assert_('foox' not in d) self.assert_equal(len(d), 3) def test_copies_are_mutable(self): cls = self.storage_class immutable = cls({'a': 1}) with self.assert_raises(TypeError): immutable.pop('a') mutable = immutable.copy() mutable.pop('a') self.assert_('a' in immutable) self.assert_(mutable is not immutable) self.assert_(copy(immutable) is immutable) def test_dict_is_hashable(self): cls = self.storage_class immutable = cls({'a': 1, 'b': 2}) immutable2 = cls({'a': 2, 'b': 2}) x = set([immutable]) self.assert_(immutable in x) self.assert_(immutable2 not in x) x.discard(immutable) self.assert_(immutable not in x) self.assert_(immutable2 not in x) x.add(immutable2) self.assert_(immutable not in x) self.assert_(immutable2 in x) x.add(immutable) self.assert_(immutable in x) 
self.assert_(immutable2 in x) class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase): storage_class = datastructures.ImmutableTypeConversionDict class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase): storage_class = datastructures.ImmutableMultiDict def test_multidict_is_hashable(self): cls = self.storage_class immutable = cls({'a': [1, 2], 'b': 2}) immutable2 = cls({'a': [1], 'b': 2}) x = set([immutable]) self.assert_(immutable in x) self.assert_(immutable2 not in x) x.discard(immutable) self.assert_(immutable not in x) self.assert_(immutable2 not in x) x.add(immutable2) self.assert_(immutable not in x) self.assert_(immutable2 in x) x.add(immutable) self.assert_(immutable in x) self.assert_(immutable2 in x) class ImmutableDictTestCase(ImmutableDictBaseTestCase): storage_class = datastructures.ImmutableDict class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase): storage_class = datastructures.ImmutableOrderedMultiDict def test_ordered_multidict_is_hashable(self): a = self.storage_class([('a', 1), ('b', 1), ('a', 2)]) b = self.storage_class([('a', 1), ('a', 2), ('b', 1)]) self.assert_not_equal(hash(a), hash(b)) class MultiDictTestCase(MutableMultiDictBaseTestCase): storage_class = datastructures.MultiDict def test_multidict_pop(self): make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]}) d = make_d() assert d.pop('foo') == 1 assert not d d = make_d() assert d.pop('foo', 32) == 1 assert not d d = make_d() assert d.pop('foos', 32) == 32 assert d with self.assert_raises(KeyError): d.pop('foos') def test_setlistdefault(self): md = self.storage_class() assert md.setlistdefault('u', [-1, -2]) == [-1, -2] assert md.getlist('u') == [-1, -2] assert md['u'] == -1 def test_iter_interfaces(self): mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3), ('a', 1), ('a', 3), ('d', 4), ('c', 3)] md = self.storage_class(mapping) assert list(zip(md.keys(), md.listvalues())) == list(md.lists()) assert list(zip(md, md.iterlistvalues())) == 
list(md.iterlists()) assert list(zip(md.iterkeys(), md.iterlistvalues())) == list(md.iterlists()) class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase): storage_class = datastructures.OrderedMultiDict def test_ordered_interface(self): cls = self.storage_class d = cls() assert not d d.add('foo', 'bar') assert len(d) == 1 d.add('foo', 'baz') assert len(d) == 1 assert d.items() == [('foo', 'bar')] assert list(d) == ['foo'] assert d.items(multi=True) == [('foo', 'bar'), ('foo', 'baz')] del d['foo'] assert not d assert len(d) == 0 assert list(d) == [] d.update([('foo', 1), ('foo', 2), ('bar', 42)]) d.add('foo', 3) assert d.getlist('foo') == [1, 2, 3] assert d.getlist('bar') == [42] assert d.items() == [('foo', 1), ('bar', 42)] assert d.keys() == list(d) == list(d.iterkeys()) == ['foo', 'bar'] assert d.items(multi=True) == [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)] assert len(d) == 2 assert d.pop('foo') == 1 assert d.pop('blafasel', None) is None assert d.pop('blafasel', 42) == 42 assert len(d) == 1 assert d.poplist('bar') == [42] assert not d d.get('missingkey') is None d.add('foo', 42) d.add('foo', 23) d.add('bar', 2) d.add('foo', 42) assert d == datastructures.MultiDict(d) id = self.storage_class(d) assert d == id d.add('foo', 2) assert d != id d.update({'blah': [1, 2, 3]}) assert d['blah'] == 1 assert d.getlist('blah') == [1, 2, 3] # setlist works d = self.storage_class() d['foo'] = 42 d.setlist('foo', [1, 2]) assert d.getlist('foo') == [1, 2] with self.assert_raises(BadRequestKeyError): d.pop('missing') with self.assert_raises(BadRequestKeyError): d['missing'] # popping d = self.storage_class() d.add('foo', 23) d.add('foo', 42) d.add('foo', 1) assert d.popitem() == ('foo', 23) with self.assert_raises(BadRequestKeyError): d.popitem() assert not d d.add('foo', 23) d.add('foo', 42) d.add('foo', 1) assert d.popitemlist() == ('foo', [23, 42, 1]) with self.assert_raises(BadRequestKeyError): d.popitemlist() class CombinedMultiDictTestCase(WerkzeugTestCase): 
storage_class = datastructures.CombinedMultiDict def test_basic_interface(self): d1 = datastructures.MultiDict([('foo', '1')]) d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')]) d = self.storage_class([d1, d2]) # lookup assert d['foo'] == '1' assert d['bar'] == '2' assert d.getlist('bar') == ['2', '3'] assert sorted(d.items()) == [('bar', '2'), ('foo', '1')], d.items() assert sorted(d.items(multi=True)) == [('bar', '2'), ('bar', '3'), ('foo', '1')] assert 'missingkey' not in d assert 'foo' in d # type lookup assert d.get('foo', type=int) == 1 assert d.getlist('bar', type=int) == [2, 3] # get key errors for missing stuff with self.assert_raises(KeyError): d['missing'] # make sure that they are immutable with self.assert_raises(TypeError): d['foo'] = 'blub' # copies are immutable d = d.copy() with self.assert_raises(TypeError): d['foo'] = 'blub' # make sure lists merges md1 = datastructures.MultiDict((("foo", "bar"),)) md2 = datastructures.MultiDict((("foo", "blafasel"),)) x = self.storage_class((md1, md2)) assert x.lists() == [('foo', ['bar', 'blafasel'])] class HeadersTestCase(WerkzeugTestCase): storage_class = datastructures.Headers def test_basic_interface(self): headers = self.storage_class() headers.add('Content-Type', 'text/plain') headers.add('X-Foo', 'bar') assert 'x-Foo' in headers assert 'Content-type' in headers headers['Content-Type'] = 'foo/bar' assert headers['Content-Type'] == 'foo/bar' assert len(headers.getlist('Content-Type')) == 1 # list conversion assert headers.to_list() == [ ('Content-Type', 'foo/bar'), ('X-Foo', 'bar') ] assert str(headers) == ( "Content-Type: foo/bar\r\n" "X-Foo: bar\r\n" "\r\n") assert str(self.storage_class()) == "\r\n" # extended add headers.add('Content-Disposition', 'attachment', filename='foo') assert headers['Content-Disposition'] == 'attachment; filename=foo' headers.add('x', 'y', z='"') assert headers['x'] == r'y; z="\""' def test_defaults_and_conversion(self): # defaults headers = self.storage_class([ 
('Content-Type', 'text/plain'), ('X-Foo', 'bar'), ('X-Bar', '1'), ('X-Bar', '2') ]) assert headers.getlist('x-bar') == ['1', '2'] assert headers.get('x-Bar') == '1' assert headers.get('Content-Type') == 'text/plain' assert headers.setdefault('X-Foo', 'nope') == 'bar' assert headers.setdefault('X-Bar', 'nope') == '1' assert headers.setdefault('X-Baz', 'quux') == 'quux' assert headers.setdefault('X-Baz', 'nope') == 'quux' headers.pop('X-Baz') # type conversion assert headers.get('x-bar', type=int) == 1 assert headers.getlist('x-bar', type=int) == [1, 2] # list like operations assert headers[0] == ('Content-Type', 'text/plain') assert headers[:1] == self.storage_class([('Content-Type', 'text/plain')]) del headers[:2] del headers[-1] assert headers == self.storage_class([('X-Bar', '1')]) def test_copying(self): a = self.storage_class([('foo', 'bar')]) b = a.copy() a.add('foo', 'baz') assert a.getlist('foo') == ['bar', 'baz'] assert b.getlist('foo') == ['bar'] def test_popping(self): headers = self.storage_class([('a', 1)]) assert headers.pop('a') == 1 assert headers.pop('b', 2) == 2 with self.assert_raises(KeyError): headers.pop('c') def test_set_arguments(self): a = self.storage_class() a.set('Content-Disposition', 'useless') a.set('Content-Disposition', 'attachment', filename='foo') assert a['Content-Disposition'] == 'attachment; filename=foo' def test_reject_newlines(self): h = self.storage_class() for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar': with self.assert_raises(ValueError): h['foo'] = variation with self.assert_raises(ValueError): h.add('foo', variation) with self.assert_raises(ValueError): h.add('foo', 'test', option=variation) with self.assert_raises(ValueError): h.set('foo', variation) with self.assert_raises(ValueError): h.set('foo', 'test', option=variation) class EnvironHeadersTestCase(WerkzeugTestCase): storage_class = datastructures.EnvironHeaders def test_basic_interface(self): # this happens in multiple WSGI servers because they # use a vary 
naive way to convert the headers; broken_env = { 'HTTP_CONTENT_TYPE': 'text/html', 'CONTENT_TYPE': 'text/html', 'HTTP_CONTENT_LENGTH': '0', 'CONTENT_LENGTH': '0', 'HTTP_ACCEPT': '*', 'wsgi.version': (1, 0) } headers = self.storage_class(broken_env) assert headers assert len(headers) == 3 assert sorted(headers) == [ ('Accept', '*'), ('Content-Length', '0'), ('Content-Type', 'text/html') ] assert not self.storage_class({'wsgi.version': (1, 0)}) assert len(self.storage_class({'wsgi.version': (1, 0)})) == 0 class HeaderSetTestCase(WerkzeugTestCase): storage_class = datastructures.HeaderSet def test_basic_interface(self): hs = self.storage_class() hs.add('foo') hs.add('bar') assert 'Bar' in hs assert hs.find('foo') == 0 assert hs.find('BAR') == 1 assert hs.find('baz') < 0 hs.discard('missing') hs.discard('foo') assert hs.find('foo') < 0 assert hs.find('bar') == 0 with self.assert_raises(IndexError): hs.index('missing') assert hs.index('bar') == 0 assert hs hs.clear() assert not hs class ImmutableListTestCase(WerkzeugTestCase): storage_class = datastructures.ImmutableList def test_list_hashable(self): t = (1, 2, 3, 4) l = self.storage_class(t) self.assert_equal(hash(t), hash(l)) self.assert_not_equal(t, l) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(MultiDictTestCase)) suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase)) suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase)) suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase)) suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase)) suite.addTest(unittest.makeSuite(ImmutableDictTestCase)) suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase)) suite.addTest(unittest.makeSuite(HeadersTestCase)) suite.addTest(unittest.makeSuite(EnvironHeadersTestCase)) suite.addTest(unittest.makeSuite(HeaderSetTestCase)) return suite
mit
googleinterns/userjourneytool
tests/test_transformers.py
1
9933
# pylint: disable=redefined-outer-name from unittest.mock import Mock, patch import pytest from graph_structures_pb2 import Client, Node, NodeType, Status, VirtualNode import ujt.constants import ujt.transformers @pytest.fixture def patch_path(): return "ujt.transformers" def test_apply_node_property_classes(assert_same_elements): node_name = "node" node_name_message_map = { node_name: Node( name=node_name, status=Status.STATUS_HEALTHY, node_type=NodeType.NODETYPE_SERVICE, ), } client_name = "client" client_name_message_map = { client_name: Client( name=client_name, ), } virtual_node_name = "virtual_node" virtual_node_name_message_map = { virtual_node_name: VirtualNode( name=virtual_node_name, status=Status.STATUS_HEALTHY, node_type=NodeType.NODETYPE_VIRTUAL, ) } elements = [ { "data": { "ujt_id": node_name, }, "classes": "", }, { "data": { "ujt_id": client_name, }, "classes": "", }, { "data": { "ujt_id": virtual_node_name, }, "classes": "", }, ] expected_elements = [ { "data": { "ujt_id": node_name, }, "classes": " NODETYPE_SERVICE STATUS_HEALTHY", }, { "data": { "ujt_id": client_name, }, "classes": f" {ujt.constants.CLIENT_CLASS}", }, { "data": { "ujt_id": virtual_node_name, }, "classes": " NODETYPE_VIRTUAL STATUS_HEALTHY", }, ] returned_elements = ujt.transformers.apply_node_property_classes( elements, node_name_message_map, client_name_message_map, virtual_node_name_message_map, ) assert_same_elements(returned_elements, expected_elements) def test_apply_highlighted_edge_class_to_elements(): # Node0 connects to Node1 which connects to Node2 (UJ0) # Node0 connects to Node3 (UJ1) node_names = ["Node0", "Node1", "Node2", "Node3"] user_journey_names = ["UJ0", "UJ1"] elements = [ { "data": { "source": node_names[0], "target": node_names[1], "user_journey_name": user_journey_names[0], }, "classes": "", }, { "data": { "source": node_names[1], "target": node_names[2], }, "classes": "", }, { "data": { "source": node_names[0], "target": node_names[3], "user_journey_name": 
user_journey_names[1], }, "classes": "", }, ] new_elements = ujt.transformers.apply_highlighted_edge_class_to_elements( elements, user_journey_names[0] ) for e in new_elements: if ( e["data"]["source"] == node_names[0] and e["data"]["target"] == node_names[3] ): assert ujt.constants.HIGHLIGHTED_UJ_EDGE_CLASS not in e["classes"] else: assert ujt.constants.HIGHLIGHTED_UJ_EDGE_CLASS in e["classes"] def test_apply_virtual_nodes_to_elements(patch_path, assert_same_elements): # Node0 and Node1 are in collapsed VirtualNode0 # Node1 is a child of Node0 # Node2 in collapsed VirtualNode1 # Node0 has an edge to Node2 # Node1 has an edge to Node2 node_names = ["Node0", "Node1", "Node2"] virtual_node_names = ["VirtualNode0", "VirtualNode1"] node_elements = [ { "data": { "ujt_id": node_names[0], }, "classes": "", }, { "data": { "ujt_id": node_names[1], "parent": node_names[0], }, "classes": "", }, { "data": { "ujt_id": node_names[2], }, "classes": "", }, ] edge_elements = [ { "data": { "source": node_names[0], "target": node_names[2], }, "classes": "", }, { "data": { "source": node_names[1], "target": node_names[2], }, "classes": "", }, ] virtual_node_map = { # apply_virtual_nodes_to_edges doesn't actually access the value inside # the map, only the key. However, utils.get_highest_collapsed_virtual_node_name does. 
virtual_node_names[0]: VirtualNode(name=virtual_node_names[0], collapsed=True), virtual_node_names[1]: VirtualNode(name=virtual_node_names[1], collapsed=False), } parent_virtual_node_map = { node_names[0]: virtual_node_names[0], node_names[1]: virtual_node_names[0], node_names[2]: virtual_node_names[1], } expected_elements = [ { "data": { "ujt_id": node_names[2], "parent": virtual_node_names[1], }, "classes": "", }, { "data": { "source": virtual_node_names[0], "target": node_names[2], "id": f"{virtual_node_names[0]}/{node_names[2]}", }, "classes": "", }, { "data": { "label": virtual_node_names[0], "id": virtual_node_names[0], "ujt_id": virtual_node_names[0], }, "classes": "", }, { "data": { "label": virtual_node_names[1], "id": virtual_node_names[1], "ujt_id": virtual_node_names[1], }, "classes": "", }, ] with patch( f"{patch_path}.state.get_virtual_node_map", Mock(return_value=virtual_node_map) ), patch( f"{patch_path}.state.get_parent_virtual_node_map", Mock(return_value=parent_virtual_node_map), ): returned_elements = ujt.transformers.apply_virtual_nodes_to_elements( node_elements + edge_elements ) assert_same_elements(returned_elements, expected_elements) def test_apply_uuid_to_elements(patch_path, assert_same_elements): node_names = ["Node0", "Node1"] node_elements = [ { "data": { "id": node_names[0], }, }, { "data": { "id": node_names[1], "parent": node_names[0], }, }, ] edge_elements = [ { "data": { "id": f"{node_names[0]}.{node_names[1]}", "source": node_names[0], "target": node_names[1], }, }, ] expected_elements = [ { "data": { "id": f"{node_names[0]}#uuid", }, }, { "data": { "id": f"{node_names[1]}#uuid", "parent": f"{node_names[0]}#uuid", }, }, { "data": { "id": f"{node_names[0]}.{node_names[1]}#uuid", "source": f"{node_names[0]}#uuid", "target": f"{node_names[1]}#uuid", }, }, ] with patch(f"{patch_path}.uuid.uuid4", Mock(return_value="uuid")): returned_elements = ujt.transformers.apply_uuid_to_elements( node_elements + edge_elements ) 
assert_same_elements(returned_elements, expected_elements) def test_sort_nodes_by_parent_relationship(): node_names = ["Node0", "Node1", "Node2", "Node3", "Node4"] # Node0 contains Node1 and Node2 # Node1 contains Node3 # Node2 contains Node4 node_elements = [ { "data": { "id": node_names[0], }, }, { "data": { "id": node_names[1], "parent": node_names[0], }, }, { "data": { "id": node_names[2], "parent": node_names[0], }, }, { "data": { "id": node_names[3], "parent": node_names[1], }, }, { "data": { "id": node_names[4], "parent": node_names[2], }, }, ] # We declare the input in topological order so it's easier to understand # Reverse so it's in reverse topological order reversed_node_elements = node_elements[::-1] edge_elements = [ { "data": { "source": node_names[0], "target": node_names[1], "id": f"{node_names[0]}/{node_names[1]}", }, }, ] returned_elements = ujt.transformers.sort_nodes_by_parent_relationship( reversed_node_elements + edge_elements ) expected_elements = [ { "data": { "source": f"{node_names[0]}", "target": f"{node_names[1]}", "id": f"{node_names[0]}/{node_names[1]}", } }, {"data": {"id": f"{node_names[0]}"}}, {"data": {"id": f"{node_names[1]}", "parent": f"{node_names[0]}"}}, {"data": {"id": f"{node_names[2]}", "parent": f"{node_names[0]}"}}, {"data": {"id": f"{node_names[3]}", "parent": f"{node_names[1]}"}}, {"data": {"id": f"{node_names[4]}", "parent": f"{node_names[2]}"}}, ] assert returned_elements == expected_elements
apache-2.0
schleichdi2/OPENNFR-6.3-CORE
meta-oe-alliance/meta-brands/meta-edision/recipes-linux/linux-edision-5.3/findkerneldevice.py
7
3237
#!/usr/bin/python import os import sys import collections import struct import sys import uuid # http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_table_header_.28LBA_1.29 GPT_HEADER_FORMAT = """ 8s signature 4s revision L header_size L crc32 4x _ Q current_lba Q backup_lba Q first_usable_lba Q last_usable_lba 16s disk_guid Q part_entry_start_lba L num_part_entries L part_entry_size L crc32_part_array """ # http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries_.28LBA_2.E2.80.9333.29 GPT_PARTITION_FORMAT = """ 16s type 16s unique Q first_lba Q last_lba Q flags 72s name """ def _make_fmt(name, format, extras=[]): type_and_name = [l.split(None, 1) for l in format.strip().splitlines()] fmt = ''.join(t for (t,n) in type_and_name) fmt = '<'+fmt tupletype = collections.namedtuple(name, [n for (t,n) in type_and_name if n!='_']+extras) return (fmt, tupletype) class GPTError(Exception): pass def read_header(fp, lba_size=512): # skip MBR fp.seek(1*lba_size) fmt, GPTHeader = _make_fmt('GPTHeader', GPT_HEADER_FORMAT) data = fp.read(struct.calcsize(fmt)) header = GPTHeader._make(struct.unpack(fmt, data)) if header.signature != 'EFI PART': raise GPTError('Bad signature: %r' % header.signature) if header.revision != '\x00\x00\x01\x00': raise GPTError('Bad revision: %r' % header.revision) if header.header_size < 92: raise GPTError('Bad header size: %r' % header.header_size) header = header._replace( disk_guid=str(uuid.UUID(bytes_le=header.disk_guid)), ) return header def read_partitions(fp, header, lba_size=512): fp.seek(header.part_entry_start_lba * lba_size) fmt, GPTPartition = _make_fmt('GPTPartition', GPT_PARTITION_FORMAT, extras=['index']) for idx in xrange(1, 1+header.num_part_entries): data = fp.read(header.part_entry_size) if len(data) < struct.calcsize(fmt): raise GPTError('Short partition entry') part = GPTPartition._make(struct.unpack(fmt, data) + (idx,)) if part.type == 16*'\x00': continue part = part._replace( 
type=str(uuid.UUID(bytes_le=part.type)), unique=str(uuid.UUID(bytes_le=part.unique)), name=part.name.decode('utf-16').split('\0', 1)[0], ) yield part def find_kernel_device_udevadm(kernelpartition): try: for partition in os.listdir('/sys/block/mmcblk1'): if partition.startswith('mmcblk1p'): name = os.popen('udevadm info --query all --path /sys/block/mmcblk1/' + partition + ' | grep PARTNAME').readline().split('=')[1].strip() if kernelpartition == name: return '/dev/' + partition return '' except: return '' def find_kernel_device_gpt(kernelpartition): try: p = 1 header = read_header(open('/dev/mmcblk1', 'r')) for part in read_partitions(open('/dev/mmcblk1', 'r'), header): if kernelpartition == part.name: return '/dev/mmcblk1p' + str(p) p += 1 return '' except: return '' try: kerneldev = open('/sys/firmware/devicetree/base/chosen/kerneldev', 'r').readline().split('.') if 'emmcflash0' in kerneldev[0]: kerneldevice = find_kernel_device_udevadm(kerneldev[1].strip('\0')) if kerneldevice == '': kerneldevice = find_kernel_device_gpt(kerneldev[1].strip('\0')) if kerneldevice != '': os.symlink(kerneldevice, '/dev/kernel') except: pass
gpl-2.0
labordoc/labordoc-next
modules/webdeposit/lib/webdeposit_form.py
2
4312
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA from wtforms import Label from invenio.wtforms_utils import InvenioForm as Form from invenio.webdeposit_config_utils import WebDepositConfiguration from invenio.webdeposit_cook_json_utils import cook_files, uncook_files class WebDepositForm(Form): """ Generic WebDeposit Form class """ def __init__(self, **kwargs): super(WebDepositForm, self).__init__(**kwargs) # Load and apply configuration from config file self.config = WebDepositConfiguration(form_type=self.__class__.__name__) custom_title = self.config.get_form_title(self.__class__.__name__) if custom_title is not None: self._title = custom_title for field in self._fields.values(): custom_label = self.config.get_label(field.__class__.__name__) if custom_label is not None: setattr(field, 'label', Label(field.id, custom_label)) custom_widget = self.config.get_widget(field.__class__.__name__) if custom_widget is not None: setattr(field, 'widget', custom_widget) self.groups_meta = {} if hasattr(self, 'groups'): for group in self.groups: group_name = group[0] fields = group[1] for field in fields: setattr(self[field], 'group', group_name) if len(group) == 3: # If group has metadata group_meta = group[2] self.groups_meta[group_name] = group_meta 
def cook_json(self, json_reader): for field in self._fields.values(): try: json_reader = field.cook_json(json_reader) except AttributeError: # Some fields (eg. SubmitField) don't have a cook json function pass cook_files_function = self.config.get_files_cook_function() or cook_files json_reader = cook_files_function(json_reader, self.files) return json_reader def uncook_json(self, json_reader, webdeposit_json, recid=None): for field in self._fields.values(): if hasattr(field, 'uncook_json'): # WTFields are not mapped with rec json webdeposit_json = field.uncook_json(json_reader, webdeposit_json) webdeposit_json = uncook_files(webdeposit_json, recid=recid, json_reader=json_reader) return webdeposit_json def get_groups(self): groups = [({"name": 'Rest'}, [])] # Just a dict for optimization groups_hash = {} for field in self: if hasattr(field, 'group') and field.group is not None: if not field.group in groups_hash: groups_hash[field.group] = len(groups) # Append group to the list groups.append(({"name": field.group}, [])) # Append field to group's field list groups[groups_hash[field.group]][1].append(field) if field.group in self.groups_meta: # Add group's meta (description etc) groups[groups_hash[field.group]][0]['meta'] = \ self.groups_meta[field.group] else: # Append to Rest groups[0][1].append(field) # Append rest fields in the end rest = groups.pop(0) groups.append(rest) return groups
gpl-2.0
DirtyUnicorns/android_external_chromium-org
build/android/pylib/host_driven/test_runner.py
27
4658
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs host-driven tests on a particular device.""" import logging import sys import time import traceback from pylib.base import base_test_result from pylib.base import base_test_runner from pylib.instrumentation import test_result import test_case class HostDrivenExceptionTestResult(test_result.InstrumentationTestResult): """Test result corresponding to a python exception in a host-driven test.""" def __init__(self, test_name, start_date_ms, exc_info): """Constructs a HostDrivenExceptionTestResult object. Args: test_name: name of the test which raised an exception. start_date_ms: the starting time for the test. exc_info: exception info, ostensibly from sys.exc_info(). """ exc_type, exc_value, exc_traceback = exc_info trace_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) log_msg = 'Exception:\n' + trace_info duration_ms = (int(time.time()) * 1000) - start_date_ms super(HostDrivenExceptionTestResult, self).__init__( test_name, base_test_result.ResultType.FAIL, start_date_ms, duration_ms, log=str(exc_type) + ' ' + log_msg) class HostDrivenTestRunner(base_test_runner.BaseTestRunner): """Orchestrates running a set of host-driven tests. Any Python exceptions in the tests are caught and translated into a failed result, rather than being re-raised on the main thread. """ #override def __init__(self, device, shard_index, tool, push_deps, cleanup_test_files): """Creates a new HostDrivenTestRunner. Args: device: Attached android device. shard_index: Shard index. tool: Name of the Valgrind tool. push_deps: If True, push all dependencies to the device. cleanup_test_files: Whether or not to cleanup test files on device. """ super(HostDrivenTestRunner, self).__init__(device, tool, push_deps, cleanup_test_files) # The shard index affords the ability to create unique port numbers (e.g. 
# DEFAULT_PORT + shard_index) if the test so wishes. self.shard_index = shard_index #override def RunTest(self, test): """Sets up and runs a test case. Args: test: An object which is ostensibly a subclass of HostDrivenTestCase. Returns: A TestRunResults object which contains the result produced by the test and, in the case of a failure, the test that should be retried. """ assert isinstance(test, test_case.HostDrivenTestCase) start_date_ms = int(time.time()) * 1000 exception_raised = False try: test.SetUp(self.device, self.shard_index, self._push_deps, self._cleanup_test_files) except Exception: logging.exception( 'Caught exception while trying to run SetUp() for test: ' + test.tagged_name) # Tests whose SetUp() method has failed are likely to fail, or at least # yield invalid results. exc_info = sys.exc_info() results = base_test_result.TestRunResults() results.AddResult(HostDrivenExceptionTestResult( test.tagged_name, start_date_ms, exc_info)) return results, test try: results = test.Run() except Exception: # Setting this lets TearDown() avoid stomping on our stack trace from # Run() should TearDown() also raise an exception. exception_raised = True logging.exception('Caught exception while trying to run test: ' + test.tagged_name) exc_info = sys.exc_info() results = base_test_result.TestRunResults() results.AddResult(HostDrivenExceptionTestResult( test.tagged_name, start_date_ms, exc_info)) try: test.TearDown() except Exception: logging.exception( 'Caught exception while trying run TearDown() for test: ' + test.tagged_name) if not exception_raised: # Don't stomp the error during the test if TearDown blows up. This is a # trade-off: if the test fails, this will mask any problem with TearDown # until the test is fixed. exc_info = sys.exc_info() results = base_test_result.TestRunResults() results.AddResult(HostDrivenExceptionTestResult( test.tagged_name, start_date_ms, exc_info)) if not results.DidRunPass(): return results, test else: return results, None
bsd-3-clause
jasonthomas/zamboni
mkt/search/tests/test_middleware.py
19
1188
from django.test.client import RequestFactory import elasticsearch import mock from nose.tools import eq_ import mkt.site.tests from mkt.search.middleware import ElasticsearchExceptionMiddleware as ESM class TestElasticsearchExceptionMiddleware(mkt.site.tests.TestCase): def setUp(self): self.request = RequestFactory() @mock.patch('mkt.search.middleware.render') def test_exceptions_we_catch(self, render_mock): # These are instantiated with an error string. for e in [elasticsearch.ElasticsearchException, elasticsearch.SerializationError, elasticsearch.TransportError, elasticsearch.NotFoundError, elasticsearch.RequestError]: ESM().process_exception(self.request, e(503, 'ES ERROR')) render_mock.assert_called_with(self.request, 'search/down.html', status=503) render_mock.reset_mock() @mock.patch('mkt.search.middleware.render') def test_exceptions_we_do_not_catch(self, render_mock): ESM().process_exception(self.request, Exception) eq_(render_mock.called, False)
bsd-3-clause
gangadhar-kadam/verve-erp
erpnext/stock/doctype/packing_slip/packing_slip.py
3
6006
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import flt, cint from frappe import _ from frappe.model.document import Document class PackingSlip(Document): def validate(self): """ * Validate existence of submitted Delivery Note * Case nos do not overlap * Check if packed qty doesn't exceed actual qty of delivery note It is necessary to validate case nos before checking quantity """ self.validate_delivery_note() self.validate_items_mandatory() self.validate_case_nos() self.validate_qty() from erpnext.utilities.transaction_base import validate_uom_is_integer validate_uom_is_integer(self, "stock_uom", "qty") validate_uom_is_integer(self, "weight_uom", "net_weight") def validate_delivery_note(self): """ Validates if delivery note has status as draft """ if cint(frappe.db.get_value("Delivery Note", self.delivery_note, "docstatus")) != 0: frappe.throw(_("Delivery Note {0} must not be submitted").format(self.delivery_note)) def validate_items_mandatory(self): rows = [d.item_code for d in self.get("items")] if not rows: frappe.msgprint(_("No Items to pack"), raise_exception=1) def validate_case_nos(self): """ Validate if case nos overlap. If they do, recommend next case no. """ if not cint(self.from_case_no): frappe.msgprint(_("Please specify a valid 'From Case No.'"), raise_exception=1) elif not self.to_case_no: self.to_case_no = self.from_case_no elif self.from_case_no > self.to_case_no: frappe.msgprint(_("'To Case No.' 
cannot be less than 'From Case No.'"), raise_exception=1) res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %(delivery_note)s AND docstatus = 1 AND ((from_case_no BETWEEN %(from_case_no)s AND %(to_case_no)s) OR (to_case_no BETWEEN %(from_case_no)s AND %(to_case_no)s) OR (%(from_case_no)s BETWEEN from_case_no AND to_case_no)) """, {"delivery_note":self.delivery_note, "from_case_no":self.from_case_no, "to_case_no":self.to_case_no}) if res: frappe.throw(_("""Case No(s) already in use. Try from Case No {0}""").format(self.get_recommended_case_no())) def validate_qty(self): """ Check packed qty across packing slips and delivery note """ # Get Delivery Note Items, Item Quantity Dict and No. of Cases for this Packing slip dn_details, ps_item_qty, no_of_cases = self.get_details_for_packing() for item in dn_details: new_packed_qty = (flt(ps_item_qty[item['item_code']]) * no_of_cases) + \ flt(item['packed_qty']) if new_packed_qty > flt(item['qty']) and no_of_cases: self.recommend_new_qty(item, ps_item_qty, no_of_cases) def get_details_for_packing(self): """ Returns * 'Delivery Note Items' query result as a list of dict * Item Quantity dict of current packing slip doc * No. 
of Cases of this packing slip """ rows = [d.item_code for d in self.get("items")] condition = "" if rows: condition = " and item_code in (%s)" % (", ".join(["%s"]*len(rows))) # gets item code, qty per item code, latest packed qty per item code and stock uom res = frappe.db.sql("""select item_code, ifnull(sum(qty), 0) as qty, (select sum(ifnull(psi.qty, 0) * (abs(ps.to_case_no - ps.from_case_no) + 1)) from `tabPacking Slip` ps, `tabPacking Slip Item` psi where ps.name = psi.parent and ps.docstatus = 1 and ps.delivery_note = dni.parent and psi.item_code=dni.item_code) as packed_qty, stock_uom, item_name from `tabDelivery Note Item` dni where parent=%s %s group by item_code""" % ("%s", condition), tuple([self.delivery_note] + rows), as_dict=1) ps_item_qty = dict([[d.item_code, d.qty] for d in self.get("items")]) no_of_cases = cint(self.to_case_no) - cint(self.from_case_no) + 1 return res, ps_item_qty, no_of_cases def recommend_new_qty(self, item, ps_item_qty, no_of_cases): """ Recommend a new quantity and raise a validation exception """ item['recommended_qty'] = (flt(item['qty']) - flt(item['packed_qty'])) / no_of_cases item['specified_qty'] = flt(ps_item_qty[item['item_code']]) if not item['packed_qty']: item['packed_qty'] = 0 frappe.throw(_("Quantity for Item {0} must be less than {1}").format(item.get("item_code"), item.get("recommended_qty"))) def update_item_details(self): """ Fill empty columns in Packing Slip Item """ if not self.from_case_no: self.from_case_no = self.get_recommended_case_no() for d in self.get("items"): res = frappe.db.get_value("Item", d.item_code, ["net_weight", "weight_uom"], as_dict=True) if res and len(res)>0: d.net_weight = res["net_weight"] d.weight_uom = res["weight_uom"] def get_recommended_case_no(self): """ Returns the next case no. 
for a new packing slip for a delivery note """ recommended_case_no = frappe.db.sql("""SELECT MAX(to_case_no) FROM `tabPacking Slip` WHERE delivery_note = %s AND docstatus=1""", self.delivery_note) return cint(recommended_case_no[0][0]) + 1 def get_items(self): self.set("items", []) dn_details = self.get_details_for_packing()[0] for item in dn_details: if flt(item.qty) > flt(item.packed_qty): ch = self.append('items', {}) ch.item_code = item.item_code ch.item_name = item.item_name ch.stock_uom = item.stock_uom ch.qty = flt(item.qty) - flt(item.packed_qty) self.update_item_details() def item_details(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond return frappe.db.sql("""select name, item_name, description from `tabItem` where name in ( select item_code FROM `tabDelivery Note Item` where parent= %s) and %s like "%s" %s limit %s, %s """ % ("%s", searchfield, "%s", get_match_cond(doctype), "%s", "%s"), (filters["delivery_note"], "%%%s%%" % txt, start, page_len))
agpl-3.0
Appono/hey-eurydices
test/distutils/setup.py
82
1104
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import envoy try: from setuptools import setup except ImportError: from distutils.core import setup if sys.argv[-1] == "publish": os.system("python setup.py sdist upload") sys.exit() required = [] setup( name='envoy', version=envoy.__version__, description='Simple API for running external processes.', author='Kenneth Reitz', author_email='me@kennethreitz.com', url='https://github.com/kennethreitz/envoy', py_modules= ['envoy'], install_requires=required, license='MIT', classifiers=( 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', # 'Programming Language :: Python :: 3.0', # 'Programming Language :: Python :: 3.1', ), )
mit
JaDogg/__py_playground
reference/sketchbook/lex/dfa_minimize.py
1
2237
import DFA # 'his' DFAs def minimal_state_count(dfa): """Return the minimum number of states an equivalent dfa can have. Don't count the error state if one must be added to satisfy the DFA module's definition of a dfa.""" dfa_with_error = maybe_add_error_state(dfa) error_diff = len(dfa_with_error) - len(dfa) his = his_from_mine(dfa_with_error) his.minimize() return len(his.states) - error_diff def his_from_mine(dfa): dfa = maybe_add_error_state(dfa) states = range(len(dfa)) return DFA.DFA(states, get_alphabet(dfa), lambda state, c: dfa[state][1][c], 0, [state for state in states if dfa[state][0]]) def get_alphabet(dfa): return set(c for _, moves in dfa for c in moves) def maybe_add_error_state(dfa): "Return an equivalent to dfa with no missing move-table entries." alphabet = get_alphabet(dfa) if all(len(moves) == len(alphabet) for _, moves in dfa): return dfa error_state = len(dfa) # A new state for the missing moves to go to return [(accepting, dict((c, moves.get(c, error_state)) for c in alphabet)) for accepting, moves in dfa + [(False, {})]] def mine_from_his(his): # Not quite right -- we still need to make state 0 the start state. nstates = len(his.states) perm = dict((his_state, i) for i, his_state in enumerate(his.states)) print perm states = [(state in his.accepts, dict((c, perm[his.delta(state, c)]) for c in his.alphabet)) for state in his.states] return perm[his.start], states ## dfa = [(False, {'0': 1}), (True, {'1': 0, '0': 2}), (False, {'0': 1})] ## maybe_add_error_state(dfa) #. [(False, {'1': 3, '0': 1}), (True, {'1': 0, '0': 2}), (False, {'1': 3, '0': 1}), (False, {'1': 3, '0': 3})] ## his = his_from_mine(dfa) ## his.states #. set([0, 1, 2, 3]) ## his.alphabet #. set(['1', '0']) ## his.accepts #. set([1]) ## his.minimize() ## his.states #. [1, 0, 3] ## mine_from_his(his) #. {0: 1, 1: 0, 3: 2} #. #. (1, [(True, {'1': 1, '0': 1}), (False, {'1': 2, '0': 0}), (False, {'1': 2, '0': 2})])
mit
farhaanbukhsh/networkx
networkx/readwrite/json_graph/adjacency.py
41
4896
# Copyright (C) 2011-2013 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. from itertools import chain, count import networkx as nx __author__ = """Aric Hagberg <aric.hagberg@gmail.com>""" __all__ = ['adjacency_data', 'adjacency_graph'] _attrs = dict(id='id', key='key') def adjacency_data(G, attrs=_attrs): """Return data in adjacency format that is suitable for JSON serialization and use in Javascript documents. Parameters ---------- G : NetworkX graph attrs : dict A dictionary that contains two keys 'id' and 'key'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', key='key')`. If some user-defined graph data use these attribute names as data keys, they may be silently dropped. Returns ------- data : dict A dictionary with adjacency formatted data. Raises ------ NetworkXError If values in attrs are not unique. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.Graph([(1,2)]) >>> data = json_graph.adjacency_data(G) To serialize with json >>> import json >>> s = json.dumps(data) Notes ----- Graph, node, and link attributes will be written when using this format but attribute keys must be strings if you want to serialize the resulting data with JSON. The default value of attrs will be changed in a future release of NetworkX. See Also -------- adjacency_graph, node_link_data, tree_data """ multigraph = G.is_multigraph() id_ = attrs['id'] # Allow 'key' to be omitted from attrs if the graph is not a multigraph. 
key = None if not multigraph else attrs['key'] if id_ == key: raise nx.NetworkXError('Attribute names are not unique.') data = {} data['directed'] = G.is_directed() data['multigraph'] = multigraph data['graph'] = list(G.graph.items()) data['nodes'] = [] data['adjacency'] = [] for n, nbrdict in G.adjacency_iter(): data['nodes'].append(dict(chain(G.node[n].items(), [(id_, n)]))) adj = [] if multigraph: for nbr, keys in nbrdict.items(): for k, d in keys.items(): adj.append(dict(chain(d.items(), [(id_, nbr), (key, k)]))) else: for nbr, d in nbrdict.items(): adj.append(dict(chain(d.items(), [(id_, nbr)]))) data['adjacency'].append(adj) return data def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs): """Return graph from adjacency data format. Parameters ---------- data : dict Adjacency list formatted graph data Returns ------- G : NetworkX graph A NetworkX graph object directed : bool If True, and direction not specified in data, return a directed graph. multigraph : bool If True, and multigraph not specified in data, return a multigraph. attrs : dict A dictionary that contains two keys 'id' and 'key'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', key='key')`. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.Graph([(1,2)]) >>> data = json_graph.adjacency_data(G) >>> H = json_graph.adjacency_graph(data) Notes ----- The default value of attrs will be changed in a future release of NetworkX. See Also -------- adjacency_graph, node_link_data, tree_data """ multigraph = data.get('multigraph', multigraph) directed = data.get('directed', directed) if multigraph: graph = nx.MultiGraph() else: graph = nx.Graph() if directed: graph = graph.to_directed() id_ = attrs['id'] # Allow 'key' to be omitted from attrs if the graph is not a multigraph. 
key = None if not multigraph else attrs['key'] graph.graph = dict(data.get('graph', [])) mapping = [] for d in data['nodes']: node_data = d.copy() node = node_data.pop(id_) mapping.append(node) graph.add_node(node, attr_dict=node_data) for i, d in enumerate(data['adjacency']): source = mapping[i] for tdata in d: target_data = tdata.copy() target = target_data.pop(id_) if not multigraph: graph.add_edge(source, target, attr_dict=tdata) else: ky = target_data.pop(key, None) graph.add_edge(source, target, key=ky, attr_dict=tdata) return graph
bsd-3-clause
jagguli/intellij-community
python/helpers/pycharm_generator_utils/clr_tools.py
82
2171
# coding=utf-8 """ .NET (CLR) specific functions """ __author__ = 'Ilya.Kazakevich' def get_namespace_by_name(object_name): """ Gets namespace for full object name. Sometimes last element of name is module while it may be class. For System.Console returns System, for System.Web returns System.Web. Be sure all required assemblies are loaded (i.e. clr.AddRef.. is called) :param object_name: name to parse :return: namespace """ (imported_object, object_name) = _import_first(object_name) parts = object_name.partition(".") first_part = parts[0] remain_part = parts[2] while remain_part and type(_get_attr_by_name(imported_object, remain_part)) is type: # While we are in class remain_part = remain_part.rpartition(".")[0] if remain_part: return first_part + "." + remain_part else: return first_part def _import_first(object_name): """ Some times we can not import module directly. For example, Some.Class.InnerClass could not be imported: you need to import "Some.Class" or even "Some" instead. This function tries to find part of name that could be loaded :param object_name: name in dotted notation like "Some.Function.Here" :return: (imported_object, object_name): tuple with object and its name """ while object_name: try: return (__import__(object_name, globals=[], locals=[], fromlist=[]), object_name) except ImportError: object_name = object_name.rpartition(".")[0] # Remove rightest part raise Exception("No module name found in name " + object_name) def _get_attr_by_name(obj, name): """ Accepts chain of attributes in dot notation like "some.property.name" and gets them on object :param obj: object to introspec :param name: attribute name :return attribute >>> str(_get_attr_by_name("A", "__class__.__class__")) "<type 'type'>" >>> str(_get_attr_by_name("A", "__class__.__len__.__class__")) "<type 'method_descriptor'>" """ result = obj parts = name.split('.') for part in parts: result = getattr(result, part) return result
apache-2.0
nox/servo
tests/wpt/css-tests/tools/py/py/__init__.py
171
5872
""" py.test and pylib: rapid testing and development utils this module uses apipkg.py for lazy-loading sub modules and classes. The initpkg-dictionary below specifies name->value mappings where value can be another namespace dictionary or an import path. (c) Holger Krekel and others, 2004-2014 """ __version__ = '1.4.31' from py import _apipkg # so that py.error.* instances are picklable import sys sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') _apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ # access to all standard lib modules 'std': '._std:std', # access to all posix errno's as classes 'error': '._error:error', '_pydir' : '.__metainfo:pydir', 'version': 'py:__version__', # backward compatibility # pytest-2.0 has a flat namespace, we use alias modules # to keep old references compatible 'test' : 'pytest', 'test.collect' : 'pytest', 'test.cmdline' : 'pytest', # hook into the top-level standard library 'process' : { '__doc__' : '._process:__doc__', 'cmdexec' : '._process.cmdexec:cmdexec', 'kill' : '._process.killproc:kill', 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', }, 'apipkg' : { 'initpkg' : '._apipkg:initpkg', 'ApiModule' : '._apipkg:ApiModule', }, 'iniconfig' : { 'IniConfig' : '._iniconfig:IniConfig', 'ParseError' : '._iniconfig:ParseError', }, 'path' : { '__doc__' : '._path:__doc__', 'svnwc' : '._path.svnwc:SvnWCCommandPath', 'svnurl' : '._path.svnurl:SvnCommandPath', 'local' : '._path.local:LocalPath', 'SvnAuth' : '._path.svnwc:SvnAuth', }, # python inspection/code-generation API 'code' : { '__doc__' : '._code:__doc__', 'compile' : '._code.source:compile_', 'Source' : '._code.source:Source', 'Code' : '._code.code:Code', 'Frame' : '._code.code:Frame', 'ExceptionInfo' : '._code.code:ExceptionInfo', 'Traceback' : '._code.code:Traceback', 'getfslineno' : '._code.source:getfslineno', 'getrawcode' : '._code.code:getrawcode', 'patch_builtins' : '._code.code:patch_builtins', 'unpatch_builtins' : 
'._code.code:unpatch_builtins', '_AssertionError' : '._code.assertion:AssertionError', '_reinterpret_old' : '._code.assertion:reinterpret_old', '_reinterpret' : '._code.assertion:reinterpret', '_reprcompare' : '._code.assertion:_reprcompare', '_format_explanation' : '._code.assertion:_format_explanation', }, # backports and additions of builtins 'builtin' : { '__doc__' : '._builtin:__doc__', 'enumerate' : '._builtin:enumerate', 'reversed' : '._builtin:reversed', 'sorted' : '._builtin:sorted', 'any' : '._builtin:any', 'all' : '._builtin:all', 'set' : '._builtin:set', 'frozenset' : '._builtin:frozenset', 'BaseException' : '._builtin:BaseException', 'GeneratorExit' : '._builtin:GeneratorExit', '_sysex' : '._builtin:_sysex', 'print_' : '._builtin:print_', '_reraise' : '._builtin:_reraise', '_tryimport' : '._builtin:_tryimport', 'exec_' : '._builtin:exec_', '_basestring' : '._builtin:_basestring', '_totext' : '._builtin:_totext', '_isbytes' : '._builtin:_isbytes', '_istext' : '._builtin:_istext', '_getimself' : '._builtin:_getimself', '_getfuncdict' : '._builtin:_getfuncdict', '_getcode' : '._builtin:_getcode', 'builtins' : '._builtin:builtins', 'execfile' : '._builtin:execfile', 'callable' : '._builtin:callable', 'bytes' : '._builtin:bytes', 'text' : '._builtin:text', }, # input-output helping 'io' : { '__doc__' : '._io:__doc__', 'dupfile' : '._io.capture:dupfile', 'TextIO' : '._io.capture:TextIO', 'BytesIO' : '._io.capture:BytesIO', 'FDCapture' : '._io.capture:FDCapture', 'StdCapture' : '._io.capture:StdCapture', 'StdCaptureFD' : '._io.capture:StdCaptureFD', 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', 'ansi_print' : '._io.terminalwriter:ansi_print', 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', 'saferepr' : '._io.saferepr:saferepr', }, # small and mean xml/html generation 'xml' : { '__doc__' : '._xmlgen:__doc__', 'html' : '._xmlgen:html', 'Tag' : '._xmlgen:Tag', 'raw' : '._xmlgen:raw', 'Namespace' : '._xmlgen:Namespace', 'escape' : 
'._xmlgen:escape', }, 'log' : { # logging API ('producers' and 'consumers' connected via keywords) '__doc__' : '._log:__doc__', '_apiwarn' : '._log.warning:_apiwarn', 'Producer' : '._log.log:Producer', 'setconsumer' : '._log.log:setconsumer', '_setstate' : '._log.log:setstate', '_getstate' : '._log.log:getstate', 'Path' : '._log.log:Path', 'STDOUT' : '._log.log:STDOUT', 'STDERR' : '._log.log:STDERR', 'Syslog' : '._log.log:Syslog', }, })
mpl-2.0
tbeckham/calyptos
docs/conf.py
9
9299
# -*- coding: utf-8 -*- # # calyptos documentation build configuration file, created by # sphinx-quickstart on Mon May 11 15:52:37 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'calyptos' copyright = u'2015, Eucalyptus Quality Team' author = u'Eucalyptus Quality Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0.0' # The full version, including alpha/beta/rc tags. release = '1.0.0' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'euca-deploydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'calyptos.tex', u'calyptos Documentation', u'Eucalyptus Quality Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. 
#latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'calyptos', u'calyptos Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'calyptos', u'calyptos Documentation', author, 'calyptos', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
bsd-2-clause
SUSE-Cloud/nova
nova/cells/rpc_driver.py
11
11809
# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cells RPC Communication Driver """ import urllib import urlparse from oslo.config import cfg from nova.cells import driver from nova.openstack.common.gettextutils import _ from nova.openstack.common import rpc from nova.openstack.common.rpc import dispatcher as rpc_dispatcher from nova import rpcclient cell_rpc_driver_opts = [ cfg.StrOpt('rpc_driver_queue_base', default='cells.intercell', help="Base queue name to use when communicating between " "cells. Various topics by message type will be " "appended to this.")] CONF = cfg.CONF CONF.register_opts(cell_rpc_driver_opts, group='cells') CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells') CONF.import_opt('rpc_backend', 'nova.openstack.common.rpc') rpcapi_cap_opt = cfg.StrOpt('intercell', help='Set a version cap for messages sent between cells services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') _CELL_TO_CELL_RPC_API_VERSION = '1.0' class CellsRPCDriver(driver.BaseCellsDriver): """Driver for cell<->cell communication via RPC. This is used to setup the RPC consumers as well as to send a message to another cell. One instance of this class will be created for every neighbor cell that we find in the DB and it will be associated with the cell in its CellState. One instance is also created by the cells manager for setting up the consumers. 
""" BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION def __init__(self, *args, **kwargs): super(CellsRPCDriver, self).__init__(*args, **kwargs) self.rpc_connections = [] self.intercell_rpcapi = InterCellRPCAPI( self.BASE_RPC_API_VERSION) def _start_consumer(self, dispatcher, topic): """Start an RPC consumer.""" conn = rpc.create_connection(new=True) conn.create_consumer(topic, dispatcher, fanout=False) conn.create_consumer(topic, dispatcher, fanout=True) self.rpc_connections.append(conn) conn.consume_in_thread() return conn def start_consumers(self, msg_runner): """Start RPC consumers. Start up 2 separate consumers for handling inter-cell communication via RPC. Both handle the same types of messages, but requests/replies are separated to solve potential deadlocks. (If we used the same queue for both, it's possible to exhaust the RPC thread pool while we wait for replies.. such that we'd never consume a reply.) """ topic_base = CONF.cells.rpc_driver_queue_base proxy_manager = InterCellRPCDispatcher(msg_runner) # NOTE(comstud): We do not need to use the object serializer # on this because object serialization is taken care for us in # the messaging module. dispatcher = rpc_dispatcher.RpcDispatcher([proxy_manager]) for msg_type in msg_runner.get_message_types(): topic = '%s.%s' % (topic_base, msg_type) self._start_consumer(dispatcher, topic) def stop_consumers(self): """Stop RPC consumers. NOTE: Currently there's no hooks when stopping services to have managers cleanup, so this is not currently called. """ for conn in self.rpc_connections: conn.close() def send_message_to_cell(self, cell_state, message): """Use the IntercellRPCAPI to send a message to a cell.""" self.intercell_rpcapi.send_message_to_cell(cell_state, message) class InterCellRPCAPI(rpcclient.RpcProxy): """Client side of the Cell<->Cell RPC API. The CellsRPCDriver uses this to make calls to another cell. API version history: 1.0 - Initial version. ... Grizzly supports message version 1.0. 
So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.0. """ VERSION_ALIASES = { 'grizzly': '1.0', } def __init__(self, default_version): version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell, CONF.upgrade_levels.intercell) super(InterCellRPCAPI, self).__init__(None, default_version, version_cap=version_cap) def _get_client(self, next_hop, topic): server_params = self._get_server_params_for_cell(next_hop) cctxt = self.get_client(server_params=server_params) return cctxt.prepare(topic=topic) @staticmethod def _get_server_params_for_cell(next_hop): """Turn the DB information for a cell into the parameters needed for the RPC call. """ server_params = parse_transport_url(next_hop.db_info['transport_url']) return dict((k, v) for k, v in server_params.items() if v) def send_message_to_cell(self, cell_state, message): """Send a message to another cell by JSON-ifying the message and making an RPC cast to 'process_message'. If the message says to fanout, do it. The topic that is used will be 'CONF.rpc_driver_queue_base.<message_type>'. """ topic_base = CONF.cells.rpc_driver_queue_base topic = '%s.%s' % (topic_base, message.message_type) cctxt = self._get_client(cell_state, topic) if message.fanout: cctxt = cctxt.prepare(fanout=message.fanout) return cctxt.cast(message.ctxt, 'process_message', message=message.to_json()) class InterCellRPCDispatcher(object): """RPC Dispatcher to handle messages received from other cells. All messages received here have come from a sibling cell. Depending on the ultimate target and type of message, we may process the message in this cell, relay the message to another sibling cell, or both. This logic is defined by the message class in the messaging module. 
""" BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION def __init__(self, msg_runner): """Init the Intercell RPC Dispatcher.""" self.msg_runner = msg_runner def process_message(self, _ctxt, message): """We received a message from another cell. Use the MessageRunner to turn this from JSON back into an instance of the correct Message class. Then process it! """ message = self.msg_runner.message_from_json(message) message.process() def parse_transport_url(url): """ Parse a transport URL. :param url: The transport URL. :returns: A dictionary of 5 elements: the "username", the "password", the "hostname", the "port" (as an integer), and the "virtual_host" for the requested transport. """ # TODO(Vek): Use the actual Oslo code, once it lands in # oslo-incubator # First step is to parse the URL parsed = urlparse.urlparse(url or '') # Make sure we understand the scheme if parsed.scheme not in ('rabbit', 'qpid'): raise ValueError(_("Unable to handle transport URL scheme %s") % parsed.scheme) # Make sure there's not a query string; that could identify # requirements we can't comply with (e.g., ssl), so reject it if # it's present if '?' in parsed.path or parsed.query: raise ValueError(_("Cannot comply with query string in transport URL")) # Extract the interesting information from the URL; this requires # dequoting values, and ensuring empty values become None username = urllib.unquote(parsed.username) if parsed.username else None password = urllib.unquote(parsed.password) if parsed.password else None virtual_host = urllib.unquote(parsed.path[1:]) or None # Now we have to extract the hostname and port; unfortunately, # urlparse in Python 2.6 doesn't understand IPv6 addresses hostname = parsed.hostname if hostname and hostname[0] == '[': # If '@' is present, rfind() finds its position; if it isn't, # rfind() returns -1. Either way, adding 1 gives us the start # location of the host and port... 
host_start = parsed.netloc.rfind('@') netloc = parsed.netloc[host_start + 1:] # Find the closing ']' and extract the hostname host_end = netloc.find(']') if host_end < 0: # NOTE(Vek): Not translated so it's identical to what # Python 2.7's urlparse.urlparse() raises in this case raise ValueError("Invalid IPv6 URL") hostname = netloc[1:host_end] # Now we need the port; this is compliant with how urlparse # parses the port data port_text = netloc[host_end:] port = None if ':' in port_text: port = int(port_text.split(':', 1)[1]) else: port = parsed.port # Now that we have what we need, return the information return { 'username': username, 'password': password, 'hostname': hostname, 'port': port, 'virtual_host': virtual_host, } def unparse_transport_url(transport, secure=True): """ Unparse a transport URL; that is, synthesize a transport URL from a dictionary similar to that one returned by parse_transport_url(). :param transport: The dictionary containing the transport URL components. :param secure: Used to identify whether the transport URL is wanted for a secure or insecure link. If True--indicating a secure link--the password will be included; otherwise, it won't. :returns: The transport URL. 
""" # Starting place for the network location netloc = '' # Extract all the data we need from the dictionary username = transport.get('username') if secure: password = transport.get('password') else: password = None hostname = transport.get('hostname') port = transport.get('port') virtual_host = transport.get('virtual_host') # Build the username and password portion of the transport URL if username or password: if username: netloc += urllib.quote(username, '') if password: netloc += ':%s' % urllib.quote(password, '') netloc += '@' # Build the network location portion of the transport URL if hostname: if ':' in hostname: # Encode an IPv6 address properly netloc += "[%s]" % hostname else: netloc += hostname if port is not None: netloc += ":%d" % port # Determine the scheme # NOTE(Vek): This isn't really that robust, but should be more # than sufficient to carry us on until the more # complete transition to transport URLs can be # accomplished. if CONF.rpc_backend.endswith('qpid'): scheme = 'qpid' else: scheme = 'rabbit' # Assemble the transport URL url = "%s://%s/" % (scheme, netloc) if virtual_host: url += urllib.quote(virtual_host, '') return url
apache-2.0
JVillella/tensorflow
tensorflow/python/feature_column/feature_column.py
13
97953
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This API defines FeatureColumn abstraction. FeatureColumns provide a high level abstraction for ingesting and representing features. FeatureColumns are also the primary way of encoding features for canned ${tf.estimator.Estimator}s. When using FeatureColumns with `Estimators`, the type of feature column you should choose depends on (1) the feature type and (2) the model type. 1. Feature type: * Continuous features can be represented by `numeric_column`. * Categorical features can be represented by any `categorical_column_with_*` column: - `categorical_column_with_vocabulary_list` - `categorical_column_with_vocabulary_file` - `categorical_column_with_hash_bucket` - `categorical_column_with_identity` - `weighted_categorical_column` 2. Model type: * Deep neural network models (`DNNClassifier`, `DNNRegressor`). Continuous features can be directly fed into deep neural network models. age_column = numeric_column("age") To feed sparse features into DNN models, wrap the column with `embedding_column` or `indicator_column`. `indicator_column` is recommended for features with only a few possible values. For features with many possible values, to reduce the size of your model, `embedding_column` is recommended. 
embedded_dept_column = embedding_column( categorical_column_with_vocabulary_list( "department", ["math", "philosphy", ...]), dimension=10) * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`). Sparse features can be fed directly into linear models. They behave like an indicator column but with an efficient implementation. dept_column = categorical_column_with_vocabulary_list("department", ["math", "philosophy", "english"]) It is recommended that continuous features be bucketized before being fed into linear models. bucketized_age_column = bucketized_column( source_column=age_column, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) Sparse features can be crossed (also known as conjuncted or combined) in order to form non-linearities, and then fed into linear models. cross_dept_age_column = crossed_column( columns=["department", bucketized_age_column], hash_bucket_size=1000) Example of building canned `Estimator`s using FeatureColumns: ```python # Define features and transformations deep_feature_columns = [age_column, embedded_dept_column] wide_feature_columns = [dept_column, bucketized_age_column, cross_dept_age_column] # Build deep model estimator = DNNClassifier( feature_columns=deep_feature_columns, hidden_units=[500, 250, 50]) estimator.train(...) # Or build a wide model estimator = LinearClassifier( feature_columns=wide_feature_columns) estimator.train(...) # Or build a wide and deep model! estimator = DNNLinearCombinedClassifier( linear_feature_columns=wide_feature_columns, dnn_feature_columns=deep_feature_columns, dnn_hidden_units=[500, 250, 50]) estimator.train(...) ``` FeatureColumns can also be transformed into a generic input layer for custom models using `input_layer`. 
Example of building model using FeatureColumns, this can be used in a `model_fn` which is given to the {tf.estimator.Estimator}: ```python # Building model via layers deep_feature_columns = [age_column, embedded_dept_column] columns_to_tensor = parse_feature_columns_from_examples( serialized=my_data, feature_columns=deep_feature_columns) first_layer = input_layer( features=columns_to_tensor, feature_columns=deep_feature_columns) second_layer = fully_connected(first_layer, ...) ``` NOTE: Functions prefixed with "_" indicate experimental or private parts of the API subject to change, and should not be relied upon! """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import math import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import checkpoint_utils from tensorflow.python.util import nest def input_layer(features, feature_columns, weight_collections=None, trainable=True): """Returns a dense `Tensor` as input layer based on given `feature_columns`. 
Generally a single example in training data is described with FeatureColumns. At the first layer of the model, this column oriented data should be converted to a single `Tensor`. Example: ```python price = numeric_column('price') keywords_embedded = embedding_column( categorical_column_with_hash_bucket("keywords", 10K), dimensions=16) columns = [price, keywords_embedded, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) for units in [128, 64, 32]: dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu) prediction = tf.layers.dense(dense_tensor, 1) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `_FeatureColumn`. feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. All items should be instances of classes derived from `_DenseColumn` such as `numeric_column`, `embedding_column`, `bucketized_column`, `indicator_column`. If you have categorical features, you can wrap them with an `embedding_column` or `indicator_column`. weight_collections: A list of collection names to which the Variable will be added. Note that, variables will also be added to collections `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). Returns: A `Tensor` which represents input layer of a model. Its shape is (batch_size, first_layer_dimension) and its dtype is `float32`. first_layer_dimension is determined based on given `feature_columns`. Raises: ValueError: if an item in `feature_columns` is not a `_DenseColumn`. 
""" _check_feature_columns(feature_columns) for column in feature_columns: if not isinstance(column, _DenseColumn): raise ValueError( 'Items of feature_columns must be a _DenseColumn. ' 'You can wrap a categorical column with an ' 'embedding_column or indicator_column. Given: {}'.format(column)) weight_collections = list(weight_collections or []) if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections: weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES) if ops.GraphKeys.MODEL_VARIABLES not in weight_collections: weight_collections.append(ops.GraphKeys.MODEL_VARIABLES) with variable_scope.variable_scope( None, default_name='input_layer', values=features.values()): builder = _LazyBuilder(features) output_tensors = [] ordered_columns = [] for column in sorted(feature_columns, key=lambda x: x.name): ordered_columns.append(column) with variable_scope.variable_scope(None, default_name=column.name): tensor = column._get_dense_tensor( # pylint: disable=protected-access builder, weight_collections=weight_collections, trainable=trainable) num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access batch_size = array_ops.shape(tensor)[0] tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements)) output_tensors.append(tensor) _verify_static_batch_size_equality(output_tensors, ordered_columns) return array_ops.concat(output_tensors, 1) def linear_model(features, feature_columns, units=1, sparse_combiner='sum', weight_collections=None, trainable=True): """Returns a linear prediction `Tensor` based on given `feature_columns`. This function generates a weighted sum based on output dimension `units`. Weighted sum refers to logits in classification problems. It refers to the prediction itself for linear regression problems. Note on supported columns: `linear_model` treats categorical columns as `indicator_column`s while `input_layer` explicitly requires wrapping each of them with an `embedding_column` or an `indicator_column`. 
Example: ```python price = numeric_column('price') price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.]) keywords = categorical_column_with_hash_bucket("keywords", 10K) keywords_price = crossed_column('keywords', price_buckets, ...) columns = [price_buckets, keywords, keywords_price ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) prediction = linear_model(features, columns) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values are `Tensor` or `SparseTensor` depending on corresponding `_FeatureColumn`. feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. All items should be instances of classes derived from `_FeatureColumn`s. units: An integer, dimensionality of the output space. Default value is 1. sparse_combiner: A string specifying how to reduce if a sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default. "sqrtn" often achieves good accuracy, in particular with bag-of-words columns. It combines each sparse columns independently. * "sum": do not normalize features in the column * "mean": do l1 normalization on features in the column * "sqrtn": do l2 normalization on features in the column weight_collections: A list of collection names to which the Variable will be added. Note that, variables will also be added to collections `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). Returns: A `Tensor` which represents predictions/logits of a linear model. Its shape is (batch_size, units) and its dtype is `float32`. Raises: ValueError: if an item in `feature_columns` is neither a `_DenseColumn` nor `_CategoricalColumn`. 
""" _check_feature_columns(feature_columns) for column in feature_columns: if not isinstance(column, (_DenseColumn, _CategoricalColumn)): raise ValueError('Items of feature_columns must be either a _DenseColumn ' 'or _CategoricalColumn. Given: {}'.format(column)) weight_collections = list(weight_collections or []) if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections: weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES) if ops.GraphKeys.MODEL_VARIABLES not in weight_collections: weight_collections.append(ops.GraphKeys.MODEL_VARIABLES) with variable_scope.variable_scope( None, default_name='linear_model', values=features.values()): weighted_sums = [] ordered_columns = [] builder = _LazyBuilder(features) for column in sorted(feature_columns, key=lambda x: x.name): with variable_scope.variable_scope(None, default_name=column.name): ordered_columns.append(column) if isinstance(column, _CategoricalColumn): weighted_sums.append(_create_categorical_column_weighted_sum( column, builder, units, sparse_combiner, weight_collections, trainable)) else: weighted_sums.append(_create_dense_column_weighted_sum( column, builder, units, weight_collections, trainable)) _verify_static_batch_size_equality(weighted_sums, ordered_columns) predictions_no_bias = math_ops.add_n( weighted_sums, name='weighted_sum_no_bias') bias = variable_scope.get_variable( 'bias_weights', shape=[units], initializer=init_ops.zeros_initializer(), trainable=trainable, collections=weight_collections) predictions = nn_ops.bias_add( predictions_no_bias, bias, name='weighted_sum') return predictions def _transform_features(features, feature_columns): """Returns transformed features based on features columns passed in. Please note that most probably you would not need to use this function. Please check `input_layer` and `linear_model` to see whether they will satisfy your use case or not. 
Example: ```python # Define features and transformations crosses_a_x_b = crossed_column( columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000) price_buckets = bucketized_column( source_column=numeric_column("price"), boundaries=[...]) columns = [crosses_a_x_b, price_buckets] features = tf.parse_example(..., features=make_parse_example_spec(columns)) transformed = transform_features(features=features, feature_columns=columns) assertCountEqual(columns, transformed.keys()) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `_FeatureColumn`. feature_columns: An iterable containing all the `_FeatureColumn`s. Returns: A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values. """ _check_feature_columns(feature_columns) outputs = {} with ops.name_scope( None, default_name='transform_features', values=features.values()): builder = _LazyBuilder(features) for column in sorted(feature_columns, key=lambda x: x.name): with ops.name_scope(None, default_name=column.name): outputs[column] = builder.get(column) return outputs def make_parse_example_spec(feature_columns): """Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as arg 'features' in `tf.parse_example`. Typical usage example: ```python # Define features and transformations feature_b = numeric_column(...) feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...) feature_a_x_feature_c = crossed_column( columns=["feature_a", feature_c_bucketized], ...) 
feature_columns = set( [feature_b, feature_c_bucketized, feature_a_x_feature_c]) features = tf.parse_example( serialized=serialized_examples, features=make_parse_example_spec(feature_columns)) ``` For the above example, make_parse_example_spec would return the dict: ```python { "feature_a": parsing_ops.VarLenFeature(tf.string), "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32), "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32) } ``` Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from `_FeatureColumn`. Returns: A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature` value. Raises: ValueError: If any of the given `feature_columns` is not a `_FeatureColumn` instance. """ result = {} for column in feature_columns: if not isinstance(column, _FeatureColumn): raise ValueError( 'All feature_columns must be _FeatureColumn instances. ' 'Given: {}'.format(column)) config = column._parse_example_spec # pylint: disable=protected-access for key, value in six.iteritems(config): if key in result and value != result[key]: raise ValueError( 'feature_columns contain different parse_spec for key ' '{}. Given {} and {}'.format(key, value, result[key])) result.update(config) return result def embedding_column( categorical_column, dimension, combiner='mean', initializer=None, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True): """`_DenseColumn` that converts from sparse, categorical input. Use this when your inputs are sparse, but you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a `_CategoricalColumn` created by any of the `categorical_column_*` function. Here is an example embedding of an identity column for a DNN model: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] 
features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: categorical_column: A `_CategoricalColumn` created by a `categorical_column_with_*` function. This column produces the sparse IDs that are inputs to the embedding lookup. dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, embedding values are l2-normalized to this value. trainable: Whether or not the embedding is trainable. Default is True. Returns: `_DenseColumn` that converts from sparse input. Raises: ValueError: if `dimension` not > 0. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. 
""" if (dimension is None) or (dimension < 1): raise ValueError('Invalid dimension {}.'.format(dimension)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError('Must specify both `ckpt_to_load_from` and ' '`tensor_name_in_ckpt` or none of them.') if (initializer is not None) and (not callable(initializer)): raise ValueError('initializer must be callable if specified. ' 'Embedding of column_name: {}'.format( categorical_column.name)) if initializer is None: initializer = init_ops.truncated_normal_initializer( mean=0.0, stddev=1 / math.sqrt(dimension)) return _EmbeddingColumn( categorical_column=categorical_column, dimension=dimension, combiner=combiner, initializer=initializer, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt, max_norm=max_norm, trainable=trainable) def numeric_column(key, shape=(1,), default_value=None, dtype=dtypes.float32, normalizer_fn=None): """Represents real valued or numerical features. Example: ```python price = numeric_column('price') columns = [price, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) # or bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. shape: An iterable of integers specifies the shape of the `Tensor`. An integer can be given which means a single dimension `Tensor` with given width. The `Tensor` representing the column will have the shape of [batch_size] + `shape`. 
default_value: A single value compatible with `dtype` or an iterable of values compatible with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A default value of `None` will cause `tf.parse_example` to fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. Must be a non-quantized, real integer or floating point type. normalizer_fn: If not `None`, a function that can be used to normalize the value of the tensor after `default_value` is applied for parsing. Normalizer function takes the input `Tensor` as its argument, and returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of Tensorflow transformations. Returns: A `_NumericColumn`. Raises: TypeError: if any dimension in shape is not an int ValueError: if any dimension in shape is not a positive integer TypeError: if `default_value` is an iterable but not compatible with `shape` TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`. """ shape = _check_shape(shape, key) if not (dtype.is_integer or dtype.is_floating): raise ValueError('dtype must be convertible to float. ' 'dtype: {}, key: {}'.format(dtype, key)) default_value = _check_default_value(shape, default_value, dtype, key) if normalizer_fn is not None and not callable(normalizer_fn): raise TypeError( 'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn)) return _NumericColumn( key, shape=shape, default_value=default_value, dtype=dtype, normalizer_fn=normalizer_fn) def bucketized_column(source_column, boundaries): """Represents discretized dense input. 
Buckets include the left boundary, and exclude the right boundary. Namely, `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. For example, if the inputs are ```python boundaries = [0, 10, 100] input tensor = [[-5, 10000] [150, 10] [5, 100]] ``` then the output will be ```python output = [[0, 3] [3, 2] [1, 3]] ``` Example: ```python price = numeric_column('price') bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or columns = [bucketized_price, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` `bucketized_column` can also be crossed with another categorical column using `crossed_column`: ```python price = numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = bucketized_column(price, boundaries=[...]) # 'keywords' is a string feature. price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K) columns = [price_x_keywords, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: source_column: A one-dimensional dense column which is generated with `numeric_column`. boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A `_BucketizedColumn`. Raises: ValueError: If `source_column` is not a numeric column, or if it is not one-dimensional. ValueError: If `boundaries` is not a sorted list or tuple. """ if not isinstance(source_column, _NumericColumn): raise ValueError( 'source_column must be a column generated with numeric_column(). ' 'Given: {}'.format(source_column)) if len(source_column.shape) > 1: raise ValueError( 'source_column must be one-dimensional column. 
' 'Given: {}'.format(source_column)) if (not boundaries or not (isinstance(boundaries, list) or isinstance(boundaries, tuple))): raise ValueError('boundaries must be a sorted list.') for i in range(len(boundaries) - 1): if boundaries[i] >= boundaries[i + 1]: raise ValueError('boundaries must be a sorted list.') return _BucketizedColumn(source_column, tuple(boundaries)) def _assert_string_or_int(dtype, prefix): if (dtype != dtypes.string) and (not dtype.is_integer): raise ValueError( '{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype)) def categorical_column_with_hash_bucket(key, hash_bucket_size, dtype=dtypes.string): """Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string. Note that these values are independent of the `default_value` argument. Example: ```python keywords = categorical_column_with_hash_bucket("keywords", 10K) columns = [keywords, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or keywords_embedded = embedding_column(keywords, 16) columns = [keywords_embedded, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A `_HashedCategoricalColumn`. 
Raises: ValueError: `hash_bucket_size` is not greater than 1. ValueError: `dtype` is neither string nor integer. """ if hash_bucket_size is None: raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key)) if hash_bucket_size < 1: raise ValueError('hash_bucket_size must be at least 1. ' 'hash_bucket_size: {}, key: {}'.format( hash_bucket_size, key)) _assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) return _HashedCategoricalColumn(key, hash_bucket_size, dtype) def categorical_column_with_vocabulary_file( key, vocabulary_file, vocabulary_size, num_oov_buckets=0, default_value=None, dtype=dtypes.string): """A `_CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string. Note that these values are independent of the `default_value` argument. Example with `num_oov_buckets`: File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to its line number. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File '/us/states.txt' contains 51 lines - the first line is 'XX', and the other 50 each have a 2-character U.S. state abbreviation. 
Both a literal 'XX' in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. dtype: The type of features. Only string and integer types are supported. Returns: A `_CategoricalColumn` with a vocabulary file. Raises: ValueError: `vocabulary_file` is missing. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer. 
""" if not vocabulary_file: raise ValueError('Missing vocabulary_file in {}.'.format(key)) # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`. if (vocabulary_size is None) or (vocabulary_size < 1): raise ValueError('Invalid vocabulary_size in {}.'.format(key)) if num_oov_buckets: if default_value is not None: raise ValueError( 'Can\'t specify both num_oov_buckets and default_value in {}.'.format( key)) if num_oov_buckets < 0: raise ValueError('Invalid num_oov_buckets {} in {}.'.format( num_oov_buckets, key)) _assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) return _VocabularyFileCategoricalColumn( key=key, vocabulary_file=vocabulary_file, vocabulary_size=vocabulary_size, num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets, default_value=-1 if default_value is None else default_value, dtype=dtype) def categorical_column_with_vocabulary_list( key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0): """A `_CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string. Note that these values are independent of the `default_value` argument. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] 
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Example with `default_value`:
  In the following example, each input in `vocabulary_list` is assigned an ID
  0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
  inputs are assigned `default_value` 0.

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
  columns = [colors, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  And to make an embedding with either:

  ```python
  columns = [embedding_column(colors, 3),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_list: An ordered iterable defining the vocabulary. Each feature
      is mapped to the index of its value (if present) in `vocabulary_list`.
      Must be castable to `dtype`.
    dtype: The type of features. Only string and integer types are supported.
      If `None`, it will be inferred from `vocabulary_list`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
      hash of the input value. A positive `num_oov_buckets` can not be
      specified with `default_value`.

  Returns:
    A `_CategoricalColumn` with in-memory vocabulary.

  Raises:
    ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: if `dtype` is not integer or string.
  """
  if (vocabulary_list is None) or (len(vocabulary_list) < 1):
    raise ValueError(
        'vocabulary_list {} must be non-empty, column_name: {}'.format(
            vocabulary_list, key))
  if len(set(vocabulary_list)) != len(vocabulary_list):
    raise ValueError(
        'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
            vocabulary_list, key))
  # Infer the dtype of the vocabulary entries themselves via numpy, so it can
  # be checked against (or substituted for) the user-supplied `dtype`.
  vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
  if num_oov_buckets:
    if default_value != -1:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  _assert_string_or_int(
      vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
  if dtype is None:
    dtype = vocabulary_dtype
  elif dtype.is_integer != vocabulary_dtype.is_integer:
    raise ValueError(
        'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
            dtype, vocabulary_dtype, key))
  _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return _VocabularyListCategoricalColumn(
      key=key,
      vocabulary_list=tuple(vocabulary_list),
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)


def categorical_column_with_identity(key, num_buckets, default_value=None):
  """A `_CategoricalColumn` that returns identity values.

  Use this when your inputs are integers in the range `[0, num_buckets)`, and
  you want to use the input value itself as the categorical ID. Values outside
  this range will result in `default_value` if specified, otherwise it will
  fail.

  Typically, this is used for contiguous ranges of integer indexes, but it
  doesn't have to be. This might be inefficient, however, if many of IDs are
  unused. Consider `categorical_column_with_hash_bucket` in that case.
  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string. Note that these values are independent of the
  `default_value` argument.

  In the following examples, each input in the range `[0, 1000000)` is assigned
  the same value. All other inputs are assigned `default_value` 0. Note that a
  literal 0 in inputs will result in the same default ID.

  Linear model:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [video_id, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(video_id, 9),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If `None`, this column's graph operations will fail for
      out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and will replace inputs in that range.

  Returns:
    A `_CategoricalColumn` that returns identity values.

  Raises:
    ValueError: if `num_buckets` is less than one.
    ValueError: if `default_value` is not in range `[0, num_buckets)`.
  """
  if num_buckets < 1:
    raise ValueError(
        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
  # `default_value`, when given, must itself be a valid bucket id.
  if (default_value is not None) and (
      (default_value < 0) or (default_value >= num_buckets)):
    raise ValueError(
        'default_value {} not in range [0, {}), column_name {}'.format(
            default_value, num_buckets, key))
  return _IdentityCategoricalColumn(
      key=key, num_buckets=num_buckets, default_value=default_value)


def indicator_column(categorical_column):
  """Represents multi-hot representation of given categorical column.

  Used to wrap any `categorical_column_*` (e.g., to feed to DNN). Use
  `embedding_column` if the inputs are sparse.

  ```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda'])
  columns = [name, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  dense_tensor == [[1, 0, 0]]  # If "name" bytes_list is ["bob"]
  dense_tensor == [[1, 0, 1]]  # If "name" bytes_list is ["bob", "wanda"]
  dense_tensor == [[2, 0, 0]]  # If "name" bytes_list is ["bob", "bob"]
  ```

  Args:
    categorical_column: A `_CategoricalColumn` which is created by
      `categorical_column_with_*` or `crossed_column` functions.

  Returns:
    An `_IndicatorColumn`.
  """
  return _IndicatorColumn(categorical_column)


def weighted_categorical_column(
    categorical_column, weight_feature_key, dtype=dtypes.float32):
  """Applies weight values to a `_CategoricalColumn`.

  Use this when each of your sparse inputs has both an ID and a value. For
  example, if you're representing text documents as a collection of word
  frequencies, you can provide 2 parallel sparse input features ('terms' and
  'frequencies' below).
  Example:

  Input `tf.Example` objects:

  ```proto
  [
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "very" value: "model"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.3 value: 0.1}}
      }
    },
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "when" value: "course" value: "human"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.4 value: 0.1 value: 0.2}}
      }
    }
  ]
  ```

  ```python
  categorical_column = categorical_column_with_hash_bucket(
      column_name='terms', hash_bucket_size=1000)
  weighted_column = weighted_categorical_column(
      categorical_column=categorical_column, weight_feature_key='frequencies')
  columns = [weighted_column, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  This assumes the input dictionary contains a `SparseTensor` for key 'terms',
  and a `SparseTensor` for key 'frequencies'. These 2 tensors must have the
  same indices and dense shape.

  Args:
    categorical_column: A `_CategoricalColumn` created by
      `categorical_column_with_*` functions.
    weight_feature_key: String key for weight values.
    dtype: Type of weights, such as `tf.float32`. Only float and integer
      weights are supported.

  Returns:
    A `_CategoricalColumn` composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if `dtype` is not convertible to float.
  """
  if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
    raise ValueError('dtype {} is not convertible to float.'.format(dtype))
  return _WeightedCategoricalColumn(
      categorical_column=categorical_column,
      weight_feature_key=weight_feature_key,
      dtype=dtype)


def crossed_column(keys, hash_bucket_size, hash_key=None):
  """Returns a column for performing crosses of categorical features.

  Crossed features will be hashed according to `hash_bucket_size`.

  Conceptually, the transformation can be thought of as:
    Hash(cartesian product of features) % `hash_bucket_size`

  For example, if the input features are:

  * SparseTensor referred by first key:

    ```python
    shape = [2, 2]
    {
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
    }
    ```

  * SparseTensor referred by second key:

    ```python
    shape = [2, 1]
    {
        [0, 0]: "d"
        [1, 0]: "e"
    }
    ```

  then crossed feature will look like:

  ```python
  shape = [2, 2]
  {
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
  }
  ```

  Here is an example to create a linear model with crosses of string features:

  ```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  You could also use vocabulary lookup before crossing:

  ```python
  keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  If an input feature is of numeric type, you can use
  `categorical_column_with_identity`, or `bucketized_column`, as in the example:

  ```python
  # vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10K)
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  columns = [vertical_id_x_price, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  To use crossed column in DNN model, you need to add it in an embedding column
  as in this example:

  ```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
  dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
  ```

  Args:
    keys: An iterable identifying the features to be crossed. Each element can
      be either:
      * string: Will use the corresponding feature which must be of string
        type.
      * `_CategoricalColumn`: Will use the transformed tensor produced by this
        column. Does not support hashed categorical column.
    hash_bucket_size: An int > 1. The number of buckets.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseCrossOp (optional).

  Returns:
    A `_CrossedColumn`.

  Raises:
    ValueError: If `len(keys) < 2`.
    ValueError: If any of the keys is neither a string nor
      `_CategoricalColumn`.
    ValueError: If any of the keys is `_HashedCategoricalColumn`.
    ValueError: If `hash_bucket_size < 1`.
  """
  if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be > 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
  if not keys or len(keys) < 2:
    raise ValueError(
        'keys must be a list with length > 1. Given: {}'.format(keys))
  # Validate each key: plain strings and non-hashed categorical columns only.
  for key in keys:
    if (not isinstance(key, six.string_types) and
        not isinstance(key, _CategoricalColumn)):
      raise ValueError(
          'Unsupported key type. All keys must be either string, or '
          'categorical column except _HashedCategoricalColumn. '
          'Given: {}'.format(key))
    if isinstance(key, _HashedCategoricalColumn):
      raise ValueError(
          'categorical_column_with_hash_bucket is not supported for crossing. '
          'Hashing before crossing will increase probability of collision. '
          'Instead, use the feature name as a string. Given: {}'.format(key))
  return _CrossedColumn(
      keys=tuple(keys), hash_bucket_size=hash_bucket_size,
      hash_key=hash_key)


class _FeatureColumn(object):
  """Represents a feature column abstraction.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  To distinguish the concept of a feature family and a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. Following is an example feature in a `tf.Example` format:
    {key: "country", value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.

  This class is an abstract class. User should not create instances of this.
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def name(self):
    """Returns string. used for variable_scope and naming."""
    pass

  @abc.abstractmethod
  def _transform_feature(self, inputs):
    """Returns intermediate representation (usually a `Tensor`).

    Uses `inputs` to create an intermediate representation (usually a `Tensor`)
    that other feature columns can use.

    Example usage of `inputs`:
    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will
    be used as follows:

    ```python
    raw_tensor = inputs.get('raw')
    fc_tensor = inputs.get(input_fc)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.

    Returns:
      Transformed feature `Tensor`.
    """
    pass

  @abc.abstractproperty
  def _parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict.

    It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
    dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
    supported objects. Please check documentation of ${tf.parse_example} for
    all supported spec objects.

    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc).
    One possible implementation of _parse_example_spec is as follows:

    ```python
    spec = {'raw': tf.FixedLenFeature(...)}
    spec.update(input_fc._parse_example_spec)
    return spec
    ```
    """
    pass


class _DenseColumn(_FeatureColumn):
  """Represents a column which can be represented as `Tensor`.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.
  """

  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def _variable_shape(self):
    """`TensorShape` of `_get_dense_tensor`, without batch dimension."""
    pass

  @abc.abstractmethod
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor`.

    The output of this function will be used by model-builder-functions. For
    example the pseudo code of `input_layer` will be like:

    ```python
    def input_layer(features, feature_columns, ...):
      outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.Variable}).

    Returns:
      `Tensor` of shape [batch_size] + `_variable_shape`.
    """
    pass


def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  # Flatten all non-batch dimensions so a single [num_elements, units] matmul
  # produces the weighted sum.
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum')


class _CategoricalColumn(_FeatureColumn):
  """Represents a categorical feature.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  A categorical feature typically handled with a ${tf.SparseTensor} of IDs.
  """
  __metaclass__ = abc.ABCMeta

  IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
      'IdWeightPair', ['id_tensor', 'weight_tensor'])

  @abc.abstractproperty
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    pass

  @abc.abstractmethod
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      inputs: A `LazyBuilder` as a cache to get input tensors required to
        create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
    """
    pass


def _create_categorical_column_weighted_sum(
    column, builder, units, sparse_combiner, weight_collections, trainable):
  """Create a weighted sum of a categorical column for linear_model."""
  sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  # Collapse any higher dimensions so ids/weights are [batch_size, ?] for the
  # embedding lookup below.
  id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
      array_ops.shape(sparse_tensors.id_tensor)[0], -1
  ])
  weight_tensor = sparse_tensors.weight_tensor
  if weight_tensor is not None:
    weight_tensor = sparse_ops.sparse_reshape(
        weight_tensor, [array_ops.shape(weight_tensor)[0], -1])

  weight = variable_scope.get_variable(
      name='weights',
      shape=(column._num_buckets, units),  # pylint: disable=protected-access
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return _safe_embedding_lookup_sparse(
      weight,
      id_tensor,
      sparse_weights=weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum')


class _LazyBuilder(object):
  """Handles caching of transformations while building the model.

  `_FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.

  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we
  should create only one bucketization op instead of creating ops for each
  feature column separately. To handle re-use of transformed columns,
  `_LazyBuilder` caches all previously transformed columns.

  Example:
  We're trying to use the following `_FeatureColumn`s:

  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
  ... = linear_model(features, [bucketized_age, keywords, age_X_keywords]
  ```

  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).

  The `_LazyBuilder` eliminates this duplication.
  """

  def __init__(self, features):
    """Creates a `_LazyBuilder`.

    Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to same via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `_FeatureColumn` key
        means that this `Tensor` is the output of an existing `_FeatureColumn`
        which can be reused.
    """
    # Copy so later mutations of the caller's dict don't affect the cache.
    self._features = features.copy()
    self._feature_tensors = {}

  def get(self, key):
    """Returns a `Tensor` for the given key.

    A `str` key is used to access a base feature (not-transformed). When a
    `_FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `_FeatureColumn` is asked to provide
    its transformed output, which is then cached.

    Args:
      key: a `str` or a `_FeatureColumn`.

    Returns:
      The transformed `Tensor` corresponding to the `key`.

    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
    if key in self._feature_tensors:
      # FeatureColumn is already transformed or converted.
      return self._feature_tensors[key]

    if key in self._features:
      feature_tensor = self._get_raw_feature_as_tensor(key)
      self._feature_tensors[key] = feature_tensor
      return feature_tensor

    if not isinstance(key, (str, _FeatureColumn)):
      raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
                      'Provided: {}'.format(key))

    if not isinstance(key, _FeatureColumn):
      raise ValueError('Feature {} is not in features dictionary.'.format(key))

    column = key
    logging.debug('Transforming feature_column %s.', column)
    transformed = column._transform_feature(self)  # pylint: disable=protected-access
    if transformed is None:
      raise ValueError('Column {} is not supported.'.format(column.name))
    self._feature_tensors[column] = transformed
    return transformed

  def _get_raw_feature_as_tensor(self, key):
    """Gets the raw_feature (keyed by `key`) as `tensor`.

    The raw feature is converted to (sparse) tensor and maybe expand dim.

    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
    the rank is 1. This supports dynamic rank also. For rank 0 raw feature,
    will error out as it is not supported.

    Args:
      key: A `str` key to access the raw feature.

    Returns:
      A `Tensor` or `SparseTensor`.

    Raises:
      ValueError: if the raw feature has rank 0.
    """
    raw_feature = self._features[key]
    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        raw_feature)

    def expand_dims(input_tensor):
      # Input_tensor must have rank 1.
      if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        return sparse_ops.sparse_reshape(
            input_tensor, [array_ops.shape(input_tensor)[0], -1])
      else:
        return array_ops.expand_dims(input_tensor, -1)

    # Static rank is known: validate and expand rank-1 to rank-2 eagerly.
    rank = feature_tensor.get_shape().ndims
    if rank is not None:
      if rank == 0:
        # NOTE(review): message says 'Give:' — likely a typo for 'Given:';
        # runtime string left unchanged.
        raise ValueError(
            'Feature (key: {}) cannot have rank 0. Give: {}'.format(
                key, feature_tensor))
      return feature_tensor if rank != 1 else expand_dims(feature_tensor)

    # Handle dynamic rank.
    with ops.control_dependencies([
        check_ops.assert_positive(
            array_ops.rank(feature_tensor),
            message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))]):
      return control_flow_ops.cond(
          math_ops.equal(1, array_ops.rank(feature_tensor)),
          lambda: expand_dims(feature_tensor),
          lambda: feature_tensor)


# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _shape_offsets(shape):
  """Returns moving offset for each dimension given shape."""
  offsets = []
  for dim in reversed(shape):
    if offsets:
      offsets.append(dim * offsets[-1])
    else:
      offsets.append(dim)
  offsets.reverse()
  return offsets


# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

  If `input_tensor` is already a `SparseTensor`, just return it.

  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.

  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
      input_tensor)
  if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
    return input_tensor
  with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
    input_rank = input_tensor.get_shape().ndims
    if input_rank is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError('Undefined input_tensor shape.')
    if ignore_value is None:
      ignore_value = '' if input_tensor.dtype == dtypes.string else -1
    dense_shape = math_ops.cast(array_ops.shape(input_tensor), dtypes.int64)
    indices = array_ops.where(math_ops.not_equal(
        input_tensor, math_ops.cast(ignore_value, input_tensor.dtype)))
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(input_tensor, [-1])
    flat_indices = indices[:, input_rank - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if input_rank > 1:
      higher_dims = indices[:, :input_rank - 1]
      shape_offsets = array_ops.stack(
          _shape_offsets(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_offsets),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor_lib.SparseTensor(indices, values, dense_shape)


def _check_feature_columns(feature_columns):
  """Verifies feature_columns input."""
  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')
  for column in feature_columns:
    if not isinstance(column, _FeatureColumn):
      raise ValueError('Items of feature_columns must be a _FeatureColumn. '
                       'Given (type {}): {}.'.format(type(column), column))
  if not feature_columns:
    raise ValueError('feature_columns must not be empty.')
  # Column names must be unique across the whole list.
  name_to_column = dict()
  for column in feature_columns:
    if column.name in name_to_column:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'features dict.'.format(column,
                                               name_to_column[column.name]))
    name_to_column[column.name] = column


class _NumericColumn(_DenseColumn,
                     collections.namedtuple('_NumericColumn', [
                         'key', 'shape', 'default_value', 'dtype',
                         'normalizer_fn'
                     ])):
  """see `numeric_column`."""

  @property
  def name(self):
    return self.key

  @property
  def _parse_example_spec(self):
    return {
        self.key:
            parsing_ops.FixedLenFeature(self.shape, self.dtype,
                                        self.default_value)
    }

  def _transform_feature(self, inputs):
    input_tensor = inputs.get(self.key)
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of numerical column must be a Tensor. '
          'SparseTensor is not supported. key: {}'.format(self.key))
    if self.normalizer_fn is not None:
      input_tensor = self.normalizer_fn(input_tensor)
    return math_ops.to_float(input_tensor)

  @property
  def _variable_shape(self):
    return tensor_shape.TensorShape(self.shape)

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns dense `Tensor` representing numeric feature.

    Args:
      inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: Unused `weight_collections` since no variables are
        created in this function.
      trainable: Unused `trainable` bool since no variables are created in
        this function.

    Returns:
      Dense `Tensor` created within `_transform_feature`.
    """
    # Do nothing with weight_collections and trainable since no variables are
    # created in this function.
    del weight_collections
    del trainable
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    return inputs.get(self)


class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
                        collections.namedtuple('_BucketizedColumn', [
                            'source_column', 'boundaries'])):
  """See `bucketized_column`."""

  @property
  def name(self):
    return '{}_bucketized'.format(self.source_column.name)

  @property
  def _parse_example_spec(self):
    return self.source_column._parse_example_spec  # pylint: disable=protected-access

  def _transform_feature(self, inputs):
    source_tensor = inputs.get(self.source_column)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)

  @property
  def _variable_shape(self):
    return tensor_shape.TensorShape(
        tuple(self.source_column.shape) + (len(self.boundaries) + 1,))

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return array_ops.one_hot(
        indices=math_ops.to_int64(input_tensor),
        depth=len(self.boundaries) + 1,
        on_value=1.,
        off_value=0.)

  @property
  def _num_buckets(self):
    # By construction, source_column is always one-dimensional.
    return (len(self.boundaries) + 1) * self.source_column.shape[0]

  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    input_tensor = inputs.get(self)
    batch_size = array_ops.shape(input_tensor)[0]
    # By construction, source_column is always one-dimensional.
    source_dimension = self.source_column.shape[0]

    # i1/i2 enumerate (batch index, source-dimension index) pairs for the
    # sparse output below.
    i1 = array_ops.reshape(
        array_ops.tile(
            array_ops.expand_dims(math_ops.range(0, batch_size), 1),
            [1, source_dimension]),
        (-1,))
    i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    bucket_indices = (
        array_ops.reshape(input_tensor, (-1,)) +
        (len(self.boundaries) + 1) * i2)

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    dense_shape = math_ops.to_int64(array_ops.stack(
        [batch_size, source_dimension]))
    sparse_tensor = sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=bucket_indices,
        dense_shape=dense_shape)
    return _CategoricalColumn.IdWeightPair(sparse_tensor, None)


class _EmbeddingColumn(
    _DenseColumn,
    collections.namedtuple('_EmbeddingColumn', (
        'categorical_column', 'dimension', 'combiner', 'initializer',
        'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'
    ))):
  """See `_embedding_column`."""

  @property
  def name(self):
    # Memoized on first access.
    if not hasattr(self, '_name'):
      self._name = '{}_embedding'.format(self.categorical_column.name)
    return self._name

  @property
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access

  def _transform_feature(self, inputs):
    return inputs.get(self.categorical_column)

  @property
  def _variable_shape(self):
    # Memoized on first access.
    if not hasattr(self, '_shape'):
      self._shape = tensor_shape.vector(self.dimension)
    return self._shape

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections=weight_collections, trainable=trainable)
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor

    # Create embedding weight, and restore from checkpoint if necessary.
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=(self.categorical_column._num_buckets, self.dimension),  # pylint: disable=protected-access
        dtype=dtypes.float32,
        initializer=self.initializer,
        # Variable is trainable only if both the column and the caller say so.
        trainable=self.trainable and trainable,
        collections=weight_collections)
    if self.ckpt_to_load_from is not None:
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })

    # Return embedding lookup result.
    return _safe_embedding_lookup_sparse(
        embedding_weights=embedding_weights,
        sparse_ids=sparse_ids,
        sparse_weights=sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm)


def _create_tuple(shape, value):
  """Returns a tuple with given shape and filled with value."""
  if shape:
    return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
  return value


def _as_tuple(value):
  # Recursively converts nested sequences to nested tuples; scalars pass
  # through unchanged.
  if not nest.is_sequence(value):
    return value
  return tuple([_as_tuple(v) for v in value])


def _check_shape(shape, key):
  """Returns shape if it's valid, raises error otherwise."""
  assert shape is not None
  if not nest.is_sequence(shape):
    shape = [shape]
  shape = tuple(shape)
  for dimension in shape:
    if not isinstance(dimension, int):
      raise TypeError('shape dimensions must be integer. '
                      'shape: {}, key: {}'.format(shape, key))
    if dimension < 1:
      raise ValueError('shape dimensions must be greater than 0. '
                       'shape: {}, key: {}'.format(shape, key))
  return shape


def _is_shape_and_default_value_compatible(default_value, shape):
  """Verifies compatibility of shape and default_value."""
  # Invalid condition:
  #  * if default_value is not a scalar and shape is empty
  #  * or if default_value is an iterable and shape is not empty
  if nest.is_sequence(default_value) != bool(shape):
    return False
  if not shape:
    return True
  if len(default_value) != shape[0]:
    return False
  for i in range(shape[0]):
    if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
      return False
  return True


def _check_default_value(shape, default_value, dtype, key):
  """Returns default value as tuple if it's valid, otherwise raises errors.

  This function verifies that `default_value` is compatible with both `shape`
  and `dtype`. If it is not compatible, it raises an error. If it is
  compatible, it casts default_value to a tuple and returns it. `key` is used
  only for error message.

  Args:
    shape: An iterable of integers specifies the shape of the `Tensor`.
    default_value: If a single value is provided, the same value will be
      applied as the default value for every item. If an iterable of values is
      provided, the shape of the `default_value` should be equal to the given
      `shape`.
    dtype: defines the type of values. Default value is `tf.float32`. Must be
      a non-quantized, real integer or floating point type.
    key: Column name, used only for error messages.

  Returns:
    A tuple which will be used as default value.

  Raises:
    TypeError: if `default_value` is an iterable but not compatible with
      `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  if default_value is None:
    return None

  if isinstance(default_value, int):
    return _create_tuple(shape, default_value)

  if isinstance(default_value, float) and dtype.is_floating:
    return _create_tuple(shape, default_value)

  if callable(getattr(default_value, 'tolist', None)):  # Handles numpy arrays
    default_value = default_value.tolist()

  if nest.is_sequence(default_value):
    if not _is_shape_and_default_value_compatible(default_value, shape):
      raise ValueError(
          'The shape of default_value must be equal to given shape. '
          'default_value: {}, shape: {}, key: {}'.format(
              default_value, shape, key))
    # Check if the values in the list are all integers or are convertible to
    # floats.
    is_list_all_int = all(
        isinstance(v, int) for v in nest.flatten(default_value))
    is_list_has_float = any(
        isinstance(v, float) for v in nest.flatten(default_value))
    if is_list_all_int:
      return _as_tuple(default_value)
    if is_list_has_float and dtype.is_floating:
      return _as_tuple(default_value)
  raise TypeError('default_value must be compatible with dtype. '
                  'default_value: {}, dtype: {}, key: {}'.format(
                      default_value, dtype, key))


class _HashedCategoricalColumn(
    _CategoricalColumn,
    collections.namedtuple('_HashedCategoricalColumn',
                           ['key', 'hash_bucket_size', 'dtype'])):
  """see `categorical_column_with_hash_bucket`."""

  @property
  def name(self):
    return self.key

  @property
  def _parse_example_spec(self):
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input(inputs.get(self.key))
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')
    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) if self.dtype == dtypes.string: sparse_values = input_tensor.values else: sparse_values = string_ops.as_string(input_tensor.values) sparse_id_values = string_ops.string_to_hash_bucket_fast( sparse_values, self.hash_bucket_size, name='lookup') return sparse_tensor_lib.SparseTensor( input_tensor.indices, sparse_id_values, input_tensor.dense_shape) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.hash_bucket_size def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) class _VocabularyFileCategoricalColumn( _CategoricalColumn, collections.namedtuple('_VocabularyFileCategoricalColumn', ( 'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype', 'default_value' ))): """See `categorical_column_with_vocabulary_file`.""" @property def name(self): return self.key @property def _parse_example_spec(self): return {self.key: parsing_ops.VarLenFeature(self.dtype)} def _transform_feature(self, inputs): input_tensor = _to_sparse_input(inputs.get(self.key)) if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. ' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) _assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) key_dtype = self.dtype if input_tensor.dtype.is_integer: # `index_table_from_file` requires 64-bit integer keys. 
key_dtype = dtypes.int64 input_tensor = math_ops.to_int64(input_tensor) return lookup_ops.index_table_from_file( vocabulary_file=self.vocabulary_file, num_oov_buckets=self.num_oov_buckets, vocab_size=self.vocabulary_size, default_value=self.default_value, key_dtype=key_dtype, name='{}_lookup'.format(self.key)).lookup(input_tensor) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.vocabulary_size + self.num_oov_buckets def _get_sparse_tensors( self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) class _VocabularyListCategoricalColumn( _CategoricalColumn, collections.namedtuple('_VocabularyListCategoricalColumn', ( 'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets' ))): """See `categorical_column_with_vocabulary_list`.""" @property def name(self): return self.key @property def _parse_example_spec(self): return {self.key: parsing_ops.VarLenFeature(self.dtype)} def _transform_feature(self, inputs): input_tensor = _to_sparse_input(inputs.get(self.key)) if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. ' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) _assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) key_dtype = self.dtype if input_tensor.dtype.is_integer: # `index_table_from_tensor` requires 64-bit integer keys. 
key_dtype = dtypes.int64 input_tensor = math_ops.to_int64(input_tensor) return lookup_ops.index_table_from_tensor( vocabulary_list=tuple(self.vocabulary_list), default_value=self.default_value, num_oov_buckets=self.num_oov_buckets, dtype=key_dtype, name='{}_lookup'.format(self.key)).lookup(input_tensor) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return len(self.vocabulary_list) + self.num_oov_buckets def _get_sparse_tensors( self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) class _IdentityCategoricalColumn( _CategoricalColumn, collections.namedtuple('_IdentityCategoricalColumn', ( 'key', 'num_buckets', 'default_value' ))): """See `categorical_column_with_identity`.""" @property def name(self): return self.key @property def _parse_example_spec(self): return {self.key: parsing_ops.VarLenFeature(dtypes.int64)} def _transform_feature(self, inputs): input_tensor = _to_sparse_input(inputs.get(self.key)) if not input_tensor.dtype.is_integer: raise ValueError( 'Invalid input, not integer. key: {} dtype: {}'.format( self.key, input_tensor.dtype)) values = math_ops.to_int64(input_tensor.values, name='values') num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets') zero = math_ops.to_int64(0, name='zero') if self.default_value is None: # Fail if values are out-of-range. assert_less = check_ops.assert_less( values, num_buckets, data=(values, num_buckets), name='assert_less_than_num_buckets') assert_greater = check_ops.assert_greater_equal( values, zero, data=(values,), name='assert_greater_or_equal_0') with ops.control_dependencies((assert_less, assert_greater)): values = array_ops.identity(values) else: # Assign default for out-of-range values. 
values = array_ops.where( math_ops.logical_or( values < zero, values >= num_buckets, name='out_of_range'), array_ops.fill( dims=array_ops.shape(values), value=math_ops.to_int64(self.default_value), name='default_values'), values) return sparse_tensor_lib.SparseTensor( indices=input_tensor.indices, values=values, dense_shape=input_tensor.dense_shape) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.num_buckets def _get_sparse_tensors( self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) class _WeightedCategoricalColumn( _CategoricalColumn, collections.namedtuple('_WeightedCategoricalColumn', ( 'categorical_column', 'weight_feature_key', 'dtype' ))): """See `weighted_categorical_column`.""" @property def name(self): return '{}_weighted_by_{}'.format( self.categorical_column.name, self.weight_feature_key) @property def _parse_example_spec(self): config = self.categorical_column._parse_example_spec # pylint: disable=protected-access if self.weight_feature_key in config: raise ValueError('Parse config {} already exists for {}.'.format( config[self.weight_feature_key], self.weight_feature_key)) config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype) return config @property def _num_buckets(self): return self.categorical_column._num_buckets # pylint: disable=protected-access def _transform_feature(self, inputs): weight_tensor = inputs.get(self.weight_feature_key) if weight_tensor is None: raise ValueError('Missing weights {}.'.format(self.weight_feature_key)) weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor( weight_tensor) if self.dtype != weight_tensor.dtype.base_dtype: raise ValueError('Bad dtype, expected {}, but got {}.'.format( self.dtype, weight_tensor.dtype)) if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor): # The weight tensor can be a regular Tensor. In this case, sparsify it. 
weight_tensor = _to_sparse_input(weight_tensor, ignore_value=0.0) if not weight_tensor.dtype.is_floating: weight_tensor = math_ops.to_float(weight_tensor) return (inputs.get(self.categorical_column), weight_tensor) def _get_sparse_tensors( self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable tensors = inputs.get(self) return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1]) class _CrossedColumn( _CategoricalColumn, collections.namedtuple('_CrossedColumn', ['keys', 'hash_bucket_size', 'hash_key'])): """See `crossed_column`.""" @property def name(self): feature_names = [] for key in _collect_leaf_level_keys(self): if isinstance(key, _FeatureColumn): feature_names.append(key.name) else: # key must be a string feature_names.append(key) return '_X_'.join(sorted(feature_names)) @property def _parse_example_spec(self): config = {} for key in self.keys: if isinstance(key, _FeatureColumn): config.update(key._parse_example_spec) # pylint: disable=protected-access else: # key must be a string config.update({key: parsing_ops.VarLenFeature(dtypes.string)}) return config def _transform_feature(self, inputs): feature_tensors = [] for key in _collect_leaf_level_keys(self): if isinstance(key, six.string_types): feature_tensors.append(inputs.get(key)) elif isinstance(key, _CategoricalColumn): ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access if ids_and_weights.weight_tensor is not None: raise ValueError( 'crossed_column does not support weight_tensor, but the given ' 'column populates weight_tensor. ' 'Given column: {}'.format(key.name)) feature_tensors.append(ids_and_weights.id_tensor) else: raise ValueError('Unsupported column type. 
Given: {}'.format(key)) return sparse_ops._sparse_cross_hashed( # pylint: disable=protected-access inputs=feature_tensors, num_buckets=self.hash_bucket_size, hash_key=self.hash_key) @property def _num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.hash_bucket_size def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): return _CategoricalColumn.IdWeightPair(inputs.get(self), None) def _collect_leaf_level_keys(cross): """Collects base keys by expanding all nested crosses. Args: cross: A `_CrossedColumn`. Returns: A list of strings or `_CategoricalColumn` instances. """ leaf_level_keys = [] for k in cross.keys: if isinstance(k, _CrossedColumn): leaf_level_keys.extend(_collect_leaf_level_keys(k)) else: leaf_level_keys.append(k) return leaf_level_keys # TODO(zakaria): Move this to embedding_ops and make it public. def _safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=None, combiner='mean', default_id=None, name=None, partition_strategy='div', max_norm=None): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Args: embedding_weights: A list of `P` float `Tensor`s or values representing partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` created by partitioning along dimension 0. 
The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently `"div"` and `"mod"` are supported. Default is `"div"`. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. Returns: Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ if embedding_weights is None: raise ValueError('Missing embedding_weights %s.' % embedding_weights) if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) # get underlying Variables. if not isinstance(embedding_weights, list): embedding_weights = [embedding_weights] if len(embedding_weights) < 1: raise ValueError('Missing embedding_weights %s.' % embedding_weights) dtype = sparse_weights.dtype if sparse_weights is not None else None embedding_weights = [ ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights ] with ops.name_scope(name, 'embedding_lookup', embedding_weights + [sparse_ids, sparse_weights]) as scope: # Reshape higher-rank sparse ids and weights to linear segment ids. 
original_shape = sparse_ids.dense_shape original_rank_dim = sparse_ids.dense_shape.get_shape()[0] original_rank = ( array_ops.size(original_shape) if original_rank_dim.value is None else original_rank_dim.value) sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [ math_ops.reduce_prod( array_ops.slice(original_shape, [0], [original_rank - 1])), array_ops.gather(original_shape, original_rank - 1)]) if sparse_weights is not None: sparse_weights = sparse_tensor_lib.SparseTensor( sparse_ids.indices, sparse_weights.values, sparse_ids.dense_shape) # Prune invalid ids and weights. sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights) # Fill in dummy values for empty features, if necessary. sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids, default_id or 0) if sparse_weights is not None: sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0) result = embedding_ops.embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=combiner, partition_strategy=partition_strategy, name=None if default_id is None else scope, max_norm=max_norm) if default_id is None: # Broadcast is_row_empty to the same shape as embedding_lookup_result, # for use in Select. is_row_empty = array_ops.tile( array_ops.reshape(is_row_empty, [-1, 1]), array_ops.stack([1, array_ops.shape(result)[1]])) result = array_ops.where(is_row_empty, array_ops.zeros_like(result), result, name=scope) # Reshape back from linear ids back into higher-dimensional dense result. 
final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape(tensor_shape.unknown_shape( (original_rank_dim - 1).value).concatenate(result.get_shape()[1:])) return final_result def _prune_invalid_ids(sparse_ids, sparse_weights): """Prune invalid IDs (< 0) from the input ids and weights.""" is_id_valid = math_ops.greater_equal(sparse_ids.values, 0) if sparse_weights is not None: is_id_valid = math_ops.logical_and( is_id_valid, math_ops.greater(sparse_weights.values, 0)) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid) if sparse_weights is not None: sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid) return sparse_ids, sparse_weights class _IndicatorColumn(_DenseColumn, collections.namedtuple('_IndicatorColumn', ['categorical_column'])): """Represents a one-hot column for use in deep networks. Args: categorical_column: A `_CategoricalColumn` which is created by `categorical_column_with_*` function. """ @property def name(self): return '{}_indicator'.format(self.categorical_column.name) def _transform_feature(self, inputs): """Returns dense `Tensor` representing feature. Args: inputs: A `_LazyBuilder` object to access inputs. Returns: Transformed feature `Tensor`. Raises: ValueError: if input rank is not known at graph building time. """ id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access id_tensor = id_weight_pair.id_tensor weight_tensor = id_weight_pair.weight_tensor # If the underlying column is weighted, return the input as a dense tensor. 
if weight_tensor is not None: weighted_column = sparse_ops.sparse_merge( sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=self._variable_shape[-1]) return sparse_ops.sparse_tensor_to_dense(weighted_column) dense_id_tensor = sparse_ops.sparse_tensor_to_dense( id_tensor, default_value=-1) # One hot must be float for tf.concat reasons since all other inputs to # input_layer are float32. one_hot_id_tensor = array_ops.one_hot( dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0) # Reduce to get a multi-hot per example. return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2]) @property def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access @property def _variable_shape(self): """Returns a `TensorShape` representing the shape of the dense `Tensor`.""" return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): """Returns dense `Tensor` representing feature. Args: inputs: A `_LazyBuilder` object to access inputs. weight_collections: Unused `weight_collections` since no variables are created in this function. trainable: Unused `trainable` bool since no variables are created in this function. Returns: Dense `Tensor` created within `_transform_feature`. """ # Do nothing with weight_collections and trainable since no variables are # created in this function. del weight_collections del trainable # Feature has been already transformed. Return the intermediate # representation created by _transform_feature. return inputs.get(self) def _verify_static_batch_size_equality(tensors, columns): # bath_size is a tf.Dimension object. 
expected_batch_size = None for i in range(0, len(tensors)): if tensors[i].shape[0].value is not None: if expected_batch_size is None: bath_size_column_index = i expected_batch_size = tensors[i].shape[0] elif not expected_batch_size.is_compatible_with(tensors[i].shape[0]): raise ValueError( 'Batch size (first dimension) of each feature must be same. ' 'Batch size of columns ({}, {}): ({}, {})'.format( columns[bath_size_column_index].name, columns[i].name, expected_batch_size, tensors[i].shape[0]))
apache-2.0
rripio/FIRtro
bin/soundcards.py
2
14694
#!/usr/bin/python # -*- coding: utf-8 -*- u""" Módulo interno para gestión de las tarjetas declaradas en ~/audio/config - Gestión del mixer de las tarjetas. - Integración en Jack mediante resampling (zita). - Gestión del reloj en tarjetas profesionales. NOTA: Las tarjetas profesionales con referencia de reloj configurable (int/ext) deben ser declaradas en /home/firtro/audio/cards.ini (ver documentación) """ # v1.1: # - Se evalúa si la tarjeta es USB (aplay -l) para poner n=3 en zita # - Se pone latencia inicial en zita-a2j para intentar evitar desincronismos # v1.2: # - Se integra aquí la funcionalidad de selección de la referencia de reloj # interna/externa para tarjetas profesionales compatibles. # v1.3: # - Se integran aquí las funciones para restaurar el mixer ALSA de las [cards] # de audio/config, también para hacer mute/unmute en el mixer de la principal. # v1.3a # - Se revisa la rutina alsa_mute_system_card() # - Se permite un argumento (no documentado) que fuerza un flapeo SPDIF # para intentar resincronizar el DAC externo conectado :-/ # v1.3b # - se depura iec958_set para tarjetas que no tengan este control # v1.3c # - se depura analog_scontrols y iec_scontrols # v1.3d # - se revisan los parámetros de arranque de zita # - se arranca zita con Popen # - zita con log rotado en /var/log/<username> from time import sleep import jack import os from sys import argv as sys_argv import subprocess as sp from getconfig import * from ConfigParser import ConfigParser cardsINI = ConfigParser() cardsINI.read("/home/firtro/audio/cards.ini") ############################################################################## # CONFIGURABLE PARA DEVELOPPERS: # # DICCIONARIO DE FUNCIONES PARA TARJETAS CON REFERENCIA DE RELOJ INT/EXT # # (ver más abajo) # ############################################################################## # Puertos JACK de monitores de la señal definidos en audio/config monitor_ports = jack_external_monitors.split() + jack_internal_monitors.split() # 
rutina para lanzar comandos amixer def amixer(card, cmd): cmd = "amixer -q -c" + card + " " + cmd if sp.call(cmd, shell=True): print "(soundcards) <!> Error: " + cmd return False else: print "(soundcards) " + cmd return True # Función especializada en la tarjeta M-Audio 1010LT def change_clock_1010LT(clock,fs): """ Orden de configuracion: 'Multi Track Internal Clock Default' --> 'valor_Fs' 'Multi Track Internal Clock' --> 'valor_Fs' o 'IEC958 Input' """ cmd = "sset 'Multi Track Internal Clock Default' '" + format(fs) + "'" chk1 = amixer("M1010LT", cmd) if clock == "card": cmd = "sset 'Multi Track Internal Clock' '" + format(fs) + "'" chk2 = amixer("M1010LT", cmd) elif clock == "spdif": cmd = "sset 'Multi Track Internal Clock' 'IEC958 Input'" chk2 = amixer("M1010LT", cmd) else: chk2 = False return chk1 and chk2 # Función especializada en NO HACER NADA (es para probar el diccionario) def change_clock_DUMMY(clock,fs): print "(soundcards) change clock *** DUMMY *** clock:" + clock + " fs:" + format(fs) return True # DICCIONARIO DE FUNCIONES PARA TARJETAS CON REFERENCIA DE RELOJ INT/EXT # De uso interno para apuntar a la función correspondiente # para las tarjetas [proCards] en ~/audio/cards.ini. # Las funciones especializadas para cada tarjeta se deben definir antes que este diccionario. 
fDict = { "M1010LT" : change_clock_1010LT, "DX" : change_clock_DUMMY } # Función genérica para cambiar la referencia de reloj de tarjetas profesionales compatibles def change_clock(clock, fs, card=system_card): """ con opción de cambiar la tarjeta afectada para debug """ if clock in ("spdif", "card"): if "proCards" in cardsINI.sections(): if bareCard(card) in cardsINI.get("proCards", "cards").split(): fDict[bareCard(card)](clock, fs) else: print "(soundcards) <!> Clock option not available on " + card return False else: print "(soundcards) <!> ERROR valid clock values are: spdif | card" return False # Funcion para resincronizar posibles tarjetas externas usadas # como entrada de señal, típicamente SPDIF que requerirá resincronización. def external_card_resync(in_ports, rate): card_es_input_y_monitor = False # recorremos las tarjetas externas declaradas en el FIRtro for card in external_cards.split(): # Evaluamos si la tarjeta proporciona los puertos jack para la input requerida if [x for x in in_ports if bareCard(card) in x]: # Evaluamos si la tarjeta tb se emplea como monitor, en ese caso lo # suyo es rearrancar zita-j2a con la nueva rate en esa tarjeta if [x for x in monitor_ports if bareCard(card) in x]: card_es_input_y_monitor = True apaga_resampler(card) if "alsa" in resampler: mode = "alsa_in" elif "zita" in resampler: mode = "zita-a2j" if card_es_input_y_monitor: arranca_resampler(card, rate, mode) arranca_resampler(card, rate, mode) def apaga_resampler(card): sp.call("pkill -f " + card , shell=True) intentos = 8 while True: resampler_process = sp.check_output("pgrep -l -f " + bareCard(card), shell=True) if (not "zita" in resampler_process) and (not "alsa_" in resampler_process): print "(server_input) Se ha apagado el resampler en " + card break if not intentos: print "(server_input) <!> no se ha apagado el resamplern en " + card break intentos -= 1 sleep(.25) # devuelve el nombre alsa de la tarjeta sin "hw:_____,X" def bareCard(card): """ función auxiliar 
que devuelve el nombre de la tarjeta sin "hw:" y sin el device ",X" de utilidad para presentar la tarjeta en jack o para buscarla dentro de los posibles monitores externos de audio/config """ bareCard = card.split(":")[-1] # quitamos "hw:" si lo hubiera return bareCard.split(",")[0] # y el device ",X" si lo hubiera... def cardIsUSB(card): tmp = sp.check_output("aplay -l | grep " + bareCard(card), shell=True) if "usb" in tmp.lower(): return True else: return False def cardParams(card, mode): """ diccionario con parámetros de tarjeta en función del tipo de tarjeta y del modo de trabajo (zita necezita ajustes...) """ # valores por defecto: p = {"latencia":"", "2ch16bit":False, "p":"512", "n":"2"} if mode == "zita-a2j" or mode == "alsa_in": pass elif mode == "zita-j2a" or mode == "alsa_out": pass if cardIsUSB(card): p["p"] = "1024" p["n"] = "3" if "miniStreamer" in card: p["2ch16bit"] = True p["p"] = "1024" if "zita" in mode: p["2ch16bit"] = True p["p"] = "64" return p def zitaJack(card, rate, mode, p="", n="", log=False): """ OjO algo pasa con zita que deja el socket 9999 pillado si lo intentamos lanzar desde python He optado por preparar un script de shell e invocarlo, entonces no hay problema (¿¡?) 
mode: zita-j2a | zita-a2j log: invocar True para debug """ params = cardParams(card, mode) #tmp = "# OjO algo pasa con zita que deja el socket 9999 pillado si lo intentamos lanzar desde python\n" #tmp += "# He optado por preparar un script de shell e invocarlo, entonces no hay problema (¿¡?)\n" tmp = mode + " -j" + bareCard(card) + " -d" + card if params["latencia"]: tmp += " -I" + params["latencia"] if params["2ch16bit"]: tmp += " -L" if resamplingQ: tmp += " -Q" + resamplingQ tmp += " -p" + params["p"] tmp += " -n" + params["n"] tmp += " -r" + rate # log para estudiar por qué se desincroniza zita :-/ if not log: #tmp += " > /dev/null 2>&1 &" pass else: tmp += " -v >> /var/log/" + os.getenv("LOGNAME") + "/" + mode + "_" + bareCard(card) + " &" #os.system("touch /tmp/zitaTmp.sh") #os.system("chmod +x /tmp/zitaTmp.sh") #os.system("echo '" + tmp + "' > /tmp/zitaTmp.sh") #if sp.call ("/tmp/zitaTmp.sh", shell=True): try: sp.Popen(tmp, shell=True) chk = True except: chk = False #os.remove("/tmp/zitaTmp.sh") # lo borramos para que otro usuario pueda sobreescribirlo later return chk def alsaInOut(card, rate, mode): params = cardParams(card, mode) tmp = mode + " -j" + bareCard(card) + " -d" + card if resamplingQ: tmp += " -q" + resamplingQ tmp += " -p" + params["p"] tmp += " -n" + params["n"] tmp += " -r" + rate try: sp.Popen(tmp, shell=True) return True except: return False def arranca_resampler(card, rate, mode): """ mode puede ser: zita-a2j, zita-j2a, alsa_in, alsa_out """ resamplerIsRunning = False if "zita" in mode: # este es el verdadero arranque de zita: resamplerIsRunning = zitaJack(card, rate, mode, log=False) # ver la funcion zitaJack elif "alsa" in mode: resamplerIsRunning = alsaInOut(card, rate, mode) if resamplerIsRunning: # esperamos a que los puertos zita aparezcan en jack jack.attach('tmp') intentos = 8; encontrado = False while intentos: for port in jack.get_ports(): if bareCard(card) in port: encontrado = True if encontrado: print "(soundcards) Se ha 
reiniciado " + mode + " " + card + " " + rate break intentos -= 1 print "(soundcards) Esperando a " + mode + "." * (8-intentos+1) sleep(.25) if not intentos: print "(soundcards) (!) No está disponible el puerto " + bareCard(card) + " en jack" jack.detach() else: print "(soundcards) <!> No se ha podido reiniciar " + mode + " en " + card + " " + rate def alsa_restore_cards(): """ Restaura los amixer de todas las tarjetas de ~/audio/config leyendo los archivos asound.xxx que el usuario ha debido guardar en ~/audio """ for card in [system_card] + external_cards.split(): card = bareCard(card) tmp = "alsactl --file /home/firtro/audio/asound." + card + " restore " + card print "(soundcards) restaurando alsactl en ", card tmp = sp.call(tmp, shell=True) def alsa_mute_system_card(muteOnOff): """ Gestiona el MUTE amixer la tarjeta principal del FIRtro. """ card = bareCard(system_card) if muteOnOff == "on": mode = "off" # va con lógica inversa else: mode = "on" # Las salidas analógicas: # amixer(card, cmd="sset Master playback '" + mode + "'") analog_set(card, mode) # y las SPDIF (si las tuviera conmutables): iec958_set(card, mode) def alsa_dB2percent(dB): """ truño ya que no funciona el comando amixer sset ... -3dB solo funciona bien dando un % que baje 10 puntos cada -3 dBs (!) 
""" tmp = float(dB.replace("dB", "")) tmp = int(100.0 + (tmp * 10.0/3.0)) if tmp < 0: tmp = 0 return str(tmp) + "%" def analog_set(card=bareCard(system_card), mode="off"): """ Función auxiliar para establecer los controles principales analógicos (Master o DAC) """ if mode == "off": for scontrol in analog_scontrols_main(card): cmd = "-M sset " + scontrol + " 0%" amixer(card, cmd) if mode == "on": for scontrol in analog_scontrols_main(card): gain = "0.00dB" # (i) LEEMOS ~/audio/cards.ini si tuviera una sección para la tarjeta if card in cardsINI.sections(): gain = cardsINI.get(card, scontrol) cmd = "-M sset " + scontrol + " " + alsa_dB2percent(gain) amixer(card, cmd) def analog_scontrols_main(card=bareCard(system_card)): """ v1.0 solo buscamos los DAC o los Master """ try: tmp = sp.check_output("amixer -c" + bareCard(card) + " scontrols | grep -i dac", shell=True).split("\n")[:-1] DACs = [x.split("control ")[-1] for x in tmp if not "filter" in x.lower()] except: DACs = [] try: tmp = sp.check_output("amixer -c" + bareCard(card) + " scontrols | grep -i master", shell=True).split("\n")[:-1] Masters = [x.split("control ")[-1] for x in tmp] except: Masters = [] return DACs + Masters def iec958_set(card=bareCard(system_card), mode="on"): """ Función auxiliar para establecer los posibles switches SPDIF de una tarjeta """ for scontrol in iec958_scontrols(card): # Solo lo ejecutamos si el scontrol tiene Capabilities: pswitch (los penum no funcionan) # y excluimos los loopback if "pswitch" in sp.check_output("amixer -c" + bareCard(card) + " sget " + scontrol, shell=True): amixer(card, cmd="sset " + scontrol + " '" + mode + "'") def iec958_scontrols(card=bareCard(system_card)): try: tmp = sp.check_output("amixer -c" + bareCard(card) + " scontrols | grep -i iec958", shell=True).split("\n")[:-1] tmp = [x.split("control ")[-1] for x in tmp] return [x for x in tmp if (not "loop" in x.lower()) and (not "filter" in x.lower())] except: return [] if __name__ == "__main__": if 
sys_argv[1:]: for cosa in sys_argv[1:]: cosa = cosa.lower() if "spdif" in cosa or "iec958" in cosa or "reset" in cosa: print "forzamos un flapeo SPDIF para intentar resincronizar el DAC externo :-/" sleep(.05) iec958_set(bareCard(system_card), "off") sleep(.05) iec958_set(bareCard(system_card), "on") else: print __doc__ print " 'system card' declarada en ~/audio/config:" print "\t" + system_card print "\n 'external_cards' declaradas en ~/audio/config:" for card in external_cards.split(): print "\t" + card if "proCards" in cardsINI.sections(): print "\n Tarjetas profesionales con referencia de reloj configurable:" print "\t(~/audio/cards.ini)" for card in cardsINI.get("proCards", "cards").split(): print "\t" + card print
gpl-3.0
revolutionaryG/phantomjs
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_uninitialized_test.py
2901
2480
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test warns the user when not initialized properly.""" __author__ = 'wan@google.com (Zhanyong Wan)' import gtest_test_utils COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_') def Assert(condition): if not condition: raise AssertionError def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def TestExitCodeAndOutput(command): """Runs the given command and verifies its exit code and output.""" # Verifies that 'command' exits with code 1. p = gtest_test_utils.Subprocess(command) Assert(p.exited) AssertEq(1, p.exit_code) Assert('InitGoogleTest' in p.output) class GTestUninitializedTest(gtest_test_utils.TestCase): def testExitCodeAndOutput(self): TestExitCodeAndOutput(COMMAND) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
bcheung92/Paperproject
gem5/src/arch/x86/isa/insts/x87/arithmetic/change_sign.py
70
2266
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

# gem5 microcode for the x87 sign-manipulation macro-ops:
#   FABS — replace st(0) with its absolute value (absfp micro-op).
#   FCHS — negate the sign of st(0) (chsfp micro-op).
# NOTE(review): SetStatus=True presumably flags each micro-op to update the
# x87 status word — confirm against the gem5 x86 microcode assembler docs.
microcode = '''
def macroop FABS {
    absfp st(0), st(0), SetStatus=True
};

def macroop FCHS {
    chsfp st(0), st(0), SetStatus=True
};
'''
mit
Bysmyyr/chromium-crosswalk
tools/telemetry/telemetry/util/process_statistic_timeline_data_unittest.py
26
1653
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from telemetry.util import process_statistic_timeline_data


class ProcessStatisticTimelineDataTest(unittest.TestCase):

  def testProcessStatisticValueMath(self):
    """Exercises +, - and total_sum() across values owned by different pids."""
    first_pid, second_pid = 1, 2
    make = process_statistic_timeline_data.ProcessStatisticTimelineData
    five_for_first = make(first_pid, 5)
    one_for_second = make(second_pid, 1)
    one_for_first = make(first_pid, 1)

    # Addition merges the per-pid values of both operands.
    summed = (five_for_first + one_for_second).value_by_pid
    self.assertEquals(5, summed[first_pid])
    self.assertEquals(1, summed[second_pid])
    self.assertEquals(2, len(summed.keys()))

    # Subtraction is applied pid by pid.
    diff = ((five_for_first + one_for_second) - one_for_first).value_by_pid
    self.assertEquals(4, diff[first_pid])
    self.assertEquals(1, diff[second_pid])
    self.assertEquals(2, len(diff.keys()))

    # A pid present only on the right-hand side is dropped from the result.
    rhs_only = (five_for_first - (one_for_second + one_for_first)).value_by_pid
    self.assertEquals(4, rhs_only[first_pid])
    self.assertEquals(1, len(rhs_only.keys()))

    # total_sum() adds together the values of every pid.
    self.assertEquals(6, (five_for_first + one_for_second).total_sum())

  def testProcessStatisticValueSummary(self):
    """total_sum() covers values contributed by more than one pid."""
    make = process_statistic_timeline_data.ProcessStatisticTimelineData
    combined = make(1, 1) + make(2, 99)
    self.assertEquals(100, combined.total_sum())
bsd-3-clause
dudonwai/dudonsblog
Lib/site-packages/django/db/models/sql/where.py
76
17910
""" Code to manage the creation and SQL rendering of 'where' constraints. """ import collections import datetime import warnings from itertools import repeat from django.conf import settings from django.db.models.fields import DateTimeField, Field from django.db.models.sql.datastructures import Empty, EmptyResultSet from django.utils import timezone, tree from django.utils.deprecation import RemovedInDjango19Warning from django.utils.functional import cached_property from django.utils.six.moves import range # Connection types AND = 'AND' OR = 'OR' class EmptyShortCircuit(Exception): """ Internal exception used to indicate that a "matches nothing" node should be added to the where-clause. """ pass class WhereNode(tree.Node): """ Used to represent the SQL where-clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually a tuple of: (Constraint(alias, targetcol, field), lookup_type, value) where value can be either raw Python value, or Query, ExpressionNode or something else knowing how to turn itself into SQL. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods. The second alternative should be used if the alias is not the only mutable variable. """ default = AND def _prepare_data(self, data): """ Prepare data for addition to the tree. If the data is a list or tuple, it is expected to be of the form (obj, lookup_type, value), where obj is a Constraint object, and is then slightly munged before being stored (to avoid storing any reference to field objects). Otherwise, the 'data' is stored unchanged and can be any class with an 'as_sql()' method. """ if not isinstance(data, (list, tuple)): return data obj, lookup_type, value = data if isinstance(value, collections.Iterator): # Consume any generators immediately, so that we can determine # emptiness and transform any non-empty values correctly. 
value = list(value) # The "value_annotation" parameter is used to pass auxiliary information # about the value(s) to the query construction. Specifically, datetime # and empty values need special handling. Other types could be used # here in the future (using Python types is suggested for consistency). if (isinstance(value, datetime.datetime) or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')): value_annotation = datetime.datetime elif hasattr(value, 'value_annotation'): value_annotation = value.value_annotation else: value_annotation = bool(value) if hasattr(obj, 'prepare'): value = obj.prepare(lookup_type, value) return (obj, lookup_type, value_annotation, value) def as_sql(self, compiler, connection): """ Returns the SQL version of the where clause and the value to be substituted in. Returns '', [] if this node matches everything, None, [] if this node is empty, and raises EmptyResultSet if this node can't match anything. """ # Note that the logic here is made slightly more complex than # necessary because there are two kind of empty nodes: Nodes # containing 0 children, and nodes that are known to match everything. # A match-everything node is different than empty node (which also # technically matches everything) for backwards compatibility reasons. # Refs #5261. result = [] result_params = [] everything_childs, nothing_childs = 0, 0 non_empty_childs = len(self.children) for child in self.children: try: if hasattr(child, 'as_sql'): sql, params = compiler.compile(child) else: # A leaf node in the tree. sql, params = self.make_atom(child, compiler, connection) except EmptyResultSet: nothing_childs += 1 else: if sql: result.append(sql) result_params.extend(params) else: if sql is None: # Skip empty childs totally. non_empty_childs -= 1 continue everything_childs += 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. 
if self.connector == AND: full_needed, empty_needed = non_empty_childs, 1 else: full_needed, empty_needed = 1, non_empty_childs # Now, check if this node is full/empty using the # counts. if empty_needed - nothing_childs <= 0: if self.negated: return '', [] else: raise EmptyResultSet if full_needed - everything_childs <= 0: if self.negated: raise EmptyResultSet else: return '', [] if non_empty_childs == 0: # All the child nodes were empty, so this one is empty, too. return None, [] conn = ' %s ' % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. sql_string = 'NOT (%s)' % sql_string elif len(result) > 1: sql_string = '(%s)' % sql_string return sql_string, result_params def get_group_by_cols(self): cols = [] for child in self.children: if hasattr(child, 'get_group_by_cols'): cols.extend(child.get_group_by_cols()) else: if isinstance(child[0], Constraint): cols.append((child[0].alias, child[0].col)) if hasattr(child[3], 'get_group_by_cols'): cols.extend(child[3].get_group_by_cols()) return cols def make_atom(self, child, compiler, connection): """ Turn a tuple (Constraint(table_alias, column_name, db_type), lookup_type, value_annotation, params) into valid SQL. The first item of the tuple may also be an Aggregate. Returns the string for the SQL fragment and the parameters to use for it. """ warnings.warn( "The make_atom() method will be removed in Django 1.9. 
Use Lookup class instead.", RemovedInDjango19Warning) lvalue, lookup_type, value_annotation, params_or_value = child field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None if isinstance(lvalue, Constraint): try: lvalue, params = lvalue.process(lookup_type, params_or_value, connection) except EmptyShortCircuit: raise EmptyResultSet else: raise TypeError("'make_atom' expects a Constraint as the first " "item of its 'child' argument.") if isinstance(lvalue, tuple): # A direct database column lookup. field_sql, field_params = self.sql_for_columns(lvalue, compiler, connection, field_internal_type), [] else: # A smart object with an as_sql() method. field_sql, field_params = compiler.compile(lvalue) is_datetime_field = value_annotation is datetime.datetime cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s' if hasattr(params, 'as_sql'): extra, params = compiler.compile(params) cast_sql = '' else: extra = '' params = field_params + params if (len(params) == 1 and params[0] == '' and lookup_type == 'exact' and connection.features.interprets_empty_strings_as_nulls): lookup_type = 'isnull' value_annotation = True if lookup_type in connection.operators: format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),) return (format % (field_sql, connection.operators[lookup_type] % cast_sql, extra), params) if lookup_type == 'in': if not value_annotation: raise EmptyResultSet if extra: return ('%s IN %s' % (field_sql, extra), params) max_in_list_size = connection.ops.max_in_list_size() if max_in_list_size and len(params) > max_in_list_size: # Break up the params list into an OR of manageable chunks. 
in_clause_elements = ['('] for offset in range(0, len(params), max_in_list_size): if offset > 0: in_clause_elements.append(' OR ') in_clause_elements.append('%s IN (' % field_sql) group_size = min(len(params) - offset, max_in_list_size) param_group = ', '.join(repeat('%s', group_size)) in_clause_elements.append(param_group) in_clause_elements.append(')') in_clause_elements.append(')') return ''.join(in_clause_elements), params else: return ('%s IN (%s)' % (field_sql, ', '.join(repeat('%s', len(params)))), params) elif lookup_type in ('range', 'year'): return ('%s BETWEEN %%s and %%s' % field_sql, params) elif is_datetime_field and lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'): tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname) return ('%s = %%s' % sql, tz_params + params) elif lookup_type in ('month', 'day', 'week_day'): return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type, field_sql), params) elif lookup_type == 'isnull': assert value_annotation in (True, False), "Invalid value_annotation for isnull" return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ()) elif lookup_type == 'search': return (connection.ops.fulltext_search_sql(field_sql), params) elif lookup_type in ('regex', 'iregex'): return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params raise TypeError('Invalid lookup_type: %r' % lookup_type) def sql_for_columns(self, data, qn, connection, internal_type=None): """ Returns the SQL fragment used for the left-hand side of a column constraint (for example, the "T1.foo" portion in the clause "WHERE ... T1.foo = 6") and a list of parameters. 
""" table_alias, name, db_type = data if table_alias: lhs = '%s.%s' % (qn(table_alias), qn(name)) else: lhs = qn(name) return connection.ops.field_cast_sql(db_type, internal_type) % lhs def relabel_aliases(self, change_map): """ Relabels the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. """ for pos, child in enumerate(self.children): if hasattr(child, 'relabel_aliases'): # For example another WhereNode child.relabel_aliases(change_map) elif hasattr(child, 'relabeled_clone'): self.children[pos] = child.relabeled_clone(change_map) elif isinstance(child, (list, tuple)): # tuple starting with Constraint child = (child[0].relabeled_clone(change_map),) + child[1:] if hasattr(child[3], 'relabeled_clone'): child = (child[0], child[1], child[2]) + ( child[3].relabeled_clone(change_map),) self.children[pos] = child def clone(self): """ Creates a clone of the tree. Must only be called on root nodes (nodes with empty subtree_parents). Childs must be either (Contraint, lookup, value) tuples, or objects supporting .clone(). """ clone = self.__class__._new_instance( children=[], connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, 'clone'): clone.children.append(child.clone()) else: clone.children.append(child) return clone def relabeled_clone(self, change_map): clone = self.clone() clone.relabel_aliases(change_map) return clone @classmethod def _contains_aggregate(cls, obj): if not isinstance(obj, tree.Node): return getattr(obj.lhs, 'contains_aggregate', False) or getattr(obj.rhs, 'contains_aggregate', False) return any(cls._contains_aggregate(c) for c in obj.children) @cached_property def contains_aggregate(self): return self._contains_aggregate(self) class EmptyWhere(WhereNode): def add(self, data, connector): return def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class EverythingNode(object): """ A node that matches everything. 
""" def as_sql(self, compiler=None, connection=None): return '', [] class NothingNode(object): """ A node that matches nothing. """ def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class ExtraWhere(object): def __init__(self, sqls, params): self.sqls = sqls self.params = params def as_sql(self, compiler=None, connection=None): sqls = ["(%s)" % sql for sql in self.sqls] return " AND ".join(sqls), list(self.params or ()) class Constraint(object): """ An object that can be passed to WhereNode.add() and knows how to pre-process itself prior to including in the WhereNode. """ def __init__(self, alias, col, field): warnings.warn( "The Constraint class will be removed in Django 1.9. Use Lookup class instead.", RemovedInDjango19Warning) self.alias, self.col, self.field = alias, col, field def prepare(self, lookup_type, value): if self.field and not hasattr(value, 'as_sql'): return self.field.get_prep_lookup(lookup_type, value) return value def process(self, lookup_type, value, connection): """ Returns a tuple of data suitable for inclusion in a WhereNode instance. """ # Because of circular imports, we need to import this here. from django.db.models.base import ObjectDoesNotExist try: if self.field: params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=True) db_type = self.field.db_type(connection=connection) else: # This branch is used at times when we add a comparison to NULL # (we don't really want to waste time looking up the associated # field object at the calling location). 
                params = Field().get_db_prep_lookup(lookup_type, value,
                        connection=connection, prepared=True)
                db_type = None
        except ObjectDoesNotExist:
            raise EmptyShortCircuit

        return (self.alias, self.col, db_type), params

    def relabeled_clone(self, change_map):
        # Return self unchanged when this constraint's alias is unaffected;
        # otherwise build a shallow copy pointing at the remapped alias.
        if self.alias not in change_map:
            return self
        else:
            new = Empty()
            new.__class__ = self.__class__
            new.alias, new.col, new.field = change_map[self.alias], self.col, self.field
            return new


class SubqueryConstraint(object):
    """A where-clause leaf that compares columns against a subquery.

    Rendered by the compiler as ``(columns) IN (SELECT targets ...)`` via
    ``as_subquery_condition`` — see the backend compiler for the exact SQL.
    """

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        """Compile the wrapped QuerySet/Query into subquery SQL + params."""
        query = self.query_object

        # QuerySet was sent
        if hasattr(query, 'values'):
            if query._db and connection.alias != query._db:
                raise ValueError("Can't do subqueries with queries on different DBs.")
            # Do not override already existing values.
            if not hasattr(query, 'field_names'):
                query = query.values(*self.targets)
            else:
                query = query._clone()
            query = query.query
            if query.can_filter():
                # If there is no slicing in use, then we can safely drop all ordering
                query.clear_ordering(True)

        query_compiler = query.get_compiler(connection=connection)
        return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)

    def relabel_aliases(self, change_map):
        # Mutates in place (unlike Constraint.relabeled_clone above).
        self.alias = change_map.get(self.alias, self.alias)

    def clone(self):
        """Return a new SubqueryConstraint sharing the same query object."""
        return self.__class__(
            self.alias, self.columns, self.targets, self.query_object)
mit
jjmeyer0/incubator-metron
metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/2.3.3/package/scripts/properties_config.py
27
1430
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements.  See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate
# 'format' below is resource_management's interpolating format(), which
# substitutes local variables such as conf_dir/filename into the template.
from resource_management.libraries.functions.format import format


def properties_inline_template(configurations):
    """Render a dict of settings as Java-properties-style "key=value" lines."""
    return InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}={{ value }}
{% endfor %}''', configurations_dict=configurations)


def properties_config(filename, configurations=None, conf_dir=None,
                      mode=None, owner=None, group=None, brokerid=None):
    """Write 'configurations' to <conf_dir>/<filename> as a properties file.

    mode/owner/group are passed through to the File resource; 'brokerid' is
    accepted for call-site compatibility but unused here.
    """
    config_content = properties_inline_template(configurations)
    # BUG FIX: the destination path previously hard-coded a placeholder and
    # ignored the 'filename' argument entirely.
    File(format("{conf_dir}/{filename}"),
         content=config_content,
         owner=owner,
         group=group,
         mode=mode)
apache-2.0
nugget/home-assistant
homeassistant/components/camera/ffmpeg.py
1
2654
""" Support for Cameras with FFmpeg as decoder. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/camera.ffmpeg/ """ import asyncio import logging import voluptuous as vol from homeassistant.const import CONF_NAME from homeassistant.components.camera import Camera, PLATFORM_SCHEMA from homeassistant.components.ffmpeg import ( DATA_FFMPEG, CONF_INPUT, CONF_EXTRA_ARGUMENTS) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.aiohttp_client import ( async_aiohttp_proxy_stream) _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['ffmpeg'] DEFAULT_NAME = 'FFmpeg' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_INPUT): cv.string, vol.Optional(CONF_EXTRA_ARGUMENTS): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, }) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up a FFmpeg camera.""" async_add_entities([FFmpegCamera(hass, config)]) class FFmpegCamera(Camera): """An implementation of an FFmpeg camera.""" def __init__(self, hass, config): """Initialize a FFmpeg camera.""" super().__init__() self._manager = hass.data[DATA_FFMPEG] self._name = config.get(CONF_NAME) self._input = config.get(CONF_INPUT) self._extra_arguments = config.get(CONF_EXTRA_ARGUMENTS) async def async_camera_image(self): """Return a still image response from the camera.""" from haffmpeg import ImageFrame, IMAGE_JPEG ffmpeg = ImageFrame(self._manager.binary, loop=self.hass.loop) image = await asyncio.shield(ffmpeg.get_image( self._input, output_format=IMAGE_JPEG, extra_cmd=self._extra_arguments), loop=self.hass.loop) return image async def handle_async_mjpeg_stream(self, request): """Generate an HTTP MJPEG stream from the camera.""" from haffmpeg import CameraMjpeg stream = CameraMjpeg(self._manager.binary, loop=self.hass.loop) await stream.open_camera( self._input, extra_cmd=self._extra_arguments) try: return await 
async_aiohttp_proxy_stream( self.hass, request, stream, self._manager.ffmpeg_stream_content_type) finally: await stream.close() @property def name(self): """Return the name of this camera.""" return self._name @property def stream_source(self): """Return the source of the stream.""" return self._input
apache-2.0
matthiasdiener/spack
var/spack/repos/builtin/packages/r-backports/package.py
5
2031
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RBackports(RPackage): """Implementations of functions which have been introduced in R since version 3.0.0. The backports are conditionally exported which results in R resolving the function names to the version shipped with R (if available) and uses the implemented backports as fallback. This way package developers can make use of the new functions without worrying about the minimum required R version.""" homepage = "https://cran.r-project.org/package=backports" url = "https://cran.r-project.org/src/contrib/backports_1.1.1.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/backports" version('1.1.1', '969543a0af32dc23bba9bb37ec82008c') version('1.1.0', 'b97a71b026fd7ede0e449be93d160c17')
lgpl-2.1
cindyyu/kuma
kuma/core/helpers.py
2
11447
# kuma/core/helpers.py -- jinja template helpers/filters for MDN (Python 2).
# Helpers are registered globally through jingo's ``register`` decorators.
import datetime
import HTMLParser
import os
import urllib
import hashlib

import bitly_api
from babel import localedata
from babel.dates import format_date, format_time, format_datetime
from babel.numbers import format_decimal
import bleach
import pytz
from urlobject import URLObject
from jingo import register, env
import jinja2
from pytz import timezone
from tower import ugettext_lazy as _lazy, ungettext

from django.conf import settings
from django.contrib.messages.storage.base import LEVEL_TAGS
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template import defaultfilters
from django.utils.encoding import smart_str, force_text
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.utils.timezone import get_default_timezone

from soapbox.models import Message
from statici18n.utils import get_filename

from .cache import memcache
from .exceptions import DateTimeFormatError
from .urlresolvers import reverse, split_path

# Shared parser instance used by entity_decode() below.
htmlparser = HTMLParser.HTMLParser()


# Yanking filters from Django.
register.filter(strip_tags)
register.filter(defaultfilters.timesince)
register.filter(defaultfilters.truncatewords)


@register.filter
def paginator(pager):
    """Render list of pages."""
    return Paginator(pager).render()


@register.function
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates."""
    # ``locale`` is popped off so it is not forwarded as a URL kwarg.
    locale = kwargs.pop('locale', None)
    return reverse(viewname, args=args, kwargs=kwargs, locale=locale)


# Module-level bit.ly API client; credentials come from settings (empty
# strings when unconfigured, in which case shorten() calls will fail and
# bitly_shorten() falls back to the original URL).
bitly = bitly_api.Connection(login=getattr(settings, 'BITLY_USERNAME', ''),
                             api_key=getattr(settings, 'BITLY_API_KEY', ''))


@register.filter
def bitly_shorten(url):
    """Attempt to shorten a given URL through bit.ly / mzl.la"""
    # Cache key is derived from the URL's MD5 so arbitrary-length URLs fit.
    cache_key = 'bitly:%s' % hashlib.md5(smart_str(url)).hexdigest()
    short_url = memcache.get(cache_key)
    if short_url is None:
        try:
            short_url = bitly.shorten(url)['url']
            # Cache for 60s * 60m * 24h * 30d * 12 ~= one year.
            memcache.set(cache_key, short_url, 60 * 60 * 24 * 30 * 12)
        except (bitly_api.BitlyError, KeyError):
            # Just in case the bit.ly service fails or the API key isn't
            # configured, fall back to using the original URL.
            return url
    return short_url


class Paginator(object):
    """Builds the sliding window of page numbers rendered by paginator()."""

    def __init__(self, pager):
        self.pager = pager
        # At most ``max`` page links are shown; ``span`` pages on either
        # side of the current one. Python 2 integer division: span == 4.
        self.max = 10
        self.span = (self.max - 1) / 2
        self.page = pager.number
        self.num_pages = pager.paginator.num_pages
        self.count = pager.paginator.count
        pager.page_range = self.range()
        # "dotted" flags tell the template to render ellipses when the
        # first/last page fall outside the visible window.
        pager.dotted_upper = self.num_pages not in pager.page_range
        pager.dotted_lower = 1 not in pager.page_range

    def range(self):
        """Return a list of page numbers to show in the paginator."""
        page, total, span = self.page, self.num_pages, self.span
        if total < self.max:
            lower, upper = 0, total
        elif page < span + 1:
            lower, upper = 0, span * 2
        elif page > total - span:
            lower, upper = total - span * 2, total
        else:
            lower, upper = page - span, page + span - 1
        return range(max(lower + 1, 1), min(total, upper) + 1)

    def render(self):
        # Render via the shared jinja environment and mark the result safe.
        c = {'pager': self.pager,
             'num_pages': self.num_pages,
             'count': self.count}
        t = env.get_template('includes/paginator.html').render(c)
        return jinja2.Markup(t)


@register.filter
def timesince(d, now=None):
    """Take two datetime objects and return the time between d and now as a
    nicely formatted string, e.g. "10 minutes". If d is None or occurs after
    now, return ''.

    Units used are years, months, weeks, days, hours, and minutes. Seconds and
    microseconds are ignored. Just one unit is displayed. For example,
    "2 weeks" and "1 year" are possible outputs, but "2 weeks, 3 days" and "1
    year, 5 months" are not.

    Adapted from django.utils.timesince to have better i18n (not assuming
    commas as list separators and including "ago" so order of words isn't
    assumed), show only one time unit, and include seconds.

    """
    if d is None:
        return u''
    # (seconds-per-unit, pluralized label) pairs, largest unit first.
    # Note: a month is approximated as 30 days, a year as 365.
    chunks = [
        (60 * 60 * 24 * 365, lambda n: ungettext('%(number)d year ago',
                                                 '%(number)d years ago', n)),
        (60 * 60 * 24 * 30, lambda n: ungettext('%(number)d month ago',
                                                '%(number)d months ago', n)),
        (60 * 60 * 24 * 7, lambda n: ungettext('%(number)d week ago',
                                               '%(number)d weeks ago', n)),
        (60 * 60 * 24, lambda n: ungettext('%(number)d day ago',
                                           '%(number)d days ago', n)),
        (60 * 60, lambda n: ungettext('%(number)d hour ago',
                                      '%(number)d hours ago', n)),
        (60, lambda n: ungettext('%(number)d minute ago',
                                 '%(number)d minutes ago', n)),
        (1, lambda n: ungettext('%(number)d second ago',
                                '%(number)d seconds ago', n))]
    if not now:
        # Match d's awareness: aware datetimes compare against an aware
        # "now" in the default timezone, naive against naive.
        if d.tzinfo:
            now = datetime.datetime.now(get_default_timezone())
        else:
            now = datetime.datetime.now()

    # Ignore microsecond part of 'd' since we removed it from 'now'
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return u''
    # Pick the largest unit that yields a nonzero count.
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    return name(count) % {'number': count}


@register.filter
def yesno(boolean_value):
    """Render a localized Yes/No for a boolean value."""
    return jinja2.Markup(_lazy(u'Yes') if boolean_value else _lazy(u'No'))


@register.filter
def entity_decode(str):
    """Turn HTML entities in a string into unicode."""
    return htmlparser.unescape(str)


@register.function
def inlinei18n(locale):
    """Return the statici18n JS catalog for ``locale``, inlined as safe markup."""
    # The computed file path (not the content) is cached for 30 days.
    key = 'statici18n:%s' % locale
    path = memcache.get(key)
    if path is None:
        path = os.path.join(settings.STATICI18N_OUTPUT_DIR,
                            get_filename(locale, settings.STATICI18N_DOMAIN))
        memcache.set(key, path, 60 * 60 * 24 * 30)
    with staticfiles_storage.open(path) as i18n_file:
        return mark_safe(i18n_file.read())


@register.function
def page_title(title):
    """Append the site suffix to a page title."""
    return u'%s | MDN' % title


@register.filter
def level_tag(message):
    """Map a django.contrib.messages message level to its CSS tag."""
    return jinja2.Markup(force_text(LEVEL_TAGS.get(message.level, ''),
                                    strings_only=True))


@register.filter
def isotime(t):
    """Date/Time format according to ISO 8601"""
    # Returns None (renders as nothing) for objects without tzinfo support.
    if not hasattr(t, 'tzinfo'):
        return
    return _append_tz(t).astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")


def _append_tz(t):
    # Localize a naive datetime into the configured server timezone.
    tz = pytz.timezone(settings.TIME_ZONE)
    return tz.localize(t)


@register.function
def thisyear():
    """The current year."""
    return jinja2.Markup(datetime.date.today().year)


@register.filter
def cleank(txt):
    """Clean and link some user-supplied text."""
    return jinja2.Markup(bleach.linkify(bleach.clean(txt)))


@register.filter
def urlencode(txt):
    """Url encode a path."""
    # Encode to UTF-8 bytes first; quote_plus expects a byte string in Py2.
    return urllib.quote_plus(txt.encode('utf8'))


@register.filter
def jsonencode(data):
    """Serialize ``data`` to JSON, marked safe for template output."""
    import json
    return jinja2.Markup(json.dumps(data))


@register.function
def get_soapbox_messages(url):
    """Return soapbox site-notice messages matching the path of ``url``."""
    _, path = split_path(url)
    return Message.objects.match(path)


@register.function
def get_webfont_attributes(request):
    """Return data attributes based on assumptions about if user has them cached"""
    # NOTE(review): this initial assignment is dead code -- the if/elif/else
    # chain below always reassigns assume_loaded before it is read.
    assume_loaded = 'true'
    if request.META.get('HTTP_PRAGMA') == 'no-cache':
        assume_loaded = 'false'
    elif request.META.get('HTTP_CACHE_CONTROL') == 'no-cache':
        assume_loaded = 'false'
    elif request.COOKIES.get('ffo', 'false') == 'true':
        # The 'ffo' cookie signals the fonts were previously loaded.
        assume_loaded = 'true'
    else:
        assume_loaded = 'false'

    font_names = ['opensanslight', 'opensans']
    font_attributes = ''
    for font_name in font_names:
        # NOTE(review): attribute values are emitted unquoted
        # (data-ffo-<name>=true) and the trailing + '' is a no-op; looks
        # like surrounding quote characters may have been lost -- confirm
        # against the template that consumes these attributes.
        font_attributes += ' data-ffo-' + font_name + '=' + assume_loaded + ''

    return font_attributes


@register.inclusion_tag('core/elements/soapbox_messages.html')
def soapbox_messages(soapbox_messages):
    """Render the soapbox messages partial for the given messages."""
    return {'soapbox_messages': soapbox_messages}


@register.function
def add_utm(url_, campaign, source='developer.mozilla.org', medium='email'):
    """Add the utm_* tracking parameters to a URL."""
    url_obj = URLObject(url_).add_query_params({
        'utm_campaign': campaign,
        'utm_source': source,
        'utm_medium': medium})
    return str(url_obj)


def _babel_locale(locale):
    """Return the Babel locale code, given a normal one."""
    # Babel uses underscore as separator.
    return locale.replace('-', '_')


def _contextual_locale(context):
    """Return locale from the context, falling back to a default if invalid."""
    locale = context['request'].locale
    if not localedata.exists(locale):
        locale = settings.LANGUAGE_CODE
    return locale


@register.function
@jinja2.contextfunction
def datetimeformat(context, value, format='shortdatetime', output='html'):
    """
    Returns date/time formatted using babel's locale settings. Uses the
    timezone from settings.py
    """
    if not isinstance(value, datetime.datetime):
        if isinstance(value, datetime.date):
            # Turn a date into a datetime
            value = datetime.datetime.combine(value,
                                              datetime.datetime.min.time())
        else:
            # Expecting datetime value
            raise ValueError

    default_tz = timezone(settings.TIME_ZONE)
    tzvalue = default_tz.localize(value)

    # Prefer the authenticated user's timezone, when they have one set.
    user = context['request'].user
    try:
        if user.is_authenticated() and user.timezone:
            user_tz = timezone(user.timezone)
            tzvalue = user_tz.normalize(tzvalue.astimezone(user_tz))
    except AttributeError:
        pass

    locale = _babel_locale(_contextual_locale(context))

    # If within a day, 24 * 60 * 60 = 86400s
    if format == 'shortdatetime':
        # Check if the date is today
        if value.toordinal() == datetime.date.today().toordinal():
            formatted = _lazy(u'Today at %s') % format_time(
                tzvalue, format='short', locale=locale)
        else:
            formatted = format_datetime(tzvalue, format='short', locale=locale)
    elif format == 'longdatetime':
        formatted = format_datetime(tzvalue, format='long', locale=locale)
    elif format == 'date':
        formatted = format_date(tzvalue, locale=locale)
    elif format == 'time':
        formatted = format_time(tzvalue, locale=locale)
    elif format == 'datetime':
        formatted = format_datetime(tzvalue, locale=locale)
    else:
        # Unknown format
        raise DateTimeFormatError

    if output == 'json':
        return formatted
    return jinja2.Markup('<time datetime="%s">%s</time>' %
                         (tzvalue.isoformat(), formatted))


@register.function
@jinja2.contextfunction
def number(context, n):
    """Return the localized representation of an integer or decimal.

    For None, print nothing.

    """
    if n is None:
        return ''
    return format_decimal(n, locale=_babel_locale(_contextual_locale(context)))
mpl-2.0
tempbottle/restcommander
play-1.2.4/python/Lib/site-packages/pyreadline/keysyms/winconstants.py
16
2544
#This file contains constants that are normally found in win32all
#But included here to avoid the dependency
#
# Virtual-key (VK_*) codes and clipboard-format (CF_*) / global-memory
# (G*) flags from the Windows SDK (winuser.h / winbase.h).

# Mouse buttons and control keys
VK_LBUTTON=1
VK_RBUTTON=2
VK_CANCEL=3
VK_MBUTTON=4
VK_XBUTTON1=5
VK_XBUTTON2=6
VK_BACK=8
VK_TAB=9
VK_CLEAR=12
VK_RETURN=13
VK_SHIFT=16
VK_CONTROL=17
VK_MENU=18
VK_PAUSE=19
VK_CAPITAL=20

# IME keys (several share values by design, e.g. Kana/Hangul)
VK_KANA=0x15
VK_HANGEUL=0x15
VK_HANGUL=0x15
VK_JUNJA=0x17
VK_FINAL=0x18
VK_HANJA=0x19
VK_KANJI=0x19
VK_ESCAPE=0x1B
VK_CONVERT=0x1C
VK_NONCONVERT=0x1D
VK_ACCEPT=0x1E
VK_MODECHANGE=0x1F

# Navigation and editing keys
VK_SPACE=32
VK_PRIOR=33
VK_NEXT=34
VK_END=35
VK_HOME=36
VK_LEFT=37
VK_UP=38
VK_RIGHT=39
VK_DOWN=40
VK_SELECT=41
VK_PRINT=42
VK_EXECUTE=43
VK_SNAPSHOT=44
VK_INSERT=45
VK_DELETE=46
VK_HELP=47

# Windows keys and sleep
VK_LWIN=0x5B
VK_RWIN=0x5C
VK_APPS=0x5D
VK_SLEEP=0x5F

# Numeric keypad
VK_NUMPAD0=0x60
VK_NUMPAD1=0x61
VK_NUMPAD2=0x62
VK_NUMPAD3=0x63
VK_NUMPAD4=0x64
VK_NUMPAD5=0x65
VK_NUMPAD6=0x66
VK_NUMPAD7=0x67
VK_NUMPAD8=0x68
VK_NUMPAD9=0x69
VK_MULTIPLY=0x6A
VK_ADD=0x6B
VK_SEPARATOR=0x6C
VK_SUBTRACT=0x6D
VK_DECIMAL=0x6E
VK_DIVIDE=0x6F

# Function keys
VK_F1=0x70
VK_F2=0x71
VK_F3=0x72
VK_F4=0x73
VK_F5=0x74
VK_F6=0x75
VK_F7=0x76
VK_F8=0x77
VK_F9=0x78
VK_F10=0x79
VK_F11=0x7A
VK_F12=0x7B
VK_F13=0x7C
VK_F14=0x7D
VK_F15=0x7E
VK_F16=0x7F
VK_F17=0x80
VK_F18=0x81
VK_F19=0x82
VK_F20=0x83
VK_F21=0x84
VK_F22=0x85
VK_F23=0x86
VK_F24=0x87

# Lock keys
VK_NUMLOCK=0x90
VK_SCROLL=0x91

# Left/right-distinguished modifier keys
VK_LSHIFT=0xA0
VK_RSHIFT=0xA1
VK_LCONTROL=0xA2
VK_RCONTROL=0xA3
VK_LMENU=0xA4
VK_RMENU=0xA5

# Browser, volume and media keys
VK_BROWSER_BACK=0xA6
VK_BROWSER_FORWARD=0xA7
VK_BROWSER_REFRESH=0xA8
VK_BROWSER_STOP=0xA9
VK_BROWSER_SEARCH=0xAA
VK_BROWSER_FAVORITES=0xAB
VK_BROWSER_HOME=0xAC
VK_VOLUME_MUTE=0xAD
VK_VOLUME_DOWN=0xAE
VK_VOLUME_UP=0xAF
VK_MEDIA_NEXT_TRACK=0xB0
VK_MEDIA_PREV_TRACK=0xB1
VK_MEDIA_STOP=0xB2
VK_MEDIA_PLAY_PAUSE=0xB3
VK_LAUNCH_MAIL=0xB4
VK_LAUNCH_MEDIA_SELECT=0xB5
VK_LAUNCH_APP1=0xB6
VK_LAUNCH_APP2=0xB7

# OEM keys (keyboard-layout dependent)
VK_OEM_1=0xBA
VK_OEM_PLUS=0xBB
VK_OEM_COMMA=0xBC
VK_OEM_MINUS=0xBD
VK_OEM_PERIOD=0xBE
VK_OEM_2=0xBF
VK_OEM_3=0xC0
VK_OEM_4=0xDB
VK_OEM_5=0xDC
VK_OEM_6=0xDD
VK_OEM_7=0xDE
VK_OEM_8=0xDF
VK_OEM_102=0xE2

# Miscellaneous keys
VK_PROCESSKEY=0xE5
VK_PACKET=0xE7
VK_ATTN=0xF6
VK_CRSEL=0xF7
VK_EXSEL=0xF8
VK_EREOF=0xF9
VK_PLAY=0xFA
VK_ZOOM=0xFB
VK_NONAME=0xFC
VK_PA1=0xFD
VK_OEM_CLEAR=0xFE

# Clipboard formats
CF_TEXT=1
CF_BITMAP=2
CF_METAFILEPICT=3
CF_SYLK=4
CF_DIF=5
CF_TIFF=6
CF_OEMTEXT=7
CF_DIB=8
CF_PALETTE=9
CF_PENDATA=10
CF_RIFF=11
CF_WAVE=12
CF_UNICODETEXT=13
CF_ENHMETAFILE=14
CF_HDROP=15
CF_LOCALE=16
CF_MAX=17
CF_OWNERDISPLAY=128
CF_DSPTEXT=129
CF_DSPBITMAP=130
CF_DSPMETAFILEPICT=131
CF_DSPENHMETAFILE=142
CF_PRIVATEFIRST=512
CF_PRIVATELAST=767
CF_GDIOBJFIRST=768
CF_GDIOBJLAST=1023

# GlobalAlloc flags
GPTR=64
GHND=66
apache-2.0
trik/djangae
djangae/tests/test_caching.py
2
27318
# djangae/tests/test_caching.py -- exercises djangae's two-level datastore
# caching (request-local context cache stack + global memcache).
# NOTE(review): the source content is truncated mid-file here; the block
# reproduces only what is visible, ending inside ContextCachingTests.
import unittest

from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.ext.db import non_transactional

from django.db import models
from django.http import HttpRequest
from django.core.signals import request_finished, request_started
from django.core.cache import cache

from djangae.contrib import sleuth
from djangae.test import TestCase
from djangae.db import unique_utils
from djangae.db import transaction
from djangae.db.backends.appengine.context import ContextStack
from djangae.db.backends.appengine import caching
from djangae.db.caching import disable_cache, clear_context_cache


class FakeEntity(dict):
    """Minimal stand-in for a datastore entity: a dict with an id and key()."""

    # Class-level counter used to hand out unique ids when none is given.
    COUNTER = 1

    def __init__(self, data, id=0):
        self.id = id or FakeEntity.COUNTER
        FakeEntity.COUNTER += 1
        self.update(data)

    def key(self):
        # Keys are always minted under the auth_user kind for these tests.
        return datastore.Key.from_path("auth_user", self.id)


class ContextStackTests(TestCase):
    """Unit tests for the ContextStack push/pop/staging behaviour."""

    def test_push_pop(self):
        stack = ContextStack()

        self.assertEqual({}, stack.top.cache)

        entity = FakeEntity({"bananas": 1})

        stack.top.cache_entity(["bananas:1"], entity, caching.CachingSituation.DATASTORE_PUT)

        self.assertEqual({"bananas": 1}, stack.top.cache.values()[0])

        stack.push()

        # A freshly pushed context starts empty; it does not inherit entries.
        self.assertEqual([], stack.top.cache.values())
        self.assertEqual(2, stack.size)

        stack.push()

        stack.top.cache_entity(["apples:2"], entity, caching.CachingSituation.DATASTORE_PUT)

        self.assertItemsEqual(["apples:2"], stack.top.cache.keys())

        stack.pop()

        # A plain pop discards the top but stages its entries for later apply.
        self.assertItemsEqual([], stack.top.cache.keys())
        self.assertEqual(2, stack.size)
        self.assertEqual(1, stack.staged_count)

        updated = FakeEntity({"bananas": 3})
        stack.top.cache_entity(["bananas:1"], updated, caching.CachingSituation.DATASTORE_PUT)

        stack.pop(apply_staged=True, clear_staged=True)

        # Applying staged entries merges them into the new top context.
        self.assertEqual(1, stack.size)
        self.assertEqual({"bananas": 3}, stack.top.cache["bananas:1"])
        self.assertEqual(0, stack.staged_count)

    def test_property_deletion(self):
        stack = ContextStack()

        entity = FakeEntity({"field1": "one", "field2": "two"})
        stack.top.cache_entity(["entity"], entity, caching.CachingSituation.DATASTORE_PUT)

        stack.push()  # Enter transaction

        entity["field1"] = "oneone"
        del entity["field2"]

        stack.top.cache_entity(["entity"], entity, caching.CachingSituation.DATASTORE_PUT)

        stack.pop(apply_staged=True, clear_staged=True)

        # Deleted properties must not survive the apply-staged merge.
        self.assertEqual({"field1": "oneone"}, stack.top.cache["entity"])


class CachingTestModel(models.Model):
    """Test model with one unique field and one unique_together constraint,
    giving each instance multiple unique identifiers to cache under."""

    field1 = models.CharField(max_length=255, unique=True)
    comb1 = models.IntegerField(default=0)
    comb2 = models.CharField(max_length=255)

    class Meta:
        unique_together = [
            ("comb1", "comb2")
        ]

        app_label = "djangae"


class MemcacheCachingTests(TestCase):
    """
        We need to be pretty selective with our caching in memcache, because unlike
        the context caching, this stuff is global.

        For that reason, we have the following rules:

         - save/update caches entities outside transactions
         - Inside transactions save/update wipes out the cache for updated entities (a subsequent read by key will populate it again)
         - Inside transactions filter/get does not hit memcache (that just breaks transactions)
         - filter/get by key caches entities (consistent)
         - filter/get by anything else does not (eventually consistent)
    """

    @disable_cache(memcache=False, context=True)
    def test_save_inside_transaction_evicts_cache(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))
        instance = CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

        with transaction.atomic():
            instance.field1 = "Banana"
            instance.save()

        # Make sure that altering inside the transaction evicted the item from the cache
        # and that a get then hits the datastore (which then in turn caches)
        with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
            for identifier in identifiers:
                self.assertIsNone(cache.get(identifier))

            self.assertEqual("Banana", CachingTestModel.objects.get(pk=instance.pk).field1)
            self.assertTrue(datastore_get.called)

    @disable_cache(memcache=False, context=True)
    def test_save_caches_outside_transaction_only(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        instance = CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

        instance.delete()

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        # Creating inside a transaction must NOT populate memcache.
        with transaction.atomic():
            instance = CachingTestModel.objects.create(**entity_data)

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

    @disable_cache(memcache=False, context=True)
    def test_save_wipes_entity_from_cache_inside_transaction(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        instance = CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

        with transaction.atomic():
            instance.save()

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

    @disable_cache(memcache=False, context=True)
    def test_transactional_save_wipes_the_cache_only_after_its_result_is_consistently_available(self):
        entity_data = {
            "field1": "old",
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        instance = CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual("old", cache.get(identifier)["field1"])

        @non_transactional
        def non_transactional_read(instance_pk):
            CachingTestModel.objects.get(pk=instance_pk)

        with transaction.atomic():
            instance.field1 = "new"
            instance.save()
            non_transactional_read(instance.pk)  # could potentially recache the old object

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

    @disable_cache(memcache=False, context=True)
    def test_consistent_read_updates_memcache_outside_transaction(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

        cache.clear()

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        CachingTestModel.objects.get(id=222)  # Consistent read

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

    @disable_cache(memcache=False, context=True)
    def test_eventual_read_doesnt_update_memcache(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        identifiers = unique_utils.unique_identifiers_from_entity(CachingTestModel, FakeEntity(entity_data, id=222))

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        CachingTestModel.objects.create(id=222, **entity_data)

        for identifier in identifiers:
            self.assertEqual(entity_data, cache.get(identifier))

        cache.clear()

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

        CachingTestModel.objects.all()[0]  # Inconsistent read

        for identifier in identifiers:
            self.assertIsNone(cache.get(identifier))

    @disable_cache(memcache=False, context=True)
    def test_unique_filter_hits_memcache(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Query.Run") as datastore_query:
            instance = CachingTestModel.objects.filter(field1="Apple").all()[0]
            self.assertEqual(original, instance)
            self.assertFalse(datastore_query.called)

    @disable_cache(memcache=False, context=True)
    def test_unique_filter_applies_all_filters(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Query.Run") as datastore_query:
            # Expect no matches
            num_instances = CachingTestModel.objects.filter(field1="Apple", comb1=0).count()
            self.assertEqual(num_instances, 0)

    @disable_cache(memcache=False, context=True)
    def test_non_unique_filter_hits_datastore(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Query.Run") as datastore_query:
            instance = CachingTestModel.objects.filter(comb1=1).all()[0]
            self.assertEqual(original, instance)
            self.assertTrue(datastore_query.called)

    @disable_cache(memcache=False, context=True)
    def test_get_by_key_hits_memcache(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
            instance = CachingTestModel.objects.get(pk=original.pk)
            self.assertEqual(original, instance)
            self.assertFalse(datastore_get.called)

    @disable_cache(memcache=False, context=True)
    def test_get_by_key_hits_datastore_inside_transaction(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
            with transaction.atomic():
                instance = CachingTestModel.objects.get(pk=original.pk)
            self.assertEqual(original, instance)
            self.assertTrue(datastore_get.called)

    @disable_cache(memcache=False, context=True)
    def test_unique_get_hits_memcache(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        original = CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
            instance = CachingTestModel.objects.get(field1="Apple")
            self.assertEqual(original, instance)
            self.assertFalse(datastore_get.called)

    @disable_cache(memcache=False, context=True)
    def test_unique_get_hits_datastore_inside_transaction(self):
        entity_data = {
            "field1": "Apple",
            "comb1": 1,
            "comb2": "Cherry"
        }

        CachingTestModel.objects.create(**entity_data)

        with sleuth.watch("google.appengine.api.datastore.Query.Run") as datastore_query:
            with transaction.atomic():
                try:
                    CachingTestModel.objects.get(field1="Apple")
                except datastore_errors.BadRequestError:
                    # You can't query in a transaction, but still
                    pass

            self.assertTrue(datastore_query.called)

    @disable_cache(memcache=False, context=True)
    def test_bulk_cache(self):
        with sleuth.watch("django.core.cache.cache.set_many") as set_many_1:
            CachingTestModel.objects.create(field1="Apple", comb1=1, comb2="Cherry")
        self.assertEqual(set_many_1.call_count, 1)
        # Three unique identifiers per instance (pk, field1, comb1+comb2).
        self.assertEqual(len(set_many_1.calls[0].args[0]), 3)

        with sleuth.watch("django.core.cache.cache.set_many") as set_many_2:
            CachingTestModel.objects.bulk_create([
                CachingTestModel(field1="Banana", comb1=2, comb2="Cherry"),
                CachingTestModel(field1="Orange", comb1=3, comb2="Cherry"),
            ])
        self.assertEqual(set_many_2.call_count, 1)
        self.assertEqual(len(set_many_2.calls[0].args[0]), 3*2)

        pks = list(CachingTestModel.objects.values_list('pk', flat=True))
        with sleuth.watch("django.core.cache.cache.set_many") as set_many_3:
            list(CachingTestModel.objects.filter(pk__in=pks).all())
        self.assertEqual(set_many_3.call_count, 1)
        self.assertEqual(len(set_many_3.calls[0].args[0]), 3*len(pks))

        with sleuth.watch("django.core.cache.cache.get_many") as get_many:
            with sleuth.watch("django.core.cache.cache.delete_many") as delete_many:
                CachingTestModel.objects.all().delete()
        self.assertEqual(get_many.call_count, 1)
        self.assertEqual(delete_many.call_count, 1)
        self.assertEqual(len(get_many.calls[0].args[0]), 3)  # Get by pk from cache


class ContextCachingTests(TestCase):
    """
        We can be a bit more liberal with hitting the context cache as it's
        thread-local and request-local

        The context cache is actually a stack. When you start a transaction we push
        a copy of the current context onto the stack, when we finish a transaction we
        pop the current context and apply the changes onto the outer transaction.

        The rules are thus:

        - Entering a transaction pushes a copy of the current context
        - Rolling back a transaction pops the top of the stack
        - Committing a transaction pops the top of the stack, and adds it to a queue
        - When all transactions exit, the queue is applied to the current context one at a time
        - save/update caches entities
        - filter/get by key caches entities (consistent)
        - filter/get by anything else does not (eventually consistent)
    """

    @disable_cache(memcache=True, context=False)
    def test_that_transactions_dont_inherit_context_cache(self):
        """ It's fine to hit the context cache inside an independent transaction,
            providing that the cache doesn't inherit the outer cache! Otherwise we have
            a situation where the transaction never hits the database when reloading an entity
        """
        entity_data = {
            "field1": u"Apple",
            "comb1": 1,
            "comb2": u"Cherry"
        }

        instance = CachingTestModel.objects.create(**entity_data)

        with transaction.atomic():
            with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
                instance = CachingTestModel.objects.get(pk=instance.pk)
                self.assertEqual(1, datastore_get.call_count)  # Shouldn't hit the cache!
instance.save() with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: self.assertEqual(0, datastore_get.call_count) # Should hit the cache @disable_cache(memcache=True, context=False) def test_caching_bug(self): entity_data = { "field1": u"Apple", "comb1": 1, "comb2": u"Cherry" } instance = CachingTestModel.objects.create(**entity_data) expected = entity_data.copy() expected[u"id"] = instance.pk # Fetch the object, which causes it to be added to the context cache self.assertItemsEqual(CachingTestModel.objects.filter(pk=instance.pk).values(), [expected]) # Doing a .values_list() fetches from the cache and wipes out the other fields from the entity self.assertItemsEqual(CachingTestModel.objects.filter(pk=instance.pk).values_list("field1"), [("Apple",)]) # Now fetch from the cache again, checking that the previously wiped fields are still in tact self.assertItemsEqual(CachingTestModel.objects.filter(pk=instance.pk).values(), [expected]) @disable_cache(memcache=True, context=False) def test_transactions_get_their_own_context(self): with sleuth.watch("djangae.db.backends.appengine.context.ContextStack.push") as context_push: with transaction.atomic(): pass self.assertTrue(context_push.called) @disable_cache(memcache=True, context=False) def test_independent_transaction_applies_to_outer_context(self): """ When a transaction commits successfully, we can apply its cache to the outer stack. This alters the behaviour of transactions a little but in a positive way. Things that change are: 1. If you run an independent transaction inside another transaction, a subsequent Get for an entity updated there will return the updated instance from the cache. Due to serialization of transactions it's possible that this would have happened anyway (the outer transaction wouldn't start until the independent one had finished). It makes this behaviour consistent even when serialization isn't possible. 2. 
Due to the fact the context cache is hit within a transaction, you can now Put, then Get an entity and it will return its current state (as the transaction would see it), rather than the state at the beginning of the transaction. This behaviour is nicer than the default. """ entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) with transaction.atomic(): with transaction.atomic(independent=True): inner = CachingTestModel.objects.get(pk=original.pk) inner.field1 = "Banana" inner.save() outer = CachingTestModel.objects.get(pk=original.pk) self.assertEqual("Banana", outer.field1) outer.field1 = "Apple" outer.save() original = CachingTestModel.objects.get(pk=original.pk) self.assertEqual("Apple", original.field1) @disable_cache(memcache=True, context=False) def test_nested_transactions_dont_get_their_own_context(self): """ The datastore doesn't support nested transactions, so when there is a nested atomic block which isn't marked as independent, the atomic is a no-op. Therefore we shouldn't push a context here, and we shouldn't pop it at the end either. 
""" self.assertEqual(1, caching._context.stack.size) with transaction.atomic(): self.assertEqual(2, caching._context.stack.size) with transaction.atomic(): self.assertEqual(2, caching._context.stack.size) with transaction.atomic(): self.assertEqual(2, caching._context.stack.size) self.assertEqual(2, caching._context.stack.size) self.assertEqual(2, caching._context.stack.size) self.assertEqual(1, caching._context.stack.size) @disable_cache(memcache=True, context=False) def test_nested_rollback_doesnt_apply_on_outer_commit(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) with transaction.atomic(): try: with transaction.atomic(independent=True): inner = CachingTestModel.objects.get(pk=original.pk) inner.field1 = "Banana" inner.save() raise ValueError() # Will rollback the transaction except ValueError: pass outer = CachingTestModel.objects.get(pk=original.pk) self.assertEqual("Apple", outer.field1) original = CachingTestModel.objects.get(pk=original.pk) self.assertEqual("Apple", original.field1) # Shouldn't have changed @disable_cache(memcache=True, context=False) def test_save_caches(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: with sleuth.watch("django.core.cache.cache.get") as memcache_get: original = CachingTestModel.objects.get(pk=original.pk) self.assertFalse(datastore_get.called) self.assertFalse(memcache_get.called) @disable_cache(memcache=True, context=False) def test_consistent_read_updates_cache_outside_transaction(self): """ A read inside a transaction shouldn't update the context cache outside that transaction """ entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) clear_context_cache() CachingTestModel.objects.get(pk=original.pk) # Should 
update the cache with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(pk=original.pk) self.assertFalse(datastore_get.called) clear_context_cache() with transaction.atomic(): with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(pk=original.pk) # Should *not* update the cache self.assertTrue(datastore_get.called) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(pk=original.pk) self.assertTrue(datastore_get.called) @disable_cache(memcache=True, context=False) def test_inconsistent_read_doesnt_update_cache(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) clear_context_cache() CachingTestModel.objects.all() # Inconsistent with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(pk=original.pk) self.assertTrue(datastore_get.called) @disable_cache(memcache=True, context=False) def test_unique_filter_hits_cache(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } CachingTestModel.objects.create(**entity_data) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: list(CachingTestModel.objects.filter(field1="Apple")) self.assertFalse(datastore_get.called) @disable_cache(memcache=True, context=False) def test_unique_filter_applies_all_filters(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = CachingTestModel.objects.create(**entity_data) with sleuth.watch("google.appengine.api.datastore.Query.Run") as datastore_query: # Expect no matches num_instances = CachingTestModel.objects.filter(field1="Apple", comb1=0).count() self.assertEqual(num_instances, 0) @disable_cache(memcache=True, context=False) def test_get_by_key_hits_cache(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } original = 
CachingTestModel.objects.create(**entity_data) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(pk=original.pk) self.assertFalse(datastore_get.called) @disable_cache(memcache=True, context=False) def test_unique_get_hits_cache(self): entity_data = { "field1": "Apple", "comb1": 1, "comb2": "Cherry" } CachingTestModel.objects.create(**entity_data) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: CachingTestModel.objects.get(field1="Apple") self.assertFalse(datastore_get.called) @disable_cache(memcache=True, context=False) def test_context_cache_cleared_after_request(self): """ The context cache should be cleared between requests. """ CachingTestModel.objects.create(field1="test") with sleuth.watch("google.appengine.api.datastore.Query.Run") as query: CachingTestModel.objects.get(field1="test") self.assertEqual(query.call_count, 0) # Now start a new request, which should clear the cache request_started.send(HttpRequest(), keep_disabled_flags=True) CachingTestModel.objects.get(field1="test") self.assertEqual(query.call_count, 1) # Now do another call, which should use the cache (because it would have been # populated by the previous call) CachingTestModel.objects.get(field1="test") self.assertEqual(query.call_count, 1) # Now clear the cache again by *finishing* a request request_finished.send(HttpRequest(), keep_disabled_flags=True) CachingTestModel.objects.get(field1="test") self.assertEqual(query.call_count, 2)
bsd-3-clause
2015fallproject/2015fallcase2
static/Brython3.2.0-20150701-214155/Lib/test/unittests/test_nntplib.py
23
56030
import io import socket import datetime import textwrap import unittest import functools import contextlib from test import support from nntplib import NNTP, GroupInfo, _have_ssl import nntplib if _have_ssl: import ssl TIMEOUT = 30 # TODO: # - test the `file` arg to more commands # - test error conditions # - test auth and `usenetrc` class NetworkedNNTPTestsMixin: def test_welcome(self): welcome = self.server.getwelcome() self.assertEqual(str, type(welcome)) def test_help(self): resp, lines = self.server.help() self.assertTrue(resp.startswith("100 "), resp) for line in lines: self.assertEqual(str, type(line)) def test_list(self): resp, groups = self.server.list() if len(groups) > 0: self.assertEqual(GroupInfo, type(groups[0])) self.assertEqual(str, type(groups[0].group)) def test_list_active(self): resp, groups = self.server.list(self.GROUP_PAT) if len(groups) > 0: self.assertEqual(GroupInfo, type(groups[0])) self.assertEqual(str, type(groups[0].group)) def test_unknown_command(self): with self.assertRaises(nntplib.NNTPPermanentError) as cm: self.server._shortcmd("XYZZY") resp = cm.exception.response self.assertTrue(resp.startswith("500 "), resp) def test_newgroups(self): # gmane gets a constant influx of new groups. In order not to stress # the server too much, we choose a recent date in the past. 
dt = datetime.date.today() - datetime.timedelta(days=7) resp, groups = self.server.newgroups(dt) if len(groups) > 0: self.assertIsInstance(groups[0], GroupInfo) self.assertIsInstance(groups[0].group, str) def test_description(self): def _check_desc(desc): # Sanity checks self.assertIsInstance(desc, str) self.assertNotIn(self.GROUP_NAME, desc) desc = self.server.description(self.GROUP_NAME) _check_desc(desc) # Another sanity check self.assertIn("Python", desc) # With a pattern desc = self.server.description(self.GROUP_PAT) _check_desc(desc) # Shouldn't exist desc = self.server.description("zk.brrtt.baz") self.assertEqual(desc, '') def test_descriptions(self): resp, descs = self.server.descriptions(self.GROUP_PAT) # 215 for LIST NEWSGROUPS, 282 for XGTITLE self.assertTrue( resp.startswith("215 ") or resp.startswith("282 "), resp) self.assertIsInstance(descs, dict) desc = descs[self.GROUP_NAME] self.assertEqual(desc, self.server.description(self.GROUP_NAME)) def test_group(self): result = self.server.group(self.GROUP_NAME) self.assertEqual(5, len(result)) resp, count, first, last, group = result self.assertEqual(group, self.GROUP_NAME) self.assertIsInstance(count, int) self.assertIsInstance(first, int) self.assertIsInstance(last, int) self.assertLessEqual(first, last) self.assertTrue(resp.startswith("211 "), resp) def test_date(self): resp, date = self.server.date() self.assertIsInstance(date, datetime.datetime) # Sanity check self.assertGreaterEqual(date.year, 1995) self.assertLessEqual(date.year, 2030) def _check_art_dict(self, art_dict): # Some sanity checks for a field dictionary returned by OVER / XOVER self.assertIsInstance(art_dict, dict) # NNTP has 7 mandatory fields self.assertGreaterEqual(art_dict.keys(), {"subject", "from", "date", "message-id", "references", ":bytes", ":lines"} ) for v in art_dict.values(): self.assertIsInstance(v, (str, type(None))) def test_xover(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) resp, lines = 
self.server.xover(last - 5, last) if len(lines) == 0: self.skipTest("no articles retrieved") # The 'last' article is not necessarily part of the output (cancelled?) art_num, art_dict = lines[0] self.assertGreaterEqual(art_num, last - 5) self.assertLessEqual(art_num, last) self._check_art_dict(art_dict) def test_over(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) start = last - 10 # The "start-" article range form resp, lines = self.server.over((start, None)) art_num, art_dict = lines[0] self._check_art_dict(art_dict) # The "start-end" article range form resp, lines = self.server.over((start, last)) art_num, art_dict = lines[-1] # The 'last' article is not necessarily part of the output (cancelled?) self.assertGreaterEqual(art_num, start) self.assertLessEqual(art_num, last) self._check_art_dict(art_dict) # XXX The "message_id" form is unsupported by gmane # 503 Overview by message-ID unsupported def test_xhdr(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) resp, lines = self.server.xhdr('subject', last) for line in lines: self.assertEqual(str, type(line[1])) def check_article_resp(self, resp, article, art_num=None): self.assertIsInstance(article, nntplib.ArticleInfo) if art_num is not None: self.assertEqual(article.number, art_num) for line in article.lines: self.assertIsInstance(line, bytes) # XXX this could exceptionally happen... 
self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n")) def test_article_head_body(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) # Try to find an available article for art_num in (last, first, last - 1): try: resp, head = self.server.head(art_num) except nntplib.NNTPTemporaryError as e: if not e.response.startswith("423 "): raise # "423 No such article" => choose another one continue break else: self.skipTest("could not find a suitable article number") self.assertTrue(resp.startswith("221 "), resp) self.check_article_resp(resp, head, art_num) resp, body = self.server.body(art_num) self.assertTrue(resp.startswith("222 "), resp) self.check_article_resp(resp, body, art_num) resp, article = self.server.article(art_num) self.assertTrue(resp.startswith("220 "), resp) self.check_article_resp(resp, article, art_num) # Tolerate running the tests from behind a NNTP virus checker blacklist = lambda line: line.startswith(b'X-Antivirus') filtered_head_lines = [line for line in head.lines if not blacklist(line)] filtered_lines = [line for line in article.lines if not blacklist(line)] self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines) def test_capabilities(self): # The server under test implements NNTP version 2 and has a # couple of well-known capabilities. Just sanity check that we # got them. def _check_caps(caps): caps_list = caps['LIST'] self.assertIsInstance(caps_list, (list, tuple)) self.assertIn('OVERVIEW.FMT', caps_list) self.assertGreaterEqual(self.server.nntp_version, 2) _check_caps(self.server.getcapabilities()) # This re-emits the command resp, caps = self.server.capabilities() _check_caps(caps) if _have_ssl: def test_starttls(self): file = self.server.file sock = self.server.sock try: self.server.starttls() except nntplib.NNTPPermanentError: self.skipTest("STARTTLS not supported by server.") else: # Check that the socket and internal pseudo-file really were # changed. 
self.assertNotEqual(file, self.server.file) self.assertNotEqual(sock, self.server.sock) # Check that the new socket really is an SSL one self.assertIsInstance(self.server.sock, ssl.SSLSocket) # Check that trying starttls when it's already active fails. self.assertRaises(ValueError, self.server.starttls) def test_zlogin(self): # This test must be the penultimate because further commands will be # refused. baduser = "notarealuser" badpw = "notarealpassword" # Check that bogus credentials cause failure self.assertRaises(nntplib.NNTPError, self.server.login, user=baduser, password=badpw, usenetrc=False) # FIXME: We should check that correct credentials succeed, but that # would require valid details for some server somewhere to be in the # test suite, I think. Gmane is anonymous, at least as used for the # other tests. def test_zzquit(self): # This test must be called last, hence the name cls = type(self) try: self.server.quit() finally: cls.server = None @classmethod def wrap_methods(cls): # Wrap all methods in a transient_internet() exception catcher # XXX put a generic version in test.support? 
def wrap_meth(meth): @functools.wraps(meth) def wrapped(self): with support.transient_internet(self.NNTP_HOST): meth(self) return wrapped for name in dir(cls): if not name.startswith('test_'): continue meth = getattr(cls, name) if not callable(meth): continue # Need to use a closure so that meth remains bound to its current # value setattr(cls, name, wrap_meth(meth)) def test_with_statement(self): def is_connected(): if not hasattr(server, 'file'): return False try: server.help() except (socket.error, EOFError): return False return True with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server: self.assertTrue(is_connected()) self.assertTrue(server.help()) self.assertFalse(is_connected()) with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server: server.quit() self.assertFalse(is_connected()) NetworkedNNTPTestsMixin.wrap_methods() class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase): # This server supports STARTTLS (gmane doesn't) NNTP_HOST = 'news.trigofacile.com' GROUP_NAME = 'fr.comp.lang.python' GROUP_PAT = 'fr.comp.lang.*' NNTP_CLASS = NNTP @classmethod def setUpClass(cls): support.requires("network") with support.transient_internet(cls.NNTP_HOST): cls.server = cls.NNTP_CLASS(cls.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) @classmethod def tearDownClass(cls): if cls.server is not None: cls.server.quit() if _have_ssl: class NetworkedNNTP_SSLTests(NetworkedNNTPTests): # Technical limits for this public NNTP server (see http://www.aioe.org): # "Only two concurrent connections per IP address are allowed and # 400 connections per day are accepted from each IP address." NNTP_HOST = 'nntp.aioe.org' GROUP_NAME = 'comp.lang.python' GROUP_PAT = 'comp.lang.*' NNTP_CLASS = nntplib.NNTP_SSL # Disabled as it produces too much data test_list = None # Disabled as the connection will already be encrypted. test_starttls = None # # Non-networked tests using a local server (or something mocking it). 
# class _NNTPServerIO(io.RawIOBase): """A raw IO object allowing NNTP commands to be received and processed by a handler. The handler can push responses which can then be read from the IO object.""" def __init__(self, handler): io.RawIOBase.__init__(self) # The channel from the client self.c2s = io.BytesIO() # The channel to the client self.s2c = io.BytesIO() self.handler = handler self.handler.start(self.c2s.readline, self.push_data) def readable(self): return True def writable(self): return True def push_data(self, data): """Push (buffer) some data to send to the client.""" pos = self.s2c.tell() self.s2c.seek(0, 2) self.s2c.write(data) self.s2c.seek(pos) def write(self, b): """The client sends us some data""" pos = self.c2s.tell() self.c2s.write(b) self.c2s.seek(pos) self.handler.process_pending() return len(b) def readinto(self, buf): """The client wants to read a response""" self.handler.process_pending() b = self.s2c.read(len(buf)) n = len(b) buf[:n] = b return n class MockedNNTPTestsMixin: # Override in derived classes handler_class = None def setUp(self): super().setUp() self.make_server() def tearDown(self): super().tearDown() del self.server def make_server(self, *args, **kwargs): self.handler = self.handler_class() self.sio = _NNTPServerIO(self.handler) # Using BufferedRWPair instead of BufferedRandom ensures the file # isn't seekable. 
file = io.BufferedRWPair(self.sio, self.sio) self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs) return self.server class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin): def setUp(self): super().setUp() self.make_server(readermode=True) class NNTPv1Handler: """A handler for RFC 977""" welcome = "200 NNTP mock server" def start(self, readline, push_data): self.in_body = False self.allow_posting = True self._readline = readline self._push_data = push_data self._logged_in = False self._user_sent = False # Our welcome self.handle_welcome() def _decode(self, data): return str(data, "utf-8", "surrogateescape") def process_pending(self): if self.in_body: while True: line = self._readline() if not line: return self.body.append(line) if line == b".\r\n": break try: meth, tokens = self.body_callback meth(*tokens, body=self.body) finally: self.body_callback = None self.body = None self.in_body = False while True: line = self._decode(self._readline()) if not line: return if not line.endswith("\r\n"): raise ValueError("line doesn't end with \\r\\n: {!r}".format(line)) line = line[:-2] cmd, *tokens = line.split() #meth = getattr(self.handler, "handle_" + cmd.upper(), None) meth = getattr(self, "handle_" + cmd.upper(), None) if meth is None: self.handle_unknown() else: try: meth(*tokens) except Exception as e: raise ValueError("command failed: {!r}".format(line)) from e else: if self.in_body: self.body_callback = meth, tokens self.body = [] def expect_body(self): """Flag that the client is expected to post a request body""" self.in_body = True def push_data(self, data): """Push some binary data""" self._push_data(data) def push_lit(self, lit): """Push a string literal""" lit = textwrap.dedent(lit) lit = "\r\n".join(lit.splitlines()) + "\r\n" lit = lit.encode('utf-8') self.push_data(lit) def handle_unknown(self): self.push_lit("500 What?") def handle_welcome(self): self.push_lit(self.welcome) def handle_QUIT(self): self.push_lit("205 Bye!") def 
handle_DATE(self): self.push_lit("111 20100914001155") def handle_GROUP(self, group): if group == "fr.comp.lang.python": self.push_lit("211 486 761 1265 fr.comp.lang.python") else: self.push_lit("411 No such group {}".format(group)) def handle_HELP(self): self.push_lit("""\ 100 Legal commands authinfo user Name|pass Password|generic <prog> <args> date help Report problems to <root@example.org> .""") def handle_STAT(self, message_spec=None): if message_spec is None: self.push_lit("412 No newsgroup selected") elif message_spec == "3000234": self.push_lit("223 3000234 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("223 0 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") def handle_NEXT(self): self.push_lit("223 3000237 <668929@example.org> retrieved") def handle_LAST(self): self.push_lit("223 3000234 <45223423@example.com> retrieved") def handle_LIST(self, action=None, param=None): if action is None: self.push_lit("""\ 215 Newsgroups in form "group high low flags". comp.lang.python 0000052340 0000002828 y comp.lang.python.announce 0000001153 0000000993 m free.it.comp.lang.python 0000000002 0000000002 y fr.comp.lang.python 0000001254 0000000760 y free.it.comp.lang.python.learner 0000000000 0000000001 y tw.bbs.comp.lang.python 0000000304 0000000304 y .""") elif action == "ACTIVE": if param == "*distutils*": self.push_lit("""\ 215 Newsgroups in form "group high low flags" gmane.comp.python.distutils.devel 0000014104 0000000001 m gmane.comp.python.distutils.cvs 0000000000 0000000001 m .""") else: self.push_lit("""\ 215 Newsgroups in form "group high low flags" .""") elif action == "OVERVIEW.FMT": self.push_lit("""\ 215 Order of fields in overview database. Subject: From: Date: Message-ID: References: Bytes: Lines: Xref:full .""") elif action == "NEWSGROUPS": assert param is not None if param == "comp.lang.python": self.push_lit("""\ 215 Descriptions in form "group description". 
comp.lang.python\tThe Python computer language. .""") elif param == "comp.lang.python*": self.push_lit("""\ 215 Descriptions in form "group description". comp.lang.python.announce\tAnnouncements about the Python language. (Moderated) comp.lang.python\tThe Python computer language. .""") else: self.push_lit("""\ 215 Descriptions in form "group description". .""") else: self.push_lit('501 Unknown LIST keyword') def handle_NEWNEWS(self, group, date_str, time_str): # We hard code different return messages depending on passed # argument and date syntax. if (group == "comp.lang.python" and date_str == "20100913" and time_str == "082004"): # Date was passed in RFC 3977 format (NNTP "v2") self.push_lit("""\ 230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com> <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com> .""") elif (group == "comp.lang.python" and date_str == "100913" and time_str == "082004"): # Date was passed in RFC 977 format (NNTP "v1") self.push_lit("""\ 230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com> <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com> .""") elif (group == 'comp.lang.python' and date_str in ('20100101', '100101') and time_str == '090000'): self.push_lit('too long line' * 3000 + '\n.') else: self.push_lit("""\ 230 An empty list of newsarticles follows .""") # (Note for experiments: many servers disable NEWNEWS. # As of this writing, sicinfo3.epfl.ch doesn't.) 
def handle_XOVER(self, message_spec): if message_spec == "57-59": self.push_lit( "224 Overview information for 57-58 follows\n" "57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout" "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>" "\tSat, 19 Jun 2010 18:04:08 -0400" "\t<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>" "\t<hvalf7$ort$1@dough.gmane.org>\t7103\t16" "\tXref: news.gmane.org gmane.comp.python.authors:57" "\n" "58\tLooking for a few good bloggers" "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>" "\tThu, 22 Jul 2010 09:14:14 -0400" "\t<A29863FA-F388-40C3-AA25-0FD06B09B5BF@gmail.com>" "\t\t6683\t16" "\t" "\n" # An UTF-8 overview line from fr.comp.lang.python "59\tRe: Message d'erreur incompréhensible (par moi)" "\tEric Brunel <eric.brunel@pragmadev.nospam.com>" "\tWed, 15 Sep 2010 18:09:15 +0200" "\t<eric.brunel-2B8B56.18091515092010@news.wanadoo.fr>" "\t<4c90ec87$0$32425$ba4acef3@reader.news.orange.fr>\t1641\t27" "\tXref: saria.nerim.net fr.comp.lang.python:1265" "\n" ".\n") else: self.push_lit("""\ 224 No articles .""") def handle_POST(self, *, body=None): if body is None: if self.allow_posting: self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>") self.expect_body() else: self.push_lit("440 Posting not permitted") else: assert self.allow_posting self.push_lit("240 Article received OK") self.posted_body = body def handle_IHAVE(self, message_id, *, body=None): if body is None: if (self.allow_posting and message_id == "<i.am.an.article.you.will.want@example.com>"): self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>") self.expect_body() else: self.push_lit("435 Article not wanted") else: assert self.allow_posting self.push_lit("235 Article transferred OK") self.posted_body = body sample_head = """\ From: "Demo User" <nobody@example.net> Subject: I am just a test article Content-Type: text/plain; charset=UTF-8; format=flowed Message-ID: 
<i.am.an.article.you.will.want@example.com>""" sample_body = """\ This is just a test article. ..Here is a dot-starting line. -- Signed by Andr\xe9.""" sample_article = sample_head + "\n\n" + sample_body def handle_ARTICLE(self, message_spec=None): if message_spec is None: self.push_lit("220 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("220 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("220 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_article) self.push_lit(".") def handle_HEAD(self, message_spec=None): if message_spec is None: self.push_lit("221 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("221 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("221 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_head) self.push_lit(".") def handle_BODY(self, message_spec=None): if message_spec is None: self.push_lit("222 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("222 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("222 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_body) self.push_lit(".") def handle_AUTHINFO(self, cred_type, data): if self._logged_in: self.push_lit('502 Already Logged In') elif cred_type == 'user': if self._user_sent: self.push_lit('482 User Credential Already Sent') else: self.push_lit('381 Password Required') self._user_sent = True elif cred_type == 'pass': self.push_lit('281 Login Successful') self._logged_in = True else: raise Exception('Unknown cred type {}'.format(cred_type)) class NNTPv2Handler(NNTPv1Handler): """A handler for RFC 3977 (NNTP "v2")""" def handle_CAPABILITIES(self): fmt = """\ 101 Capability list: VERSION 2 3 
IMPLEMENTATION INN 2.5.1{} HDR LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT OVER POST READER .""" if not self._logged_in: self.push_lit(fmt.format('\n AUTHINFO USER')) else: self.push_lit(fmt.format('')) def handle_MODE(self, _): raise Exception('MODE READER sent despite READER has been advertised') def handle_OVER(self, message_spec=None): return self.handle_XOVER(message_spec) class CapsAfterLoginNNTPv2Handler(NNTPv2Handler): """A handler that allows CAPABILITIES only after login""" def handle_CAPABILITIES(self): if not self._logged_in: self.push_lit('480 You must log in.') else: super().handle_CAPABILITIES() class ModeSwitchingNNTPv2Handler(NNTPv2Handler): """A server that starts in transit mode""" def __init__(self): self._switched = False def handle_CAPABILITIES(self): fmt = """\ 101 Capability list: VERSION 2 3 IMPLEMENTATION INN 2.5.1 HDR LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT OVER POST {}READER .""" if self._switched: self.push_lit(fmt.format('')) else: self.push_lit(fmt.format('MODE-')) def handle_MODE(self, what): assert not self._switched and what == 'reader' self._switched = True self.push_lit('200 Posting allowed') class NNTPv1v2TestsMixin: def setUp(self): super().setUp() def test_welcome(self): self.assertEqual(self.server.welcome, self.handler.welcome) def test_authinfo(self): if self.nntp_version == 2: self.assertIn('AUTHINFO', self.server._caps) self.server.login('testuser', 'testpw') # if AUTHINFO is gone from _caps we also know that getcapabilities() # has been called after login as it should self.assertNotIn('AUTHINFO', self.server._caps) def test_date(self): resp, date = self.server.date() self.assertEqual(resp, "111 20100914001155") self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55)) def test_quit(self): self.assertFalse(self.sio.closed) resp = self.server.quit() self.assertEqual(resp, "205 Bye!") self.assertTrue(self.sio.closed) def test_help(self): resp, help = 
self.server.help() self.assertEqual(resp, "100 Legal commands") self.assertEqual(help, [ ' authinfo user Name|pass Password|generic <prog> <args>', ' date', ' help', 'Report problems to <root@example.org>', ]) def test_list(self): resp, groups = self.server.list() self.assertEqual(len(groups), 6) g = groups[1] self.assertEqual(g, GroupInfo("comp.lang.python.announce", "0000001153", "0000000993", "m")) resp, groups = self.server.list("*distutils*") self.assertEqual(len(groups), 2) g = groups[0] self.assertEqual(g, GroupInfo("gmane.comp.python.distutils.devel", "0000014104", "0000000001", "m")) def test_stat(self): resp, art_num, message_id = self.server.stat(3000234) self.assertEqual(resp, "223 3000234 <45223423@example.com>") self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") resp, art_num, message_id = self.server.stat("<45223423@example.com>") self.assertEqual(resp, "223 0 <45223423@example.com>") self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.stat("<non.existent.id>") self.assertEqual(cm.exception.response, "430 No Such Article Found") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.stat() self.assertEqual(cm.exception.response, "412 No newsgroup selected") def test_next(self): resp, art_num, message_id = self.server.next() self.assertEqual(resp, "223 3000237 <668929@example.org> retrieved") self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<668929@example.org>") def test_last(self): resp, art_num, message_id = self.server.last() self.assertEqual(resp, "223 3000234 <45223423@example.com> retrieved") self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") def test_description(self): desc = self.server.description("comp.lang.python") self.assertEqual(desc, "The Python computer language.") desc = self.server.description("comp.lang.pythonx") 
self.assertEqual(desc, "") def test_descriptions(self): resp, groups = self.server.descriptions("comp.lang.python") self.assertEqual(resp, '215 Descriptions in form "group description".') self.assertEqual(groups, { "comp.lang.python": "The Python computer language.", }) resp, groups = self.server.descriptions("comp.lang.python*") self.assertEqual(groups, { "comp.lang.python": "The Python computer language.", "comp.lang.python.announce": "Announcements about the Python language. (Moderated)", }) resp, groups = self.server.descriptions("comp.lang.pythonx") self.assertEqual(groups, {}) def test_group(self): resp, count, first, last, group = self.server.group("fr.comp.lang.python") self.assertTrue(resp.startswith("211 "), resp) self.assertEqual(first, 761) self.assertEqual(last, 1265) self.assertEqual(count, 486) self.assertEqual(group, "fr.comp.lang.python") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.group("comp.lang.python.devel") exc = cm.exception self.assertTrue(exc.response.startswith("411 No such group"), exc.response) def test_newnews(self): # NEWNEWS comp.lang.python [20]100913 082004 dt = datetime.datetime(2010, 9, 13, 8, 20, 4) resp, ids = self.server.newnews("comp.lang.python", dt) expected = ( "230 list of newsarticles (NNTP v{0}) " "created after Mon Sep 13 08:20:04 2010 follows" ).format(self.nntp_version) self.assertEqual(resp, expected) self.assertEqual(ids, [ "<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>", "<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>", ]) # NEWNEWS fr.comp.lang.python [20]100913 082004 dt = datetime.datetime(2010, 9, 13, 8, 20, 4) resp, ids = self.server.newnews("fr.comp.lang.python", dt) self.assertEqual(resp, "230 An empty list of newsarticles follows") self.assertEqual(ids, []) def _check_article_body(self, lines): self.assertEqual(len(lines), 4) self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.") self.assertEqual(lines[-2], b"") 
self.assertEqual(lines[-3], b".Here is a dot-starting line.") self.assertEqual(lines[-4], b"This is just a test article.") def _check_article_head(self, lines): self.assertEqual(len(lines), 4) self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>') self.assertEqual(lines[3], b"Message-ID: <i.am.an.article.you.will.want@example.com>") def _check_article_data(self, lines): self.assertEqual(len(lines), 9) self._check_article_head(lines[:4]) self._check_article_body(lines[-4:]) self.assertEqual(lines[4], b"") def test_article(self): # ARTICLE resp, info = self.server.article() self.assertEqual(resp, "220 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # ARTICLE num resp, info = self.server.article(3000234) self.assertEqual(resp, "220 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # ARTICLE id resp, info = self.server.article("<45223423@example.com>") self.assertEqual(resp, "220 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.article("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_article_file(self): # With a "file" argument f = io.BytesIO() resp, info = self.server.article(file=f) self.assertEqual(resp, "220 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertTrue(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am 
just a test article\r\n' ), ascii(data)) self.assertTrue(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def test_head(self): # HEAD resp, info = self.server.head() self.assertEqual(resp, "221 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # HEAD num resp, info = self.server.head(3000234) self.assertEqual(resp, "221 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # HEAD id resp, info = self.server.head("<45223423@example.com>") self.assertEqual(resp, "221 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.head("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_head_file(self): f = io.BytesIO() resp, info = self.server.head(file=f) self.assertEqual(resp, "221 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertTrue(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' ), ascii(data)) self.assertFalse(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def test_body(self): # BODY resp, info = self.server.body() self.assertEqual(resp, "222 3000237 <45223423@example.com>") art_num, message_id, lines = info 
self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # BODY num resp, info = self.server.body(3000234) self.assertEqual(resp, "222 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # BODY id resp, info = self.server.body("<45223423@example.com>") self.assertEqual(resp, "222 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.body("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_body_file(self): f = io.BytesIO() resp, info = self.server.body(file=f) self.assertEqual(resp, "222 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertFalse(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' ), ascii(data)) self.assertTrue(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def check_over_xover_resp(self, resp, overviews): self.assertTrue(resp.startswith("224 "), resp) self.assertEqual(len(overviews), 3) art_num, over = overviews[0] self.assertEqual(art_num, 57) self.assertEqual(over, { "from": "Doug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>", "subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout", "date": "Sat, 19 Jun 2010 18:04:08 -0400", "message-id": "<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>", "references": 
"<hvalf7$ort$1@dough.gmane.org>", ":bytes": "7103", ":lines": "16", "xref": "news.gmane.org gmane.comp.python.authors:57" }) art_num, over = overviews[1] self.assertEqual(over["xref"], None) art_num, over = overviews[2] self.assertEqual(over["subject"], "Re: Message d'erreur incompréhensible (par moi)") def test_xover(self): resp, overviews = self.server.xover(57, 59) self.check_over_xover_resp(resp, overviews) def test_over(self): # In NNTP "v1", this will fallback on XOVER resp, overviews = self.server.over((57, 59)) self.check_over_xover_resp(resp, overviews) sample_post = ( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n' b'Message-ID: <i.am.an.article.you.will.want@example.com>\r\n' b'\r\n' b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ) def _check_posted_body(self): # Check the raw body as received by the server lines = self.handler.posted_body # One additional line for the "." terminator self.assertEqual(len(lines), 10) self.assertEqual(lines[-1], b'.\r\n') self.assertEqual(lines[-2], b'-- Signed by Andr\xc3\xa9.\r\n') self.assertEqual(lines[-3], b'\r\n') self.assertEqual(lines[-4], b'..Here is a dot-starting line.\r\n') self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>\r\n') def _check_post_ihave_sub(self, func, *args, file_factory): # First the prepared post with CRLF endings post = self.sample_post func_args = args + (file_factory(post),) self.handler.posted_body = None resp = func(*func_args) self._check_posted_body() # Then the same post with "normal" line endings - they should be # converted by NNTP.post and NNTP.ihave. 
post = self.sample_post.replace(b"\r\n", b"\n") func_args = args + (file_factory(post),) self.handler.posted_body = None resp = func(*func_args) self._check_posted_body() return resp def check_post_ihave(self, func, success_resp, *args): # With a bytes object resp = self._check_post_ihave_sub(func, *args, file_factory=bytes) self.assertEqual(resp, success_resp) # With a bytearray object resp = self._check_post_ihave_sub(func, *args, file_factory=bytearray) self.assertEqual(resp, success_resp) # With a file object resp = self._check_post_ihave_sub(func, *args, file_factory=io.BytesIO) self.assertEqual(resp, success_resp) # With an iterable of terminated lines def iterlines(b): return iter(b.splitlines(keepends=True)) resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines) self.assertEqual(resp, success_resp) # With an iterable of non-terminated lines def iterlines(b): return iter(b.splitlines(keepends=False)) resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines) self.assertEqual(resp, success_resp) def test_post(self): self.check_post_ihave(self.server.post, "240 Article received OK") self.handler.allow_posting = False with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.post(self.sample_post) self.assertEqual(cm.exception.response, "440 Posting not permitted") def test_ihave(self): self.check_post_ihave(self.server.ihave, "235 Article transferred OK", "<i.am.an.article.you.will.want@example.com>") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.ihave("<another.message.id>", self.sample_post) self.assertEqual(cm.exception.response, "435 Article not wanted") def test_too_long_lines(self): dt = datetime.datetime(2010, 1, 1, 9, 0, 0) self.assertRaises(nntplib.NNTPDataError, self.server.newnews, "comp.lang.python", dt) class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase): """Tests an NNTP v1 server (no capabilities).""" nntp_version = 1 handler_class = NNTPv1Handler def 
test_caps(self): caps = self.server.getcapabilities() self.assertEqual(caps, {}) self.assertEqual(self.server.nntp_version, 1) self.assertEqual(self.server.nntp_implementation, None) class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase): """Tests an NNTP v2 server (with capabilities).""" nntp_version = 2 handler_class = NNTPv2Handler def test_caps(self): caps = self.server.getcapabilities() self.assertEqual(caps, { 'VERSION': ['2', '3'], 'IMPLEMENTATION': ['INN', '2.5.1'], 'AUTHINFO': ['USER'], 'HDR': [], 'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS', 'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'], 'OVER': [], 'POST': [], 'READER': [], }) self.assertEqual(self.server.nntp_version, 3) self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1') class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase): """Tests a probably NNTP v2 server with capabilities only after login.""" nntp_version = 2 handler_class = CapsAfterLoginNNTPv2Handler def test_caps_only_after_login(self): self.assertEqual(self.server._caps, {}) self.server.login('testuser', 'testpw') self.assertIn('VERSION', self.server._caps) class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin, unittest.TestCase): """Same tests as for v2 but we tell NTTP to send MODE READER to a server that isn't in READER mode by default.""" nntp_version = 2 handler_class = ModeSwitchingNNTPv2Handler def test_we_are_in_reader_mode_after_connect(self): self.assertIn('READER', self.server._caps) class MiscTests(unittest.TestCase): def test_decode_header(self): def gives(a, b): self.assertEqual(nntplib.decode_header(a), b) gives("" , "") gives("a plain header", "a plain header") gives(" with extra spaces ", " with extra spaces ") gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python") gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?=" " =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=", "Re: [sqlite] problème avec ORDER BY sur des chaînes de 
caractères accentuées") gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=", "Re: problème de matrice") # A natively utf-8 header (found in the real world!) gives("Re: Message d'erreur incompréhensible (par moi)", "Re: Message d'erreur incompréhensible (par moi)") def test_parse_overview_fmt(self): # The minimal (default) response lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", ":bytes", ":lines"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # The minimal response using alternative names lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # Variations in casing lines = ["subject:", "FROM:", "DaTe:", "message-ID:", "References:", "BYTES:", "Lines:"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # First example from RFC 3977 lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", ":bytes", ":lines", "Xref:full", "Distribution:full"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref", "distribution"]) # Second example from RFC 3977 lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:", "Xref:FULL", "Distribution:FULL"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref", "distribution"]) # A classic response from INN lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:", "Xref:full"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref"]) def test_parse_overview(self): fmt = 
nntplib._DEFAULT_OVERVIEW_FMT + ["xref"] # First example from RFC 3977 lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t<45454@example.net>\t1234\t' '17\tXref: news.example.com misc.test:3000363', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(art_num, 3000234) self.assertEqual(fields, { 'subject': 'I am just a test article', 'from': '"Demo User" <nobody@example.com>', 'date': '6 Oct 1998 04:38:40 -0500', 'message-id': '<45223423@example.com>', 'references': '<45454@example.net>', ':bytes': '1234', ':lines': '17', 'xref': 'news.example.com misc.test:3000363', }) # Second example; here the "Xref" field is totally absent (including # the header name) and comes out as None lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t<45454@example.net>\t1234\t' '17\t\t', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(fields['xref'], None) # Third example; the "Xref" is an empty string, while "references" # is a single space. 
lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t \t1234\t' '17\tXref: \t', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(fields['references'], ' ') self.assertEqual(fields['xref'], '') def test_parse_datetime(self): def gives(a, b, *c): self.assertEqual(nntplib._parse_datetime(a, b), datetime.datetime(*c)) # Output of DATE command gives("19990623135624", None, 1999, 6, 23, 13, 56, 24) # Variations gives("19990623", "135624", 1999, 6, 23, 13, 56, 24) gives("990623", "135624", 1999, 6, 23, 13, 56, 24) gives("090623", "135624", 2009, 6, 23, 13, 56, 24) def test_unparse_datetime(self): # Test non-legacy mode # 1) with a datetime def gives(y, M, d, h, m, s, date_str, time_str): dt = datetime.datetime(y, M, d, h, m, s) self.assertEqual(nntplib._unparse_datetime(dt), (date_str, time_str)) self.assertEqual(nntplib._unparse_datetime(dt, False), (date_str, time_str)) gives(1999, 6, 23, 13, 56, 24, "19990623", "135624") gives(2000, 6, 23, 13, 56, 24, "20000623", "135624") gives(2010, 6, 5, 1, 2, 3, "20100605", "010203") # 2) with a date def gives(y, M, d, date_str, time_str): dt = datetime.date(y, M, d) self.assertEqual(nntplib._unparse_datetime(dt), (date_str, time_str)) self.assertEqual(nntplib._unparse_datetime(dt, False), (date_str, time_str)) gives(1999, 6, 23, "19990623", "000000") gives(2000, 6, 23, "20000623", "000000") gives(2010, 6, 5, "20100605", "000000") def test_unparse_datetime_legacy(self): # Test legacy mode (RFC 977) # 1) with a datetime def gives(y, M, d, h, m, s, date_str, time_str): dt = datetime.datetime(y, M, d, h, m, s) self.assertEqual(nntplib._unparse_datetime(dt, True), (date_str, time_str)) gives(1999, 6, 23, 13, 56, 24, "990623", "135624") gives(2000, 6, 23, 13, 56, 24, "000623", "135624") gives(2010, 6, 5, 1, 2, 3, "100605", "010203") # 2) with a date def gives(y, M, d, date_str, time_str): dt = 
datetime.date(y, M, d) self.assertEqual(nntplib._unparse_datetime(dt, True), (date_str, time_str)) gives(1999, 6, 23, "990623", "000000") gives(2000, 6, 23, "000623", "000000") gives(2010, 6, 5, "100605", "000000") def test_main(): tests = [MiscTests, NNTPv1Tests, NNTPv2Tests, CapsAfterLoginNNTPv2Tests, SendReaderNNTPv2Tests, NetworkedNNTPTests] if _have_ssl: tests.append(NetworkedNNTP_SSLTests) support.run_unittest(*tests) if __name__ == "__main__": test_main()
agpl-3.0
sunzhxjs/JobGIS
lib/python2.7/site-packages/pip/req/req_install.py
21
46670
from __future__ import absolute_import import logging import os import re import shutil import sys import tempfile import traceback import warnings import zipfile from distutils import sysconfig from distutils.util import change_root from email.parser import FeedParser from pip._vendor import pkg_resources, six from pip._vendor.distlib.markers import interpret as markers_interpret from pip._vendor.packaging import specifiers from pip._vendor.six.moves import configparser import pip.wheel from pip.compat import native_str, get_stdlib, WINDOWS from pip.download import is_url, url_to_path, path_to_url, is_archive_file from pip.exceptions import ( InstallationError, UninstallationError, UnsupportedWheel, ) from pip.locations import ( bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user, ) from pip.utils import ( display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir, dist_in_usersite, dist_in_site_packages, egg_link_path, call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir, get_installed_version, canonicalize_name, normalize_path, dist_is_local, ) from pip.utils.hashes import Hashes from pip.utils.deprecation import RemovedInPip10Warning from pip.utils.logging import indent_log from pip.utils.setuptools_build import SETUPTOOLS_SHIM from pip.utils.ui import open_spinner from pip.req.req_uninstall import UninstallPathSet from pip.vcs import vcs from pip.wheel import move_wheel_files, Wheel from pip._vendor.packaging.version import Version logger = logging.getLogger(__name__) operators = specifiers.Specifier._operators.keys() def _strip_extras(path): m = re.match(r'^(.+)(\[[^\]]+\])$', path) extras = None if m: path_no_extras = m.group(1) extras = m.group(2) else: path_no_extras = path return path_no_extras, extras class InstallRequirement(object): def __init__(self, req, comes_from, source_dir=None, editable=False, link=None, as_egg=False, update=True, editable_options=None, pycompile=True, markers=None, isolated=False, 
options=None, wheel_cache=None, constraint=False): self.extras = () if isinstance(req, six.string_types): try: req = pkg_resources.Requirement.parse(req) except pkg_resources.RequirementParseError: if os.path.sep in req: add_msg = "It looks like a path. Does it exist ?" elif '=' in req and not any(op in req for op in operators): add_msg = "= is not a valid operator. Did you mean == ?" else: add_msg = traceback.format_exc() raise InstallationError( "Invalid requirement: '%s'\n%s" % (req, add_msg)) self.extras = req.extras self.req = req self.comes_from = comes_from self.constraint = constraint self.source_dir = source_dir self.editable = editable if editable_options is None: editable_options = {} self.editable_options = editable_options self._wheel_cache = wheel_cache self.link = self.original_link = link self.as_egg = as_egg self.markers = markers self._egg_info_path = None # This holds the pkg_resources.Distribution object if this requirement # is already available: self.satisfied_by = None # This hold the pkg_resources.Distribution object if this requirement # conflicts with another installed distribution: self.conflicts_with = None # Temporary build location self._temp_build_dir = None # Used to store the global directory where the _temp_build_dir should # have been created. Cf _correct_build_location method. self._ideal_build_dir = None # True if the editable should be updated: self.update = update # Set to True after successful installation self.install_succeeded = None # UninstallPathSet of uninstalled distribution (for possible rollback) self.uninstalled = None # Set True if a legitimate do-nothing-on-uninstall has happened - e.g. # system site packages, stdlib packages. 
self.nothing_to_uninstall = False self.use_user_site = False self.target_dir = None self.options = options if options else {} self.pycompile = pycompile # Set to True after successful preparation of this requirement self.prepared = False self.isolated = isolated @classmethod def from_editable(cls, editable_req, comes_from=None, default_vcs=None, isolated=False, options=None, wheel_cache=None, constraint=False): from pip.index import Link name, url, extras_override, editable_options = parse_editable( editable_req, default_vcs) if url.startswith('file:'): source_dir = url_to_path(url) else: source_dir = None res = cls(name, comes_from, source_dir=source_dir, editable=True, link=Link(url), constraint=constraint, editable_options=editable_options, isolated=isolated, options=options if options else {}, wheel_cache=wheel_cache) if extras_override is not None: res.extras = extras_override return res @classmethod def from_line( cls, name, comes_from=None, isolated=False, options=None, wheel_cache=None, constraint=False): """Creates an InstallRequirement from a name, which might be a requirement, directory containing 'setup.py', filename, or URL. """ from pip.index import Link if is_url(name): marker_sep = '; ' else: marker_sep = ';' if marker_sep in name: name, markers = name.split(marker_sep, 1) markers = markers.strip() if not markers: markers = None else: markers = None name = name.strip() req = None path = os.path.normpath(os.path.abspath(name)) link = None extras = None if is_url(name): link = Link(name) else: p, extras = _strip_extras(path) if (os.path.isdir(p) and (os.path.sep in name or name.startswith('.'))): if not is_installable_dir(p): raise InstallationError( "Directory %r is not installable. File 'setup.py' " "not found." 
% name ) link = Link(path_to_url(p)) elif is_archive_file(p): if not os.path.isfile(p): logger.warning( 'Requirement %r looks like a filename, but the ' 'file does not exist', name ) link = Link(path_to_url(p)) # it's a local file, dir, or url if link: # Handle relative file URLs if link.scheme == 'file' and re.search(r'\.\./', link.url): link = Link( path_to_url(os.path.normpath(os.path.abspath(link.path)))) # wheel file if link.is_wheel: wheel = Wheel(link.filename) # can raise InvalidWheelFilename if not wheel.supported(): raise UnsupportedWheel( "%s is not a supported wheel on this platform." % wheel.filename ) req = "%s==%s" % (wheel.name, wheel.version) else: # set the req to the egg fragment. when it's not there, this # will become an 'unnamed' requirement req = link.egg_fragment # a requirement specifier else: req = name options = options if options else {} res = cls(req, comes_from, link=link, markers=markers, isolated=isolated, options=options, wheel_cache=wheel_cache, constraint=constraint) if extras: res.extras = pkg_resources.Requirement.parse('__placeholder__' + extras).extras return res def __str__(self): if self.req: s = str(self.req) if self.link: s += ' from %s' % self.link.url else: s = self.link.url if self.link else None if self.satisfied_by is not None: s += ' in %s' % display_path(self.satisfied_by.location) if self.comes_from: if isinstance(self.comes_from, six.string_types): comes_from = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: s += ' (from %s)' % comes_from return s def __repr__(self): return '<%s object: %s editable=%r>' % ( self.__class__.__name__, str(self), self.editable) def populate_link(self, finder, upgrade, require_hashes): """Ensure that if a link can be found for this, that it is found. Note that self.link may still be None - if Upgrade is False and the requirement is already installed. 
If require_hashes is True, don't use the wheel cache, because cached wheels, always built locally, have different hashes than the files downloaded from the index server and thus throw false hash mismatches. Furthermore, cached wheels at present have undeterministic contents due to file modification times. """ if self.link is None: self.link = finder.find_requirement(self, upgrade) if self._wheel_cache is not None and not require_hashes: old_link = self.link self.link = self._wheel_cache.cached_wheel(self.link, self.name) if old_link != self.link: logger.debug('Using cached wheel link: %s', self.link) @property def specifier(self): return self.req.specifier @property def is_pinned(self): """Return whether I am pinned to an exact version. For example, some-package==1.2 is pinned; some-package>1.2 is not. """ specifiers = self.specifier return (len(specifiers) == 1 and next(iter(specifiers)).operator in ('==', '===')) def from_path(self): if self.req is None: return None s = str(self.req) if self.comes_from: if isinstance(self.comes_from, six.string_types): comes_from = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: s += '->' + comes_from return s def build_location(self, build_dir): if self._temp_build_dir is not None: return self._temp_build_dir if self.req is None: # for requirement via a path to a directory: the name of the # package is not available yet so we create a temp directory # Once run_egg_info will have run, we'll be able # to fix it via _correct_build_location self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-') self._ideal_build_dir = build_dir return self._temp_build_dir if self.editable: name = self.name.lower() else: name = self.name # FIXME: Is there a better place to create the build_dir? 
(hg and bzr # need this) if not os.path.exists(build_dir): logger.debug('Creating directory %s', build_dir) _make_build_dir(build_dir) return os.path.join(build_dir, name) def _correct_build_location(self): """Move self._temp_build_dir to self._ideal_build_dir/self.req.name For some requirements (e.g. a path to a directory), the name of the package is not available until we run egg_info, so the build_location will return a temporary directory and store the _ideal_build_dir. This is only called by self.egg_info_path to fix the temporary build directory. """ if self.source_dir is not None: return assert self.req is not None assert self._temp_build_dir assert self._ideal_build_dir old_location = self._temp_build_dir self._temp_build_dir = None new_location = self.build_location(self._ideal_build_dir) if os.path.exists(new_location): raise InstallationError( 'A package already exists in %s; please remove it to continue' % display_path(new_location)) logger.debug( 'Moving package %s from %s to new location %s', self, display_path(old_location), display_path(new_location), ) shutil.move(old_location, new_location) self._temp_build_dir = new_location self._ideal_build_dir = None self.source_dir = new_location self._egg_info_path = None @property def name(self): if self.req is None: return None return native_str(self.req.project_name) @property def setup_py(self): assert self.source_dir, "No source dir for %s" % self try: import setuptools # noqa except ImportError: if get_installed_version('setuptools') is None: add_msg = "Please install setuptools." 
else: add_msg = traceback.format_exc() # Setuptools is not available raise InstallationError( "Could not import setuptools which is required to " "install from a source distribution.\n%s" % add_msg ) setup_file = 'setup.py' if self.editable_options and 'subdirectory' in self.editable_options: setup_py = os.path.join(self.source_dir, self.editable_options['subdirectory'], setup_file) else: setup_py = os.path.join(self.source_dir, setup_file) # Python2 __file__ should not be unicode if six.PY2 and isinstance(setup_py, six.text_type): setup_py = setup_py.encode(sys.getfilesystemencoding()) return setup_py def run_egg_info(self): assert self.source_dir if self.name: logger.debug( 'Running setup.py (path:%s) egg_info for package %s', self.setup_py, self.name, ) else: logger.debug( 'Running setup.py (path:%s) egg_info for package from %s', self.setup_py, self.link, ) with indent_log(): script = SETUPTOOLS_SHIM % self.setup_py base_cmd = [sys.executable, '-c', script] if self.isolated: base_cmd += ["--no-user-cfg"] egg_info_cmd = base_cmd + ['egg_info'] # We can't put the .egg-info files at the root, because then the # source code will be mistaken for an installed egg, causing # problems if self.editable: egg_base_option = [] else: egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info') ensure_dir(egg_info_dir) egg_base_option = ['--egg-base', 'pip-egg-info'] cwd = self.source_dir if self.editable_options and \ 'subdirectory' in self.editable_options: cwd = os.path.join(cwd, self.editable_options['subdirectory']) call_subprocess( egg_info_cmd + egg_base_option, cwd=cwd, show_stdout=False, command_level=logging.DEBUG, command_desc='python setup.py egg_info') if not self.req: if isinstance( pkg_resources.parse_version(self.pkg_info()["Version"]), Version): op = "==" else: op = "===" self.req = pkg_resources.Requirement.parse( "".join([ self.pkg_info()["Name"], op, self.pkg_info()["Version"], ])) self._correct_build_location() else: metadata_name = 
canonicalize_name(self.pkg_info()["Name"]) if canonicalize_name(self.req.project_name) != metadata_name: logger.warning( 'Running setup.py (path:%s) egg_info for package %s ' 'produced metadata for project name %s. Fix your ' '#egg=%s fragments.', self.setup_py, self.name, metadata_name, self.name ) self.req = pkg_resources.Requirement.parse(metadata_name) def egg_info_data(self, filename): if self.satisfied_by is not None: if not self.satisfied_by.has_metadata(filename): return None return self.satisfied_by.get_metadata(filename) assert self.source_dir filename = self.egg_info_path(filename) if not os.path.exists(filename): return None data = read_text_file(filename) return data def egg_info_path(self, filename): if self._egg_info_path is None: if self.editable: base = self.source_dir else: base = os.path.join(self.source_dir, 'pip-egg-info') filenames = os.listdir(base) if self.editable: filenames = [] for root, dirs, files in os.walk(base): for dir in vcs.dirnames: if dir in dirs: dirs.remove(dir) # Iterate over a copy of ``dirs``, since mutating # a list while iterating over it can cause trouble. # (See https://github.com/pypa/pip/pull/462.) for dir in list(dirs): # Don't search in anything that looks like a virtualenv # environment if ( os.path.exists( os.path.join(root, dir, 'bin', 'python') ) or os.path.exists( os.path.join( root, dir, 'Scripts', 'Python.exe' ) )): dirs.remove(dir) # Also don't search through tests elif dir == 'test' or dir == 'tests': dirs.remove(dir) filenames.extend([os.path.join(root, dir) for dir in dirs]) filenames = [f for f in filenames if f.endswith('.egg-info')] if not filenames: raise InstallationError( 'No files/directories in %s (from %s)' % (base, filename) ) assert filenames, \ "No files/directories in %s (from %s)" % (base, filename) # if we have more than one match, we pick the toplevel one. This # can easily be the case if there is a dist folder which contains # an extracted tarball for testing purposes. 
if len(filenames) > 1: filenames.sort( key=lambda x: x.count(os.path.sep) + (os.path.altsep and x.count(os.path.altsep) or 0) ) self._egg_info_path = os.path.join(base, filenames[0]) return os.path.join(self._egg_info_path, filename) def pkg_info(self): p = FeedParser() data = self.egg_info_data('PKG-INFO') if not data: logger.warning( 'No PKG-INFO file found in %s', display_path(self.egg_info_path('PKG-INFO')), ) p.feed(data or '') return p.close() _requirements_section_re = re.compile(r'\[(.*?)\]') @property def installed_version(self): return get_installed_version(self.name) def assert_source_matches_version(self): assert self.source_dir version = self.pkg_info()['version'] if version not in self.req: logger.warning( 'Requested %s, but installing version %s', self, self.installed_version, ) else: logger.debug( 'Source in %s has version %s, which satisfies requirement %s', display_path(self.source_dir), version, self, ) def update_editable(self, obtain=True): if not self.link: logger.debug( "Cannot update repository at %s; repository location is " "unknown", self.source_dir, ) return assert self.editable assert self.source_dir if self.link.scheme == 'file': # Static paths don't get updated return assert '+' in self.link.url, "bad url: %r" % self.link.url if not self.update: return vc_type, url = self.link.url.split('+', 1) backend = vcs.get_backend(vc_type) if backend: vcs_backend = backend(self.link.url) if obtain: vcs_backend.obtain(self.source_dir) else: vcs_backend.export(self.source_dir) else: assert 0, ( 'Unexpected version control type (in %s): %s' % (self.link, vc_type)) def uninstall(self, auto_confirm=False): """ Uninstall the distribution currently satisfying this requirement. Prompts before removing or modifying files unless ``auto_confirm`` is True. 
Refuses to delete or modify files outside of ``sys.prefix`` - thus uninstallation within a virtual environment can only modify that virtual environment, even if the virtualenv is linked to global site-packages. """ if not self.check_if_exists(): raise UninstallationError( "Cannot uninstall requirement %s, not installed" % (self.name,) ) dist = self.satisfied_by or self.conflicts_with dist_path = normalize_path(dist.location) if not dist_is_local(dist): logger.info( "Not uninstalling %s at %s, outside environment %s", dist.key, dist_path, sys.prefix, ) self.nothing_to_uninstall = True return if dist_path in get_stdlib(): logger.info( "Not uninstalling %s at %s, as it is in the standard library.", dist.key, dist_path, ) self.nothing_to_uninstall = True return paths_to_remove = UninstallPathSet(dist) develop_egg_link = egg_link_path(dist) develop_egg_link_egg_info = '{0}.egg-info'.format( pkg_resources.to_filename(dist.project_name)) egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) # Special case for distutils installed package distutils_egg_info = getattr(dist._provider, 'path', None) # Uninstall cases order do matter as in the case of 2 installs of the # same package, pip needs to uninstall the currently detected version if (egg_info_exists and dist.egg_info.endswith('.egg-info') and not dist.egg_info.endswith(develop_egg_link_egg_info)): # if dist.egg_info.endswith(develop_egg_link_egg_info), we # are in fact in the develop_egg_link case paths_to_remove.add(dist.egg_info) if dist.has_metadata('installed-files.txt'): for installed_file in dist.get_metadata( 'installed-files.txt').splitlines(): path = os.path.normpath( os.path.join(dist.egg_info, installed_file) ) paths_to_remove.add(path) # FIXME: need a test for this elif block # occurs with --single-version-externally-managed/--record outside # of pip elif dist.has_metadata('top_level.txt'): if dist.has_metadata('namespace_packages.txt'): namespaces = dist.get_metadata('namespace_packages.txt') 
else: namespaces = [] for top_level_pkg in [ p for p in dist.get_metadata('top_level.txt').splitlines() if p and p not in namespaces]: path = os.path.join(dist.location, top_level_pkg) paths_to_remove.add(path) paths_to_remove.add(path + '.py') paths_to_remove.add(path + '.pyc') paths_to_remove.add(path + '.pyo') elif distutils_egg_info: warnings.warn( "Uninstalling a distutils installed project ({0}) has been " "deprecated and will be removed in a future version. This is " "due to the fact that uninstalling a distutils project will " "only partially uninstall the project.".format(self.name), RemovedInPip10Warning, ) paths_to_remove.add(distutils_egg_info) elif dist.location.endswith('.egg'): # package installed by easy_install # We cannot match on dist.egg_name because it can slightly vary # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg paths_to_remove.add(dist.location) easy_install_egg = os.path.split(dist.location)[1] easy_install_pth = os.path.join(os.path.dirname(dist.location), 'easy-install.pth') paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) elif develop_egg_link: # develop egg with open(develop_egg_link, 'r') as fh: link_pointer = os.path.normcase(fh.readline().strip()) assert (link_pointer == dist.location), ( 'Egg-link %s does not match installed location of %s ' '(at %s)' % (link_pointer, self.name, dist.location) ) paths_to_remove.add(develop_egg_link) easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), 'easy-install.pth') paths_to_remove.add_pth(easy_install_pth, dist.location) elif egg_info_exists and dist.egg_info.endswith('.dist-info'): for path in pip.wheel.uninstallation_paths(dist): paths_to_remove.add(path) else: logger.debug( 'Not sure how to uninstall: %s - Check: %s', dist, dist.location) # find distutils scripts= scripts if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): for script in dist.metadata_listdir('scripts'): if dist_in_usersite(dist): bin_dir = bin_user else: 
bin_dir = bin_py paths_to_remove.add(os.path.join(bin_dir, script)) if WINDOWS: paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') # find console_scripts if dist.has_metadata('entry_points.txt'): config = configparser.SafeConfigParser() config.readfp( FakeFile(dist.get_metadata_lines('entry_points.txt')) ) if config.has_section('console_scripts'): for name, value in config.items('console_scripts'): if dist_in_usersite(dist): bin_dir = bin_user else: bin_dir = bin_py paths_to_remove.add(os.path.join(bin_dir, name)) if WINDOWS: paths_to_remove.add( os.path.join(bin_dir, name) + '.exe' ) paths_to_remove.add( os.path.join(bin_dir, name) + '.exe.manifest' ) paths_to_remove.add( os.path.join(bin_dir, name) + '-script.py' ) paths_to_remove.remove(auto_confirm) self.uninstalled = paths_to_remove def rollback_uninstall(self): if self.uninstalled: self.uninstalled.rollback() else: logger.error( "Can't rollback %s, nothing uninstalled.", self.name, ) def commit_uninstall(self): if self.uninstalled: self.uninstalled.commit() elif not self.nothing_to_uninstall: logger.error( "Can't commit %s, nothing uninstalled.", self.name, ) def archive(self, build_dir): assert self.source_dir create_archive = True archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"]) archive_path = os.path.join(build_dir, archive_name) if os.path.exists(archive_path): response = ask_path_exists( 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup ' % display_path(archive_path), ('i', 'w', 'b')) if response == 'i': create_archive = False elif response == 'w': logger.warning('Deleting %s', display_path(archive_path)) os.remove(archive_path) elif response == 'b': dest_file = backup_dir(archive_path) logger.warning( 'Backing up %s to %s', display_path(archive_path), display_path(dest_file), ) shutil.move(archive_path, dest_file) if create_archive: zip = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True ) dir = os.path.normcase(os.path.abspath(self.source_dir)) for dirpath, dirnames, filenames in os.walk(dir): if 'pip-egg-info' in dirnames: dirnames.remove('pip-egg-info') for dirname in dirnames: dirname = os.path.join(dirpath, dirname) name = self._clean_zip_name(dirname, dir) zipdir = zipfile.ZipInfo(self.name + '/' + name + '/') zipdir.external_attr = 0x1ED << 16 # 0o755 zip.writestr(zipdir, '') for filename in filenames: if filename == PIP_DELETE_MARKER_FILENAME: continue filename = os.path.join(dirpath, filename) name = self._clean_zip_name(filename, dir) zip.write(filename, self.name + '/' + name) zip.close() logger.info('Saved %s', display_path(archive_path)) def _clean_zip_name(self, name, prefix): assert name.startswith(prefix + os.path.sep), ( "name %r doesn't start with prefix %r" % (name, prefix) ) name = name[len(prefix) + 1:] name = name.replace(os.path.sep, '/') return name def match_markers(self): if self.markers is not None: return markers_interpret(self.markers) else: return True def install(self, install_options, global_options=[], root=None, prefix=None): if self.editable: self.install_editable( install_options, global_options, prefix=prefix) return if self.is_wheel: version = pip.wheel.wheel_version(self.source_dir) pip.wheel.check_compatibility(version, self.name) self.move_wheel_files(self.source_dir, root=root, prefix=prefix) self.install_succeeded = True return # Extend the list of global and install options passed on to # the setup.py 
call with the ones from the requirements file. # Options specified in requirements file override those # specified on the command line, since the last option given # to setup.py is the one that is used. global_options += self.options.get('global_options', []) install_options += self.options.get('install_options', []) if self.isolated: global_options = list(global_options) + ["--no-user-cfg"] temp_location = tempfile.mkdtemp('-record', 'pip-') record_filename = os.path.join(temp_location, 'install-record.txt') try: install_args = [sys.executable, "-u"] install_args.append('-c') install_args.append(SETUPTOOLS_SHIM % self.setup_py) install_args += list(global_options) + \ ['install', '--record', record_filename] if not self.as_egg: install_args += ['--single-version-externally-managed'] if root is not None: install_args += ['--root', root] if prefix is not None: install_args += ['--prefix', prefix] if self.pycompile: install_args += ["--compile"] else: install_args += ["--no-compile"] if running_under_virtualenv(): py_ver_str = 'python' + sysconfig.get_python_version() install_args += ['--install-headers', os.path.join(sys.prefix, 'include', 'site', py_ver_str, self.name)] msg = 'Running setup.py install for %s' % (self.name,) with open_spinner(msg) as spinner: with indent_log(): call_subprocess( install_args + install_options, cwd=self.source_dir, show_stdout=False, spinner=spinner, ) if not os.path.exists(record_filename): logger.debug('Record file %s not found', record_filename) return self.install_succeeded = True if self.as_egg: # there's no --always-unzip option we can pass to install # command so we unable to save the installed-files.txt return def prepend_root(path): if root is None or not os.path.isabs(path): return path else: return change_root(root, path) with open(record_filename) as f: for line in f: directory = os.path.dirname(line) if directory.endswith('.egg-info'): egg_info_dir = prepend_root(directory) break else: logger.warning( 'Could not find 
.egg-info directory in install record' ' for %s', self, ) # FIXME: put the record somewhere # FIXME: should this be an error? return new_lines = [] with open(record_filename) as f: for line in f: filename = line.strip() if os.path.isdir(filename): filename += os.path.sep new_lines.append( os.path.relpath( prepend_root(filename), egg_info_dir) ) inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') with open(inst_files_path, 'w') as f: f.write('\n'.join(new_lines) + '\n') finally: if os.path.exists(record_filename): os.remove(record_filename) rmtree(temp_location) def ensure_has_source_dir(self, parent_dir): """Ensure that a source_dir is set. This will create a temporary build dir if the name of the requirement isn't known yet. :param parent_dir: The ideal pip parent_dir for the source_dir. Generally src_dir for editables and build_dir for sdists. :return: self.source_dir """ if self.source_dir is None: self.source_dir = self.build_location(parent_dir) return self.source_dir def remove_temporary_source(self): """Remove the source files from this requirement, if they are marked for deletion""" if self.source_dir and os.path.exists( os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): logger.debug('Removing source in %s', self.source_dir) rmtree(self.source_dir) self.source_dir = None if self._temp_build_dir and os.path.exists(self._temp_build_dir): rmtree(self._temp_build_dir) self._temp_build_dir = None def install_editable(self, install_options, global_options=(), prefix=None): logger.info('Running setup.py develop for %s', self.name) if self.isolated: global_options = list(global_options) + ["--no-user-cfg"] if prefix: prefix_param = ['--prefix={0}'.format(prefix)] install_options = list(install_options) + prefix_param with indent_log(): # FIXME: should we do --install-headers here too? 
cwd = self.source_dir if self.editable_options and \ 'subdirectory' in self.editable_options: cwd = os.path.join(cwd, self.editable_options['subdirectory']) call_subprocess( [ sys.executable, '-c', SETUPTOOLS_SHIM % self.setup_py ] + list(global_options) + ['develop', '--no-deps'] + list(install_options), cwd=cwd, show_stdout=False) self.install_succeeded = True def check_if_exists(self): """Find an installed distribution that satisfies or conflicts with this requirement, and set self.satisfied_by or self.conflicts_with appropriately. """ if self.req is None: return False try: self.satisfied_by = pkg_resources.get_distribution(self.req) except pkg_resources.DistributionNotFound: return False except pkg_resources.VersionConflict: existing_dist = pkg_resources.get_distribution( self.req.project_name ) if self.use_user_site: if dist_in_usersite(existing_dist): self.conflicts_with = existing_dist elif (running_under_virtualenv() and dist_in_site_packages(existing_dist)): raise InstallationError( "Will not install to the user site because it will " "lack sys.path precedence to %s in %s" % (existing_dist.project_name, existing_dist.location) ) else: self.conflicts_with = existing_dist return True @property def is_wheel(self): return self.link and self.link.is_wheel def move_wheel_files(self, wheeldir, root=None, prefix=None): move_wheel_files( self.name, self.req, wheeldir, user=self.use_user_site, home=self.target_dir, root=root, prefix=prefix, pycompile=self.pycompile, isolated=self.isolated, ) def get_dist(self): """Return a pkg_resources.Distribution built from self.egg_info_path""" egg_info = self.egg_info_path('').rstrip('/') base_dir = os.path.dirname(egg_info) metadata = pkg_resources.PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] return pkg_resources.Distribution( os.path.dirname(egg_info), project_name=dist_name, metadata=metadata) @property def has_hash_options(self): """Return whether any known-good hashes are 
specified as options. These activate --require-hashes mode; hashes specified as part of a URL do not. """ return bool(self.options.get('hashes', {})) def hashes(self, trust_internet=True): """Return a hash-comparer that considers my option- and URL-based hashes to be known-good. Hashes in URLs--ones embedded in the requirements file, not ones downloaded from an index server--are almost peers with ones from flags. They satisfy --require-hashes (whether it was implicitly or explicitly activated) but do not activate it. md5 and sha224 are not allowed in flags, which should nudge people toward good algos. We always OR all hashes together, even ones from URLs. :param trust_internet: Whether to trust URL-based (#md5=...) hashes downloaded from the internet, as by populate_link() """ good_hashes = self.options.get('hashes', {}).copy() link = self.link if trust_internet else self.original_link if link and link.hash: good_hashes.setdefault(link.hash_name, []).append(link.hash) return Hashes(good_hashes) def _strip_postfix(req): """ Strip req postfix ( -dev, 0.2, etc ) """ # FIXME: use package_to_requirement? match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req) if match: # Strip off -dev, -0.2, etc. req = match.group(1) return req def _build_req_from_url(url): parts = [p for p in url.split('#', 1)[0].split('/') if p] req = None if parts[-2] in ('tags', 'branches', 'tag', 'branch'): req = parts[-3] elif parts[-1] == 'trunk': req = parts[-2] return req def _build_editable_options(req): """ This method generates a dictionary of the query string parameters contained in a given editable URL. 
""" regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)") matched = regexp.findall(req) if matched: ret = dict() for option in matched: (name, value) = option if name in ret: raise Exception("%s option already defined" % name) ret[name] = value return ret return None def parse_editable(editable_req, default_vcs=None): """Parses an editable requirement into: - a requirement name - an URL - extras - editable options Accepted requirements: svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir .[some_extra] """ from pip.index import Link url = editable_req extras = None # If a file path is specified with extras, strip off the extras. m = re.match(r'^(.+)(\[[^\]]+\])$', url) if m: url_no_extras = m.group(1) extras = m.group(2) else: url_no_extras = url if os.path.isdir(url_no_extras): if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): raise InstallationError( "Directory %r is not installable. File 'setup.py' not found." % url_no_extras ) # Treating it as code that has already been checked out url_no_extras = path_to_url(url_no_extras) if url_no_extras.lower().startswith('file:'): package_name = Link(url_no_extras).egg_fragment if extras: return ( package_name, url_no_extras, pkg_resources.Requirement.parse( '__placeholder__' + extras ).extras, {}, ) else: return package_name, url_no_extras, None, {} for version_control in vcs: if url.lower().startswith('%s:' % version_control): url = '%s+%s' % (version_control, url) break if '+' not in url: if default_vcs: url = default_vcs + '+' + url else: raise InstallationError( '%s should either be a path to a local project or a VCS url ' 'beginning with svn+, git+, hg+, or bzr+' % editable_req ) vc_type = url.split('+', 1)[0].lower() if not vcs.get_backend(vc_type): error_message = 'For --editable=%s only ' % editable_req + \ ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ ' is currently supported' raise InstallationError(error_message) try: options = 
_build_editable_options(editable_req) except Exception as exc: raise InstallationError( '--editable=%s error in editable options:%s' % (editable_req, exc) ) if not options or 'egg' not in options: req = _build_req_from_url(editable_req) if not req: raise InstallationError( '--editable=%s is not the right format; it must have ' '#egg=Package' % editable_req ) else: req = options['egg'] package = _strip_postfix(req) return package, url, None, options
mit
wevote/WebAppPublic
polling_location/urls.py
1
1083
# polling_location/urls.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- from django.conf.urls import url from . import views_admin urlpatterns = [ url(r'^$', views_admin.polling_location_list_view, name='polling_location_list',), url(r'^import/$', views_admin.polling_locations_import_from_master_server_view, name='polling_locations_import_from_master_server'), # Processing incoming file with polling locations url(r'^import_polling_locations_process/$', views_admin.import_polling_locations_process_view, name='import_polling_locations_process'), url(r'^(?P<polling_location_local_id>[0-9]+)/edit/$', views_admin.polling_location_edit_view, name='polling_location_edit'), url(r'^(?P<polling_location_local_id>[0-9]+)/summary/$', views_admin.polling_location_summary_view, name='polling_location_summary'), url(r'^(?P<polling_location_we_vote_id>wv[\w]{2}ploc[\w]+)/summary/$', views_admin.polling_location_summary_by_we_vote_id_view, name='polling_location_summary_by_we_vote_id'), ]
bsd-3-clause
aosagie/spark
python/pyspark/shell.py
37
2333
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
An interactive shell.

This file is designed to be launched as a PYTHONSTARTUP script.
"""

import atexit
import os
import platform
import warnings

import py4j

# SparkConf and SQLContext are imported so they are available to shell users,
# not because this script uses them directly.
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, SQLContext

# Propagate the executor URI (e.g. for Mesos) before the JVM is started.
if os.environ.get("SPARK_EXECUTOR_URI"):
    SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])

SparkContext._ensure_initialized()

try:
    # Internal helper: builds a SparkSession honoring spark-submit/Hive config.
    spark = SparkSession._create_shell_session()
except Exception:
    import sys
    import traceback
    warnings.warn("Failed to initialize Spark session.")
    traceback.print_exc(file=sys.stderr)
    sys.exit(1)

# Convenience bindings exposed to the interactive user.
sc = spark.sparkContext
sql = spark.sql
# Stop the SparkContext cleanly when the shell exits.
atexit.register(lambda: sc.stop())

# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext

print(r"""Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /__ / .__/\_,_/_/ /_/\_\   version %s
      /_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
    platform.python_version(),
    platform.python_build()[0],
    platform.python_build()[1]))
print("SparkSession available as 'spark'.")

# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
    with open(_pythonstartup) as f:
        code = compile(f.read(), _pythonstartup, 'exec')
        exec(code)
apache-2.0
garbled1/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py
34
6284
#!/usr/bin/python # coding: utf-8 -*- # (c) 2017, Wayne Witzel III <wayne@riotousliving.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: tower_group author: "Wayne Witzel III (@wwitzel3)" version_added: "2.3" short_description: create, update, or destroy Ansible Tower group. description: - Create, update, or destroy Ansible Tower groups. See U(https://www.ansible.com/tower) for an overview. options: name: description: - The name to use for the group. required: True description: description: - The description to use for the group. required: False default: null inventory: description: - Inventory the group should be made a member of. required: True variables: description: - Variables to use for the group, use '@' for a file. required: False default: null credential: description: - Credential to use for the group. required: False default: null source: description: - The source to use for this group. required: False default: null, choices: ["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6" , "cloudforms", "custom"] source_regions: description: - Regions for cloud provider. required: False default: null source_vars: description: - Override variables from source with variables from this field. required: False default: null instance_filters: description: - Comma-separated list of filter expressions for matching hosts. required: False default: null group_by: description: - Limit groups automatically created from inventory source. required: False default: null source_script: description: - Inventory script to be used when group type is "custom". required: False default: null overwrite: description: - Delete child roups and hosts not found in source. 
required: False default: False overwrite_vars: description: - Override vars in child groups and hosts with those from external source. required: False default: null update_on_launch: description: - Refresh inventory data from its source each time a job is run. required: False default: False state: description: - Desired state of the resource. required: False default: "present" choices: ["present", "absent"] extends_documentation_fragment: tower ''' EXAMPLES = ''' - name: Add tower group tower_group: name: localhost description: "Local Host Group" inventory: "Local Inventory" state: present tower_config_file: "~/tower_cli.cfg" ''' import os from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI try: import tower_cli import tower_cli.utils.exceptions as exc from tower_cli.conf import settings except ImportError: pass def main(): argument_spec = tower_argument_spec() argument_spec.update(dict( name=dict(required=True), description=dict(), inventory=dict(required=True), variables=dict(), credential=dict(), source=dict(choices=["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6", "cloudforms", "custom"], default="manual"), source_regions=dict(), source_vars=dict(), instance_filters=dict(), group_by=dict(), source_script=dict(), overwrite=dict(type='bool', default=False), overwrite_vars=dict(), update_on_launch=dict(type='bool', default=False), state=dict(choices=['present', 'absent'], default='present'), )) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if not HAS_TOWER_CLI: module.fail_json(msg='ansible-tower-cli required for this module') name = module.params.get('name') inventory = module.params.get('inventory') credential = module.params.get('credential') state = module.params.get('state') variables = module.params.get('variables') if variables: if variables.startswith('@'): filename = os.path.expanduser(variables[1:]) variables 
= module.contents_from_file(filename) json_output = {'group': name, 'state': state} tower_auth = tower_auth_config(module) with settings.runtime_values(**tower_auth): tower_check_mode(module) group = tower_cli.get_resource('group') try: params = module.params.copy() params['create_on_missing'] = True params['variables'] = variables inv_res = tower_cli.get_resource('inventory') inv = inv_res.get(name=inventory) params['inventory'] = inv['id'] if credential: cred_res = tower_cli.get_resource('credential') cred = cred_res.get(name=credential) params['credential'] = cred['id'] if state == 'present': result = group.modify(**params) json_output['id'] = result['id'] elif state == 'absent': result = group.delete(**params) except (exc.NotFound) as excinfo: module.fail_json(msg='Failed to update the group, inventory not found: {0}'.format(excinfo), changed=False) except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo: module.fail_json(msg='Failed to update the group: {0}'.format(excinfo), changed=False) json_output['changed'] = result['changed'] module.exit_json(**json_output) from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main()
gpl-3.0
leighpauls/k2cro4
third_party/webpagereplay/third_party/dns/entropy.py
250
3872
# Copyright (C) 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

# NOTE(review): this module is Python 2 code (long literal 4294967296L,
# file() builtin, byte strings as str) — it will not run under Python 3.

import os
import time
try:
    import threading as _threading
except ImportError:
    # Fall back to a no-op lock implementation on thread-less builds.
    import dummy_threading as _threading

class EntropyPool(object):
    """A thread-safe hash-based entropy pool.

    Entropy is mixed (XORed) into a fixed-size pool, and random bytes are
    produced by repeatedly hashing the pool and folding the digest back in.
    Seeding is lazy: the pool is seeded from os.urandom / /dev/urandom (or,
    as a last resort, the current time) on the first byte request.
    """

    def __init__(self, seed=None):
        # Index of the next pool byte to XOR new entropy into (ring buffer).
        self.pool_index = 0
        # Most recent digest; bytes are handed out from it one at a time.
        self.digest = None
        # Index of the next unused byte within self.digest.
        self.next_byte = 0
        self.lock = _threading.Lock()
        # Pick the strongest available hash: hashlib SHA-1, then the
        # deprecated sha module, then md5 (pool size follows digest size).
        try:
            import hashlib
            self.hash = hashlib.sha1()
            self.hash_len = 20
        except:
            try:
                import sha
                self.hash = sha.new()
                self.hash_len = 20
            except:
                import md5
                self.hash = md5.new()
                self.hash_len = 16
        self.pool = '\0' * self.hash_len
        if not seed is None:
            self.stir(seed)
            self.seeded = True
        else:
            self.seeded = False

    def stir(self, entropy, already_locked=False):
        """XOR *entropy* (a byte string) into the pool, wrapping around.

        Pass already_locked=True when the caller holds self.lock, to avoid
        a (non-reentrant) double acquire.
        """
        if not already_locked:
            self.lock.acquire()
        try:
            bytes = [ord(c) for c in self.pool]
            for c in entropy:
                if self.pool_index == self.hash_len:
                    self.pool_index = 0
                b = ord(c) & 0xff
                bytes[self.pool_index] ^= b
                self.pool_index += 1
            self.pool = ''.join([chr(c) for c in bytes])
        finally:
            if not already_locked:
                self.lock.release()

    def _maybe_seed(self):
        """Seed the pool on first use.  Caller must already hold self.lock.

        Sources, in order of preference: os.urandom, /dev/urandom read
        directly, and finally the current time (weak fallback).
        """
        if not self.seeded:
            try:
                seed = os.urandom(16)
            except:
                try:
                    # '0' buffer size: unbuffered read of the device.
                    r = file('/dev/urandom', 'r', 0)
                    try:
                        seed = r.read(16)
                    finally:
                        r.close()
                except:
                    seed = str(time.time())
            self.seeded = True
            self.stir(seed, True)

    def random_8(self):
        """Return one random byte (int in 0..255)."""
        self.lock.acquire()
        self._maybe_seed()
        try:
            # Refill: hash the pool for a fresh digest, then stir the digest
            # back in so the pool state advances.
            if self.digest is None or self.next_byte == self.hash_len:
                self.hash.update(self.pool)
                self.digest = self.hash.digest()
                self.stir(self.digest, True)
                self.next_byte = 0
            value = ord(self.digest[self.next_byte])
            self.next_byte += 1
        finally:
            self.lock.release()
        return value

    def random_16(self):
        """Return a random 16-bit unsigned integer."""
        return self.random_8() * 256 + self.random_8()

    def random_32(self):
        """Return a random 32-bit unsigned integer."""
        return self.random_16() * 65536 + self.random_16()

    def random_between(self, first, last):
        """Return a random integer in [first, last] (inclusive).

        Raises ValueError when the range exceeds 2**32 values.  Uses the
        smallest generator that covers the range, scaled down by division
        (note: 'max' here deliberately shadows the builtin, original style).
        """
        size = last - first + 1
        if size > 4294967296L:
            raise ValueError('too big')
        if size > 65536:
            rand = self.random_32
            max = 4294967295L
        elif size > 256:
            rand = self.random_16
            max = 65535
        else:
            rand = self.random_8
            max = 255
        return (first + size * rand() // (max + 1))

# Module-level shared pool used by the convenience functions below.
pool = EntropyPool()

def random_16():
    """Module-level shortcut: 16-bit random from the shared pool."""
    return pool.random_16()

def between(first, last):
    """Module-level shortcut: random integer in [first, last]."""
    return pool.random_between(first, last)
bsd-3-clause
ned14/BEurtle
Installer/test/ZSI-2.1-a1/ZSI/resolvers.py
2
4755
#! /usr/bin/env python # $Header$ '''SOAP messaging parsing. ''' from ZSI import _copyright, _child_elements, EvaluateException, TC import multifile, mimetools, urllib from base64 import decodestring as b64decode import cStringIO as StringIO def Opaque(uri, tc, ps, **keywords): '''Resolve a URI and return its content as a string. ''' source = urllib.urlopen(uri, **keywords) enc = source.info().getencoding() if enc in ['7bit', '8bit', 'binary']: return source.read() data = StringIO.StringIO() mimetools.decode(source, data, enc) return data.getvalue() def XML(uri, tc, ps, **keywords): '''Resolve a URI and return its content as an XML DOM. ''' source = urllib.urlopen(uri, **keywords) enc = source.info().getencoding() if enc in ['7bit', '8bit', 'binary']: data = source else: data = StringIO.StringIO() mimetools.decode(source, data, enc) data.seek(0) dom = ps.readerclass().fromStream(data) return _child_elements(dom)[0] class NetworkResolver: '''A resolver that support string and XML. ''' def __init__(self, prefix=None): self.allowed = prefix or [] def _check_allowed(self, uri): for a in self.allowed: if uri.startswith(a): return raise EvaluateException("Disallowed URI prefix") def Opaque(self, uri, tc, ps, **keywords): self._check_allowed(uri) return Opaque(uri, tc, ps, **keywords) def XML(self, uri, tc, ps, **keywords): self._check_allowed(uri) return XML(uri, tc, ps, **keywords) def Resolve(self, uri, tc, ps, **keywords): if isinstance(tc, TC.XML): return XML(uri, tc, ps, **keywords) return Opaque(uri, tc, ps, **keywords) class MIMEResolver: '''Multi-part MIME resolver -- SOAP With Attachments, mostly. ''' def __init__(self, ct, f, next=None, uribase='thismessage:/', seekable=0, **kw): # Get the boundary. It's too bad I have to write this myself, # but no way am I going to import cgi for 10 lines of code! 
for param in ct.split(';'): a = param.strip() if a.startswith('boundary='): if a[9] in [ '"', "'" ]: boundary = a[10:-1] else: boundary = a[9:] break else: raise ValueError('boundary parameter not found') self.id_dict, self.loc_dict, self.parts = {}, {}, [] self.next = next self.base = uribase mf = multifile.MultiFile(f, seekable) mf.push(boundary) while mf.next(): head = mimetools.Message(mf) body = StringIO.StringIO() mimetools.decode(mf, body, head.getencoding()) body.seek(0) part = (head, body) self.parts.append(part) key = head.get('content-id') if key: if key[0] == '<' and key[-1] == '>': key = key[1:-1] self.id_dict[key] = part key = head.get('content-location') if key: self.loc_dict[key] = part mf.pop() def GetSOAPPart(self): '''Get the SOAP body part. ''' head, part = self.parts[0] return StringIO.StringIO(part.getvalue()) def get(self, uri): '''Get the content for the bodypart identified by the uri. ''' if uri.startswith('cid:'): # Content-ID, so raise exception if not found. head, part = self.id_dict[uri[4:]] return StringIO.StringIO(part.getvalue()) if self.loc_dict.has_key(uri): head, part = self.loc_dict[uri] return StringIO.StringIO(part.getvalue()) return None def Opaque(self, uri, tc, ps, **keywords): content = self.get(uri) if content: return content.getvalue() if not self.next: raise EvaluateException("Unresolvable URI " + uri) return self.next.Opaque(uri, tc, ps, **keywords) def XML(self, uri, tc, ps, **keywords): content = self.get(uri) if content: dom = ps.readerclass().fromStream(content) return _child_elements(dom)[0] if not self.next: raise EvaluateException("Unresolvable URI " + uri) return self.next.XML(uri, tc, ps, **keywords) def Resolve(self, uri, tc, ps, **keywords): if isinstance(tc, TC.XML): return self.XML(uri, tc, ps, **keywords) return self.Opaque(uri, tc, ps, **keywords) def __getitem__(self, cid): head, body = self.id_dict[cid] newio = StringIO.StringIO(body.getvalue()) return newio if __name__ == '__main__': print _copyright
lgpl-2.1
qjcina/GomokuBot
GomokuBot/GomokuBot/Input/MouseClicker.py
1
1119
from win32 import win32api, win32gui
import win32con
import time
from Input.CommandListener import *


class MouseClicker(object):
    """Performs left mouse clicks at given screen coordinates.

    Clicking can be blocked by the user via the shared CommandListener
    flag (toggled with SPACE); while the flag is set, click() refuses to
    act and reports the attempted position instead.
    """

    # Class-level placeholder kept for interface compatibility; instances
    # track their own last click position in self.lastPos.
    lastMousePosition = None

    def __init__(self):
        self.oCommandListener = getCommandListener()
        # Last position this object clicked at; used to avoid repeating the
        # "press SPACE" hint when the cursor has not moved since our click.
        self.lastPos = (0, 0)

    def click(self, iX, iY):
        """Left-click at (iX, iY), then restore the user's cursor position.

        Returns True when the click was performed, False when clicking is
        currently blocked by the command listener flag.
        """
        iX = int(iX)
        iY = int(iY)
        if self.oCommandListener.Flag is False:
            # Remember where the user's cursor is so we can put it back.
            flags, hcursor, (x, y) = win32gui.GetCursorInfo()
            if self.lastPos != (x, y):
                print("If you want to block mouse moving press SPACE!")
            self.lastPos = (iX, iY)
            win32api.SetCursorPos((iX, iY))
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, iX, iY, 0, 0)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, iX, iY, 0, 0)
            # Restore the cursor to where the user had it before our click.
            win32api.SetCursorPos((x, y))
            print("Mouse click at", iX, iY)
            return True
        else:
            # BUG FIX: the original printed iX twice ("...at", iX, iX)
            # instead of the full (iX, iY) coordinate pair.
            print("Tried to click at", iX, iY)
            return False

    def tryClick(self, iX, iY, iTimeToSleep=1):
        """Retry click() every iTimeToSleep seconds until it succeeds
        (i.e. until the user unblocks clicking)."""
        while not self.click(iX, iY):
            time.sleep(iTimeToSleep)
gpl-2.0
unixnut/cpylmnl
examples/netfilter/nf-log-graphite.py
2
9342
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function, absolute_import import sys, logging, errno import socket, time, struct, multiprocessing import signal import cPickle as pickle import dpkt import cpylmnl.linux.netlinkh as netlink import cpylmnl.linux.netfilter.nfnetlinkh as nfnl import cpylmnl.linux.netfilter.nfnetlink_logh as nfulnl import cpylmnl as mnl """quote from ulogd2 document Just add rules using the NFLOG target to your firewalling chain. A very basic example: iptables -A FORWARD -j NFLOG --nflog-group 32 --nflog-prefix foo To increase logging performance, try to use the --nflog-qthreshold N option (where 1 < N <= 50). The number you specify is the amount of packets batched together in one multipart netlink message. If you set this to 20, the kernel schedules ulogd only once every 20 packets. All 20 packets are then processed by ulogd. This reduces the number of context switches between kernel and userspace. Of course you can combine the NFLOG target with the different netfilter match modules. For a more detailed description, have a look at the netfilter HOWTO's, available on the netfilter homepage. ... --nflog-range N Copyrange. This works like the 'snaplen' parameter of tcpdump. You can specify a number of bytes up to which the packet is copied. If you say '40', you will receive the first fourty bytes of every packet. Leave it to 0 to dump the whole packet. my adversaria: iptables -t raw -A PREROUTING -j NFLOG --nflog-group 1 --nflog-prefix myrouter \ --nflog-qthreshold 16 --nflog-range 64 --- carbon path structure is: <src addr>.<dst addr>.<protocol> IPv4 addresses are not dotted decimal, divide decimal by ``:'' because of my lack of knowledge. Value is (epoch from time.time(), <IP datagram length>) Talking about iptables options above, --nflog-range would be enough 64 for those addresses, l4 proto. Sending to carbon every sigalarm raised by seeing ``sendable'' global variable. 
""" log = logging.getLogger(__name__) CARBON_SERVER = '127.0.0.1' CARBON_PORT = 2004 @mnl.attr_cb def parse_attr_cb(attr, tb): """only interested in length from NFULA_PACKET_HDR payload from NFULA_PAYLOAD """ attr_type = attr.get_type() if attr_type == nfulnl.NFULA_PACKET_HDR: try: attr.validate2(mnl.MNL_TYPE_UNSPEC, nfulnl.NfulnlMsgPacketHdr.csize()) except OSError as e: log.warn("invalid NFULA_PACKET_HDR: %s" % e) else: tb[attr_type] = attr elif attr_type == nfulnl.NFULA_PAYLOAD: tb[attr_type] = attr return mnl.MNL_CB_OK def make_tuple(ethtype, pktbuf): """make 3 elements list. src and dst address, l4 protocol """ if ethtype == 0x0800: # ETH_P_IP: dg = dpkt.ip.IP(pktbuf) elif ethtype == 0x86DD: # ETH_P_IPV6: dg = dpkt.ip6.IP6(pktbuf) elif ethtype == 0x0806: # ETH_P_ARP # dg = dpkt.arp.ARP(pktbuf) log.info("ignore ARP") return None else: log.info("ignore unknown ether type (not in ETH_P_IP, ETH_P_IPV6, ETH_P_ARP)") return None return (dg.src, dg.dst, dg.p) @mnl.nlmsg_cb def log_cb(nlh, data): tb = dict() nlh.parse(nfnl.Nfgenmsg.csize(), parse_attr_cb, tb) if not nfulnl.NFULA_PACKET_HDR in tb: log.warn("no NFULA_PACKET_HDR") return mnl.MNL_CB_OK if not nfulnl.NFULA_PAYLOAD in tb: log.warn("no NFULA_PAYLOAD") return mnl.MNL_CB_OK ph = tb[nfulnl.NFULA_PACKET_HDR].get_payload_as(nfulnl.NfulnlMsgPacketHdr) # copying - dpkt require bytes, it uses struct.unpack pkt_buffer = bytes(bytearray(tb[nfulnl.NFULA_PAYLOAD].get_payload_v())) k = make_tuple(socket.ntohs(ph.hw_protocol), pkt_buffer) if k is not None: data[k] = data.get(k, 0) + len(pkt_buffer) return mnl.MNL_CB_OK def nflog_build_cfg_pf_request(buf, command): nlh = mnl.Nlmsg(buf) nlh.put_header() nlh.nlmsg_type = (nfnl.NFNL_SUBSYS_ULOG << 8) | nfulnl.NFULNL_MSG_CONFIG nlh.nlmsg_flags = netlink.NLM_F_REQUEST nfg = nlh.put_extra_header_as(nfnl.Nfgenmsg) nfg.nfgen_family = socket.AF_INET nfg.version = nfnl.NFNETLINK_V0 cmd = nfulnl.NfulnlMsgConfigCmd() cmd.command = command nlh.put(nfulnl.NFULA_CFG_CMD, cmd) return nlh 
def nflog_build_cfg_request(buf, command, qnum): nlh = mnl.Nlmsg(buf) nlh.put_header() nlh.nlmsg_type = (nfnl.NFNL_SUBSYS_ULOG << 8) | nfulnl.NFULNL_MSG_CONFIG nlh.nlmsg_flags = netlink.NLM_F_REQUEST nfg = nlh.put_extra_header_as(nfnl.Nfgenmsg) nfg.nfgen_family = socket.AF_INET nfg.version = nfnl.NFNETLINK_V0 nfg.res_id = socket.htons(qnum) cmd = nfulnl.NfulnlMsgConfigCmd() cmd.command = command nlh.put(nfulnl.NFULA_CFG_CMD, cmd) return nlh def nflog_build_cfg_params(buf, mode, copy_range, qnum): nlh = mnl.Nlmsg(buf) nlh.put_header() nlh.nlmsg_type = (nfnl.NFNL_SUBSYS_ULOG << 8) | nfulnl.NFULNL_MSG_CONFIG nlh.nlmsg_flags = netlink.NLM_F_REQUEST nfg = nlh.put_extra_header_as(nfnl.Nfgenmsg) nfg.nfgen_family = socket.AF_UNSPEC nfg.version = nfnl.NFNETLINK_V0 nfg.res_id = socket.htons(qnum) params = nfulnl.NfulnlMsgConfigMode() params.copy_range = socket.htonl(copy_range) params.copy_mode = mode nlh.put(nfulnl.NFULA_CFG_MODE, params) return nlh def make_carbon_path(t): # t: (saddr, dattr, proto) # XXX: addr len condition if len(t[0]) == 4: # IPv4 "represents IPv4 address :decimal" return ".".join((":".join([str(ord(i)) for i in t[0]]), ":".join([str(ord(i)) for i in t[1]]), str(t[2]))) else: # IPv6 return ".".join((":".join(["%04x" % ((ord(t[0][i]) << 8) + ord(t[0][i + 1])) for i in range(len(t[0])) if i %2 == 0]), ":".join(["%04x" % ((ord(t[1][i]) << 8) + ord(t[1][i + 1])) for i in range(len(t[1])) if i %2 == 0]), str(t[2]))) def send_process(sock, q): while True: # got from q: {(saddr, dattr, proto): payload_len} d = q.get() if d is None: return now = int(time.time()) listOfMetricTuples = [] for k, v in d.iteritems(): listOfMetricTuples.append((make_carbon_path(k), (now, v))) """ print("\n\nsending entries: %d" % len(listOfMetricTuples)) for e in listOfMetricTuples: print(e) """ payload = pickle.dumps(listOfMetricTuples) header = struct.pack("!L", len(payload)) message = header + payload # should catch EINTR? 
sock.sendall(message) sendable = False def alarm_handler(signum, frame): global sendable sendable = True def main(): if len(sys.argv) != 2: print("Usage: %s [queue_num]" % sys.argv[0]) sys.exit(-1) # for netlink sending qnum = int(sys.argv[1]) buf = bytearray(mnl.MNL_SOCKET_BUFFER_SIZE) # prepare for sending to carbon sock = socket.socket() try: sock.connect((CARBON_SERVER, CARBON_PORT)) except Exception as e: log.fatal("could not connect to carbon server %d@%s" % (CARBON_PORT, CARBON_SERVER)) sys.exit(-1) q = multiprocessing.Queue() # XXX: size? p = multiprocessing.Process(target=send_process, args=(sock, q)) p.start() # netlink transaction with mnl.Socket(netlink.NETLINK_NETFILTER) as nl: # request that I want to acquire qnum queue packet log nl.bind(0, mnl.MNL_SOCKET_AUTOPID) portid = nl.get_portid() nlh = nflog_build_cfg_pf_request(buf, nfulnl.NFULNL_CFG_CMD_PF_UNBIND) nl.send_nlmsg(nlh) nlh = nflog_build_cfg_pf_request(buf, nfulnl.NFULNL_CFG_CMD_PF_BIND) nl.send_nlmsg(nlh) nlh = nflog_build_cfg_request(buf, nfulnl.NFULNL_CFG_CMD_BIND, qnum) nl.send_nlmsg(nlh) nlh = nflog_build_cfg_params(buf, nfulnl.NFULNL_COPY_PACKET, 0xFFFF, qnum) nl.send_nlmsg(nlh) # prepare sigalrm global sendable signal.signal(signal.SIGALRM, alarm_handler) signal.setitimer(signal.ITIMER_REAL, 2, 10) # {(saddr, dattr, proto): payload_len} data = dict() # receiving loop ret = mnl.MNL_CB_OK while ret >= mnl.MNL_CB_STOP: try: nrecv = nl.recv_into(buf) except OSError as oe: if oe.errno == errno.EINTR: continue except Exception as e: log.error("mnl_socket_recvfrom: %s" % e) continue try: ret = mnl.cb_run(buf[:nrecv], 0, portid, log_cb, data) except Exception as e: log.error("mnl_cb_run: %s" %e) if sendable and len(data) > 0: sendable = False q.put(data) data = dict() q.put(None) if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(module)s.%(funcName)s line: %(lineno)d %(message)s') main()
lgpl-2.1
JrtPec/opengrid
opengrid/library/plotting.py
2
6242
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 26 18:03:24 2014

@author: KDB
"""
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import date2num, num2date, HourLocator, DayLocator, AutoDateLocator, DateFormatter
from matplotlib.colors import LogNorm


def carpet(timeseries, **kwargs):
    """
    Draw a carpet plot of a pandas timeseries.

    The carpet plot reads like a letter. Every day one line is added to the
    bottom of the figure, minute for minute moving from left (morning) to right
    (evening).
    The color denotes the level of consumption and is scaled logarithmically.
    If vmin and vmax are not provided as inputs, the minimum and maximum of the
    (resampled) timeseries are used.

    Parameters
    ----------
    timeseries : pandas.Series
        Must have a timezone-aware DatetimeIndex.
    vmin, vmax : float, optional
        If given, determine the range of the color (z) axis; otherwise derived
        from the data (vmin floored at 0.1 for the log scale, vmax at the
        99.9th percentile to suppress outliers).
    zlabel, title : str, optional
        Colorbar label and figure title; default to the series name.
    cmap : matplotlib.cm instance, default coolwarm

    Returns
    -------
    matplotlib.image.AxesImage or None
        None when the series contains no data.
    """
    # Optional styling parameters, popped so the remaining kwargs can be
    # forwarded verbatim to imshow.
    cmap = kwargs.pop('cmap', cm.coolwarm)
    norm = kwargs.pop('norm', LogNorm())
    interpolation = kwargs.pop('interpolation', 'nearest')
    cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
    title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')

    # Nothing to draw for an all-NaN/empty series.
    if timeseries.dropna().empty:
        print('skipped {} - no data'.format(title))
        return
    # NOTE(review): resample(..., how=...) is the legacy pandas API
    # (removed in modern pandas) — confirm the pinned pandas version.
    ts = timeseries.resample('min', how='mean', label='left', closed='left')
    # Floor vmin at 0.1 because a LogNorm cannot represent 0.
    vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
    vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))

    # convert to dataframe with date as index and time as columns by
    # first replacing the index by a MultiIndex
    # tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
    mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
    ts.index = pd.MultiIndex.from_arrays(
        [np.floor(mpldatetimes), 2 + mpldatetimes % 1])  # '2 +': matplotlib bug workaround.
    # and then unstacking the second index level to columns
    df = ts.unstack()

    # data plotting
    fig, ax = plt.subplots()
    # define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
    extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
    im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
                    interpolation=interpolation, **kwargs)

    # figure formatting
    # x axis: times of day, tick every two hours
    ax.xaxis_date()
    ax.xaxis.set_major_locator(HourLocator(interval=2))
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    ax.xaxis.grid(True)
    plt.xlabel('UTC Time')

    # y axis: one row per day
    ax.yaxis_date()
    dmin, dmax = ax.yaxis.get_data_interval()
    number_of_days = (num2date(dmax) - num2date(dmin)).days
    # AutoDateLocator is not suited in case few data is available
    if abs(number_of_days) <= 35:
        ax.yaxis.set_major_locator(DayLocator())
    else:
        ax.yaxis.set_major_locator(AutoDateLocator())
    ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))

    # colorbar with logarithmically spaced ticks spanning [vmin, vmax]
    cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
    cb = plt.colorbar(format='%.0f', ticks=cbticks)
    cb.set_label(cblabel)

    plt.title(title)

    return im


def fanchart(timeseries, **kwargs):
    """
    Draw a fan chart of the daily consumption profile.

    The fan chart shows the different quantiles of the daily consumption, with
    the blue line representing the median, and the black dashed line the mean.
    By default, the consumption of the whole day is taken, but one can select
    the hours of interest, e.g. night time standby consumption.

    Parameters
    ----------
    timeseries : pandas.Series
        Must have a timezone-aware DatetimeIndex.
    start_hour, end_hour : int or float, optional
        Start and end hours of period of interest, default values are 0, 24.
        As of now, ensure that start_hour < end_hour.
    ylabel, title : str, optional
        y-axis label and figure title; default to the series name.

    Returns
    -------
    list of matplotlib.lines.Line2D or None
        None when the series contains no data.
    """
    # '2 +' matches the MultiIndex time encoding used in carpet() (matplotlib
    # date-number workaround), so truncation below lines up with the columns.
    start_hour = 2. + kwargs.pop('start_hour', 0.) / 24.
    end_hour = 2. + kwargs.pop('end_hour', 24.) / 24.
    ylabel = kwargs.pop('ylabel', timeseries.name if timeseries.name else '')
    # BUG FIX: the default title said 'carpet plot: ' — a copy-paste from
    # carpet(); a fan chart should be titled as such.
    title = kwargs.pop('title', 'fan chart: ' + timeseries.name if timeseries.name else '')

    # Nothing to draw for an all-NaN/empty series.
    if timeseries.dropna().empty:
        print('skipped {} - no data'.format(title))
        return
    # NOTE(review): legacy resample(..., how=...) API, see carpet().
    ts = timeseries.resample('min', how='mean', label='left', closed='left')

    # convert to dataframe with date as index and time as columns by
    # first replacing the index by a MultiIndex
    # tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
    mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
    ts.index = pd.MultiIndex.from_arrays(
        [np.floor(mpldatetimes), 2 + mpldatetimes % 1])  # '2 +': matplotlib bug workaround.
    # and then unstacking the second index level to columns
    df = ts.unstack()
    # Transpose so rows are times of day, then keep only the selected window.
    df = df.T.truncate(start_hour, end_hour)
    # num: number of quantile bands on each side of the median;
    # num_max: outermost bands dropped to suppress extreme quantiles.
    num = 20
    num_max = 4
    df_quant = df.quantile(np.linspace(0., 1., 2 * num + 1))

    # data plotting: stacked translucent bands build the fan
    fig, ax = plt.subplots()
    im = plt.plot(df.columns, df_quant.iloc[num], 'b', label='median')
    for i in range(1, num):
        plt.fill_between(df.columns, df_quant.iloc[num - i], df_quant.iloc[min(num + i, 2 * num - num_max)],
                         color='b', alpha=0.05)
    plt.plot(df.columns, df.mean(), 'k--', label='mean')
    plt.legend()

    # x axis
    ax.xaxis_date()
    plt.xlim(df.columns[0], df.columns[-1])
    plt.ylabel(ylabel)

    plt.title(title)
    plt.grid(True)

    return im
apache-2.0
lfz/Guided-Denoise
Attackset/fgsm_v3_random/nets/inception_v1_test.py
54
8721
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nets.inception_v1.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from nets import inception slim = tf.contrib.slim class InceptionV1Test(tf.test.TestCase): def testBuildClassificationNetwork(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = inception.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Predictions' in end_points) self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes]) def testBuildBaseNetwork(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) mixed_6c, end_points = inception.inception_v1_base(inputs) self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertListEqual(mixed_6c.get_shape().as_list(), [batch_size, 7, 7, 1024]) expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c'] self.assertItemsEqual(end_points.keys(), expected_endpoints) def testBuildOnlyUptoFinalEndpoint(self): batch_size = 5 height, width = 224, 224 endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c'] for index, endpoint in enumerate(endpoints): with tf.Graph().as_default(): inputs = tf.random_uniform((batch_size, height, width, 3)) out_tensor, end_points = inception.inception_v1_base( inputs, final_endpoint=endpoint) self.assertTrue(out_tensor.op.name.startswith( 'InceptionV1/' + endpoint)) self.assertItemsEqual(endpoints[:index+1], end_points) def testBuildAndCheckAllEndPointsUptoMixed5c(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v1_base(inputs, final_endpoint='Mixed_5c') endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64], 'MaxPool_2a_3x3': [5, 56, 56, 64], 'Conv2d_2b_1x1': [5, 56, 56, 64], 'Conv2d_2c_3x3': [5, 56, 56, 192], 'MaxPool_3a_3x3': [5, 28, 28, 192], 'Mixed_3b': [5, 28, 28, 256], 'Mixed_3c': [5, 28, 28, 480], 'MaxPool_4a_3x3': [5, 14, 14, 480], 'Mixed_4b': [5, 14, 14, 512], 'Mixed_4c': [5, 14, 14, 512], 'Mixed_4d': [5, 14, 14, 512], 'Mixed_4e': [5, 14, 14, 528], 'Mixed_4f': [5, 14, 14, 832], 'MaxPool_5a_2x2': [5, 7, 7, 832], 'Mixed_5b': [5, 7, 7, 832], 'Mixed_5c': [5, 7, 7, 1024]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testModelHasExpectedNumberOfParameters(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 
3)) with slim.arg_scope(inception.inception_v1_arg_scope()): inception.inception_v1_base(inputs) total_params, _ = slim.model_analyzer.analyze_vars( slim.get_model_variables()) self.assertAlmostEqual(5607184, total_params) def testHalfSizeImages(self): batch_size = 5 height, width = 112, 112 inputs = tf.random_uniform((batch_size, height, width, 3)) mixed_5c, _ = inception.inception_v1_base(inputs) self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertListEqual(mixed_5c.get_shape().as_list(), [batch_size, 4, 4, 1024]) def testUnknownImageShape(self): tf.reset_default_graph() batch_size = 2 height, width = 224, 224 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.test_session() as sess: inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) logits, end_points = inception.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Mixed_5c'] feed_dict = {inputs: input_np} tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) def testUnknowBatchSize(self): batch_size = 1 height, width = 224, 224 num_classes = 1000 inputs = tf.placeholder(tf.float32, (None, height, width, 3)) logits, _ = inception.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluation(self): batch_size = 2 height, width = 224, 224 num_classes = 1000 eval_inputs = 
tf.random_uniform((batch_size, height, width, 3)) logits, _ = inception.inception_v1(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testTrainEvalWithReuse(self): train_batch_size = 5 eval_batch_size = 2 height, width = 224, 224 num_classes = 1000 train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) inception.inception_v1(train_inputs, num_classes) eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) def testLogitsNotSqueezed(self): num_classes = 25 images = tf.random_uniform([1, 224, 224, 3]) logits, _ = inception.inception_v1(images, num_classes=num_classes, spatial_squeeze=False) with self.test_session() as sess: tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) if __name__ == '__main__': tf.test.main()
apache-2.0
rooty/minishopgae
werkzeug/debug/repr.py
89
8968
# -*- coding: utf-8 -*- """ werkzeug.debug.repr ~~~~~~~~~~~~~~~~~~~ This module implements object representations for debugging purposes. Unlike the default repr these reprs expose a lot more information and produce HTML instead of ASCII. Together with the CSS and JavaScript files of the debugger this gives a colorful and more compact output. :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. :license: BSD. """ import sys import re from traceback import format_exception_only try: from collections import deque except ImportError: # pragma: no cover deque = None from werkzeug.utils import escape missing = object() _paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}') RegexType = type(_paragraph_re) HELP_HTML = '''\ <div class=box> <h3>%(title)s</h3> <pre class=help>%(text)s</pre> </div>\ ''' OBJECT_DUMP_HTML = '''\ <div class=box> <h3>%(title)s</h3> %(repr)s <table>%(items)s</table> </div>\ ''' def debug_repr(obj): """Creates a debug repr of an object as HTML unicode string.""" return DebugReprGenerator().repr(obj) def dump(obj=missing): """Print the object details to stdout._write (for the interactive console of the web debugger. """ gen = DebugReprGenerator() if obj is missing: rv = gen.dump_locals(sys._getframe(1).f_locals) else: rv = gen.dump_object(obj) sys.stdout._write(rv) class _Helper(object): """Displays an HTML version of the normal help, for the interactive debugger only because it requires a patched sys.stdout. """ def __repr__(self): return 'Type help(object) for help about object.' 
def __call__(self, topic=None): if topic is None: sys.stdout._write('<span class=help>%s</span>' % repr(self)) return import pydoc pydoc.help(topic) rv = sys.stdout.reset().decode('utf-8', 'ignore') paragraphs = _paragraph_re.split(rv) if len(paragraphs) > 1: title = paragraphs[0] text = '\n\n'.join(paragraphs[1:]) else: # pragma: no cover title = 'Help' text = paragraphs[0] sys.stdout._write(HELP_HTML % {'title': title, 'text': text}) helper = _Helper() def _add_subclass_info(inner, obj, base): if isinstance(base, tuple): for base in base: if type(obj) is base: return inner elif type(obj) is base: return inner module = '' if obj.__class__.__module__ not in ('__builtin__', 'exceptions'): module = '<span class="module">%s.</span>' % obj.__class__.__module__ return '%s%s(%s)' % (module, obj.__class__.__name__, inner) class DebugReprGenerator(object): def __init__(self): self._stack = [] def _sequence_repr_maker(left, right, base=object(), limit=8): def proxy(self, obj, recursive): if recursive: return _add_subclass_info(left + '...' + right, obj, base) buf = [left] have_extended_section = False for idx, item in enumerate(obj): if idx: buf.append(', ') if idx == limit: buf.append('<span class="extended">') have_extended_section = True buf.append(self.repr(item)) if have_extended_section: buf.append('</span>') buf.append(right) return _add_subclass_info(u''.join(buf), obj, base) return proxy list_repr = _sequence_repr_maker('[', ']', list) tuple_repr = _sequence_repr_maker('(', ')', tuple) set_repr = _sequence_repr_maker('set([', '])', set) frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset) if deque is not None: deque_repr = _sequence_repr_maker('<span class="module">collections.' 
'</span>deque([', '])', deque) del _sequence_repr_maker def regex_repr(self, obj): pattern = repr(obj.pattern).decode('string-escape', 'ignore') if pattern[:1] == 'u': pattern = 'ur' + pattern[1:] else: pattern = 'r' + pattern return u're.compile(<span class="string regex">%s</span>)' % pattern def string_repr(self, obj, limit=70): buf = ['<span class="string">'] escaped = escape(obj) a = repr(escaped[:limit]) b = repr(escaped[limit:]) if isinstance(obj, unicode): buf.append('u') a = a[1:] b = b[1:] if b != "''": buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>')) else: buf.append(a) buf.append('</span>') return _add_subclass_info(u''.join(buf), obj, (str, unicode)) def dict_repr(self, d, recursive, limit=5): if recursive: return _add_subclass_info(u'{...}', d, dict) buf = ['{'] have_extended_section = False for idx, (key, value) in enumerate(d.iteritems()): if idx: buf.append(', ') if idx == limit - 1: buf.append('<span class="extended">') have_extended_section = True buf.append('<span class="pair"><span class="key">%s</span>: ' '<span class="value">%s</span></span>' % (self.repr(key), self.repr(value))) if have_extended_section: buf.append('</span>') buf.append('}') return _add_subclass_info(u''.join(buf), d, dict) def object_repr(self, obj): return u'<span class="object">%s</span>' % \ escape(repr(obj).decode('utf-8', 'replace')) def dispatch_repr(self, obj, recursive): if obj is helper: return u'<span class="help">%r</span>' % helper if isinstance(obj, (int, long, float, complex)): return u'<span class="number">%r</span>' % obj if isinstance(obj, basestring): return self.string_repr(obj) if isinstance(obj, RegexType): return self.regex_repr(obj) if isinstance(obj, list): return self.list_repr(obj, recursive) if isinstance(obj, tuple): return self.tuple_repr(obj, recursive) if isinstance(obj, set): return self.set_repr(obj, recursive) if isinstance(obj, frozenset): return self.frozenset_repr(obj, recursive) if isinstance(obj, dict): return 
self.dict_repr(obj, recursive) if deque is not None and isinstance(obj, deque): return self.deque_repr(obj, recursive) return self.object_repr(obj) def fallback_repr(self): try: info = ''.join(format_exception_only(*sys.exc_info()[:2])) except Exception: # pragma: no cover info = '?' return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' \ u'</span>' % escape(info.decode('utf-8', 'ignore').strip()) def repr(self, obj): recursive = False for item in self._stack: if item is obj: recursive = True break self._stack.append(obj) try: try: return self.dispatch_repr(obj, recursive) except Exception: return self.fallback_repr() finally: self._stack.pop() def dump_object(self, obj): repr = items = None if isinstance(obj, dict): title = 'Contents of' items = [] for key, value in obj.iteritems(): if not isinstance(key, basestring): items = None break items.append((key, self.repr(value))) if items is None: items = [] repr = self.repr(obj) for key in dir(obj): try: items.append((key, self.repr(getattr(obj, key)))) except Exception: pass title = 'Details for' title += ' ' + object.__repr__(obj)[1:-1] return self.render_object_dump(items, title, repr) def dump_locals(self, d): items = [(key, self.repr(value)) for key, value in d.items()] return self.render_object_dump(items, 'Local variables in frame') def render_object_dump(self, items, title, repr=None): html_items = [] for key, value in items: html_items.append('<tr><th>%s<td><pre class=repr>%s</pre>' % (escape(key), value)) if not html_items: html_items.append('<tr><td><em>Nothing</em>') return OBJECT_DUMP_HTML % { 'title': escape(title), 'repr': repr and '<pre class=repr>%s</pre>' % repr or '', 'items': '\n'.join(html_items) }
gpl-3.0
tommo/gii
lib/3rdparty/common/pygments/regexopt.py
78
3067
# -*- coding: utf-8 -*- """ pygments.regexopt ~~~~~~~~~~~~~~~~~ An algorithm that generates optimized regexes for matching long lists of literal strings. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from re import escape from os.path import commonprefix from itertools import groupby from operator import itemgetter CS_ESCAPE = re.compile(r'[\^\\\-\]]') FIRST_ELEMENT = itemgetter(0) def make_charset(letters): return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']' def regex_opt_inner(strings, open_paren): """Return a regex that matches any string in the sorted list of strings.""" close_paren = open_paren and ')' or '' # print strings, repr(open_paren) if not strings: # print '-> nothing left' return '' first = strings[0] if len(strings) == 1: # print '-> only 1 string' return open_paren + escape(first) + close_paren if not first: # print '-> first string empty' return open_paren + regex_opt_inner(strings[1:], '(?:') \ + '?' + close_paren if len(first) == 1: # multiple one-char strings? make a charset oneletter = [] rest = [] for s in strings: if len(s) == 1: oneletter.append(s) else: rest.append(s) if len(oneletter) > 1: # do we have more than one oneletter string? if rest: # print '-> 1-character + rest' return open_paren + regex_opt_inner(rest, '') + '|' \ + make_charset(oneletter) + close_paren # print '-> only 1-character' return make_charset(oneletter) prefix = commonprefix(strings) if prefix: plen = len(prefix) # we have a prefix for all strings # print '-> prefix:', prefix return open_paren + escape(prefix) \ + regex_opt_inner([s[plen:] for s in strings], '(?:') \ + close_paren # is there a suffix? 
strings_rev = [s[::-1] for s in strings] suffix = commonprefix(strings_rev) if suffix: slen = len(suffix) # print '-> suffix:', suffix[::-1] return open_paren \ + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ + escape(suffix[::-1]) + close_paren # recurse on common 1-string prefixes # print '-> last resort' return open_paren + \ '|'.join(regex_opt_inner(list(group[1]), '') for group in groupby(strings, lambda s: s[0] == first[0])) \ + close_paren def regex_opt(strings, prefix='', suffix=''): """Return a compiled regex that matches any string in the given list. The strings to match must be literal strings, not regexes. They will be regex-escaped. *prefix* and *suffix* are pre- and appended to the final regex. """ strings = sorted(strings) return prefix + regex_opt_inner(strings, '(') + suffix
mit
CSC301H-Fall2013/JuakStore
site-packages/tests/regressiontests/utils/feedgenerator.py
104
4304
from __future__ import unicode_literals import datetime from django.utils import feedgenerator, tzinfo, unittest class FeedgeneratorTest(unittest.TestCase): """ Tests for the low-level syndication feed framework. """ def test_get_tag_uri(self): """ Test get_tag_uri() correctly generates TagURIs. """ self.assertEqual( feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)), 'tag:example.org,2004-10-25:/foo/bar/headline') def test_get_tag_uri_with_port(self): """ Test that get_tag_uri() correctly generates TagURIs from URLs with port numbers. """ self.assertEqual( feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)), 'tag:www.example.org,2008-11-14:/2008/11/14/django/headline') def test_rfc2822_date(self): """ Test rfc2822_date() correctly formats datetime objects. """ self.assertEqual( feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)), "Fri, 14 Nov 2008 13:37:00 -0000" ) def test_rfc2822_date_with_timezone(self): """ Test rfc2822_date() correctly formats datetime objects with tzinfo. """ self.assertEqual( feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=60)))), "Fri, 14 Nov 2008 13:37:00 +0100" ) def test_rfc2822_date_without_time(self): """ Test rfc2822_date() correctly formats date objects. """ self.assertEqual( feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)), "Fri, 14 Nov 2008 00:00:00 -0000" ) def test_rfc3339_date(self): """ Test rfc3339_date() correctly formats datetime objects. """ self.assertEqual( feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)), "2008-11-14T13:37:00Z" ) def test_rfc3339_date_with_timezone(self): """ Test rfc3339_date() correctly formats datetime objects with tzinfo. 
""" self.assertEqual( feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=120)))), "2008-11-14T13:37:00+02:00" ) def test_rfc3339_date_without_time(self): """ Test rfc3339_date() correctly formats date objects. """ self.assertEqual( feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)), "2008-11-14T00:00:00Z" ) def test_atom1_mime_type(self): """ Test to make sure Atom MIME type has UTF8 Charset parameter set """ atom_feed = feedgenerator.Atom1Feed("title", "link", "description") self.assertEqual( atom_feed.mime_type, "application/atom+xml; charset=utf-8" ) def test_rss_mime_type(self): """ Test to make sure RSS MIME type has UTF8 Charset parameter set """ rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description") self.assertEqual( rss_feed.mime_type, "application/rss+xml; charset=utf-8" ) # Two regression tests for #14202 def test_feed_without_feed_url_gets_rendered_without_atom_link(self): feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr') self.assertEqual(feed.feed['feed_url'], None) feed_content = feed.writeString('utf-8') self.assertNotIn('<atom:link', feed_content) self.assertNotIn('href="/feed/"', feed_content) self.assertNotIn('rel="self"', feed_content) def test_feed_with_feed_url_gets_rendered_with_atom_link(self): feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/') self.assertEqual(feed.feed['feed_url'], '/feed/') feed_content = feed.writeString('utf-8') self.assertIn('<atom:link', feed_content) self.assertIn('href="/feed/"', feed_content) self.assertIn('rel="self"', feed_content)
mit
brianlsharp/MissionPlanner
Lib/site-packages/numpy/linalg/__init__.py
54
2194
""" Core Linear Algebra Tools ========================= =============== ========================================================== Linear algebra basics ========================================================================== norm Vector or matrix norm inv Inverse of a square matrix solve Solve a linear system of equations det Determinant of a square matrix slogdet Logarithm of the determinant of a square matrix lstsq Solve linear least-squares problem pinv Pseudo-inverse (Moore-Penrose) calculated using a singular value decomposition matrix_power Integer power of a square matrix =============== ========================================================== =============== ========================================================== Eigenvalues and decompositions ========================================================================== eig Eigenvalues and vectors of a square matrix eigh Eigenvalues and eigenvectors of a Hermitian matrix eigvals Eigenvalues of a square matrix eigvalsh Eigenvalues of a Hermitian matrix qr QR decomposition of a matrix svd Singular value decomposition of a matrix cholesky Cholesky decomposition of a matrix =============== ========================================================== =============== ========================================================== Tensor operations ========================================================================== tensorsolve Solve a linear tensor equation tensorinv Calculate an inverse of a tensor =============== ========================================================== =============== ========================================================== Exceptions ========================================================================== LinAlgError Indicates a failed linear algebra operation =============== ========================================================== """ # To get sub-modules from info import __doc__ from linalg import * from numpy.testing import Tester test = Tester(__file__).test bench = 
Tester(__file__).test
gpl-3.0
dyyi/moneybook
venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/request.py
714
5988
from __future__ import absolute_import try: from urllib.parse import urlencode except ImportError: from urllib import urlencode from .filepost import encode_multipart_formdata __all__ = ['RequestMethods'] class RequestMethods(object): """ Convenience mixin for classes who implement a :meth:`urlopen` method, such as :class:`~urllib3.connectionpool.HTTPConnectionPool` and :class:`~urllib3.poolmanager.PoolManager`. Provides behavior for making common types of HTTP request methods and decides which type of request field encoding to use. Specifically, :meth:`.request_encode_url` is for sending requests whose fields are encoded in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are encoded in the *body* of the request using multipart or www-form-urlencoded (such as for POST, PUT, PATCH). :meth:`.request` is for making any kind of request, it will look up the appropriate encoding format and use one of the above two methods to make the request. Initializer parameters: :param headers: Headers to include with all requests, unless other headers are given explicitly. """ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) def __init__(self, headers=None): self.headers = headers or {} def urlopen(self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw): # Abstract raise NotImplemented("Classes extending RequestMethods must implement " "their own ``urlopen`` method.") def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`. 
""" method = method.upper() if method in self._encode_url_methods: return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw) else: return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw) def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. """ if headers is None: headers = self.headers extra_kw = {'headers': headers} extra_kw.update(urlopen_kw) if fields: url += '?' + urlencode(fields) return self.urlopen(method, url, **extra_kw) def request_encode_body(self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :meth:`urllib.urlencode` is used with the 'application/x-www-form-urlencoded' content type. Multipart encoding must be used when posting files, and it's reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example:: fields = { 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimick behavior of browsers. 
Note that if ``headers`` are supplied, the 'Content-Type' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ if headers is None: headers = self.headers extra_kw = {'headers': {}} if fields: if 'body' in urlopen_kw: raise TypeError( "request got values for both 'fields' and 'body', can only specify one.") if encode_multipart: body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) else: body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' extra_kw['body'] = body extra_kw['headers'] = {'Content-Type': content_type} extra_kw['headers'].update(headers) extra_kw.update(urlopen_kw) return self.urlopen(method, url, **extra_kw)
apache-2.0
thebongy/MakeMyOutputs
docx/api.py
12
1179
# encoding: utf-8 """ Directly exposed API functions and classes, :func:`Document` for now. Provides a syntactically more convenient API for interacting with the OpcPackage graph. """ from __future__ import absolute_import, division, print_function import os from docx.opc.constants import CONTENT_TYPE as CT from docx.package import Package def Document(docx=None): """ Return a |Document| object loaded from *docx*, where *docx* can be either a path to a ``.docx`` file (a string) or a file-like object. If *docx* is missing or ``None``, the built-in default document "template" is loaded. """ docx = _default_docx_path() if docx is None else docx document_part = Package.open(docx).main_document_part if document_part.content_type != CT.WML_DOCUMENT_MAIN: tmpl = "file '%s' is not a Word file, content type is '%s'" raise ValueError(tmpl % (docx, document_part.content_type)) return document_part.document def _default_docx_path(): """ Return the path to the built-in default .docx package. """ _thisdir = os.path.split(__file__)[0] return os.path.join(_thisdir, 'templates', 'default.docx')
mit
dya2/python-for-android
python-build/python-libs/gdata/tests/gdata_tests/calendar/calendar_acl_test.py
128
8433
#!/usr/bin/python # # Copyright (C) 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = 'api.lliabraa@google.com (Lane LiaBraaten)' import unittest try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import atom import gdata.calendar import gdata.calendar.service import gdata.service import random import getpass from gdata import test_data username = '' password = '' class CalendarServiceAclUnitTest(unittest.TestCase): _aclFeedUri = "/calendar/feeds/default/acl/full" _aclEntryUri = "%s/user:%s" % (_aclFeedUri, "user@gmail.com",) def setUp(self): self.cal_client = gdata.calendar.service.CalendarService() self.cal_client.email = username self.cal_client.password = password self.cal_client.source = 'GCalendarClient ACL "Unit" Tests' def tearDown(self): # No teardown needed pass def _getRandomNumber(self): """Return a random number as a string for testing""" r = random.Random() r.seed() return str(r.randint(100000,1000000)) def _generateAclEntry(self, role="owner", scope_type="user", scope_value=None): """Generates a ACL rule from parameters or makes a random user an owner by default""" if (scope_type=="user" and scope_value is None): scope_value = "user%s@gmail.com" % (self._getRandomNumber()) rule = gdata.calendar.CalendarAclEntry() rule.title = atom.Title(text=role) rule.scope = gdata.calendar.Scope(value=scope_value, type="user") rule.role = gdata.calendar.Role(value="http://schemas.google.com/gCal/2005#%s" % 
(role)) return rule def assertEqualAclEntry(self, expected, actual): """Compares the values of two ACL entries""" self.assertEqual(expected.role.value, actual.role.value) self.assertEqual(expected.scope.value, actual.scope.value) self.assertEqual(expected.scope.type, actual.scope.type) def testGetAclFeedUnauthenticated(self): """Fiendishly try to get an ACL feed without authenticating""" try: self.cal_client.GetCalendarAclFeed(self._aclFeedUri) self.fail("Unauthenticated request should fail") except gdata.service.RequestError, error: self.assertEqual(error[0]['status'], 401) self.assertEqual(error[0]['reason'], "Authorization required") def testGetAclFeed(self): """Get an ACL feed""" self.cal_client.ProgrammaticLogin() feed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) self.assertNotEqual(0,len(feed.entry)) def testGetAclEntryUnauthenticated(self): """Fiendishly try to get an ACL entry without authenticating""" try: self.cal_client.GetCalendarAclEntry(self._aclEntryUri) self.fail("Unauthenticated request should fail"); except gdata.service.RequestError, error: self.assertEqual(error[0]['status'], 401) self.assertEqual(error[0]['reason'], "Authorization required") def testGetAclEntry(self): """Get an ACL entry""" self.cal_client.ProgrammaticLogin() self.cal_client.GetCalendarAclEntry(self._aclEntryUri) def testCalendarAclFeedFromString(self): """Create an ACL feed from a hard-coded string""" aclFeed = gdata.calendar.CalendarAclFeedFromString(test_data.ACL_FEED) self.assertEqual("Elizabeth Bennet's access control list", aclFeed.title.text) self.assertEqual(2,len(aclFeed.entry)) def testCalendarAclEntryFromString(self): """Create an ACL entry from a hard-coded string""" aclEntry = gdata.calendar.CalendarAclEntryFromString(test_data.ACL_ENTRY) self.assertEqual("owner", aclEntry.title.text) self.assertEqual("user", aclEntry.scope.type) self.assertEqual("liz@gmail.com", aclEntry.scope.value) self.assertEqual("http://schemas.google.com/gCal/2005#owner", 
aclEntry.role.value) def testCreateAndDeleteAclEntry(self): """Add an ACL rule and verify that is it returned in the ACL feed. Then delete the rule and verify that the rule is no longer included in the ACL feed.""" # Get the current number of ACL rules self.cal_client.ProgrammaticLogin() aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) original_rule_count = len(aclFeed.entry) # Insert entry rule = self._generateAclEntry() returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) # Verify rule was added with correct ACL values aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) self.assertEqual(original_rule_count+1, len(aclFeed.entry)) self.assertEqualAclEntry(rule, returned_rule) # Delete the event self.cal_client.DeleteAclEntry(returned_rule.GetEditLink().href) aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) self.assertEquals(original_rule_count, len(aclFeed.entry)) def testUpdateAclChangeScopeValue(self): """Fiendishly try to insert a test ACL rule and attempt to change the scope value (i.e. username). Verify that an exception is thrown, then delete the test rule.""" # Insert a user-scoped owner role ot random user aclEntry = self._generateAclEntry("owner","user"); self.cal_client.ProgrammaticLogin() rule = self._generateAclEntry() returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) # Change the scope value (i.e. what user is the owner) and update the entry updated_rule = returned_rule updated_rule.scope.value = "user_%s@gmail.com" % (self._getRandomNumber()) try: returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) except gdata.service.RequestError, error: self.assertEqual(error[0]['status'], 403) self.assertEqual(error[0]['reason'], "Forbidden") self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) def testUpdateAclChangeScopeType(self): """Fiendishly try to insert a test ACL rule and attempt to change the scope type (i.e. 
from 'user' to 'domain'). Verify that an exception is thrown, then delete the test rule.""" # Insert a user-scoped owner role ot random user aclEntry = self._generateAclEntry("owner","user"); self.cal_client.ProgrammaticLogin() rule = self._generateAclEntry() returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) # Change the scope value (i.e. what user is the owner) and update the entry updated_rule = returned_rule updated_rule.scope.type = "domain" try: returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) except gdata.service.RequestError, error: self.assertEqual(error[0]['status'], 403) self.assertEqual(error[0]['reason'], "Forbidden") self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) def testUpdateAclChangeRoleValue(self): """Insert a test ACL rule and attempt to change the scope type (i.e. from 'owner' to 'editor'). Verify that an exception is thrown, then delete the test rule.""" # Insert a user-scoped owner role ot random user aclEntry = self._generateAclEntry("owner","user"); self.cal_client.ProgrammaticLogin() rule = self._generateAclEntry() returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) # Change the scope value (i.e. what user is the owner) and update the entry updated_rule = returned_rule updated_rule.role.value = "http://schemas.google.com/gCal/2005#editor" returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) self.assertEqualAclEntry(updated_rule, returned_rule) self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) if __name__ == '__main__': print ('NOTE: Please run these tests only with a test account. ' + 'The tests may delete or update your data.') username = raw_input('Please enter your username: ') password = getpass.getpass() unittest.main()
apache-2.0
bfaviero/ok
oauthlib/oauth1/rfc5849/endpoints/resource.py
42
7083
# -*- coding: utf-8 -*- """ oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of the resource protection provider logic of OAuth 1.0 RFC 5849. """ from __future__ import absolute_import, unicode_literals import logging from .base import BaseEndpoint from .. import errors log = logging.getLogger(__name__) class ResourceEndpoint(BaseEndpoint): """An endpoint responsible for protecting resources. Typical use is to instantiate with a request validator and invoke the ``validate_protected_resource_request`` in a decorator around a view function. If the request is valid, invoke and return the response of the view. If invalid create and return an error response directly from the decorator. See :doc:`/oauth1/validator` for details on which validator methods to implement for this endpoint. An example decorator:: from functools import wraps from your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if v: return f(*args, **kwargs) else: return abort(403) """ def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): """Create a request token response, with a new request token if valid. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :param realms: A list of realms the resource is protected under. This will be supplied to the ``validate_realms`` method of the request validator. :returns: A tuple of 2 elements. 1. True if valid, False otherwise. 2. 
An oauthlib.common.Request object. """ try: request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if not request.resource_owner_key: return False, request if not self.request_validator.check_access_token( request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request # The server SHOULD return a 401 (Unauthorized) status code when # receiving a request with invalid client credentials. # Note: This is postponed in order to avoid timing attacks, instead # a dummy client is assigned and used to maintain near constant # time request verification. # # Note that early exit would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client # The server SHOULD return a 401 (Unauthorized) status code when # receiving a request with invalid or expired token. # Note: This is postponed in order to avoid timing attacks, instead # a dummy token is assigned and used to maintain near constant # time request verification. # # Note that early exit would enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_ is only used in authorization headers and how # it should be interepreted is not included in the OAuth spec. 
# However they could be seen as a scope or realm to which the # client has access and as such every client should be checked # to ensure it is authorized access to that scope or realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit would enable client realm access enumeration. # # The require_realm indicates this is the first step in the OAuth # workflow where a client requests access to a specific realm. # This first step (obtaining request token) need not require a realm # and can then be identified by checking the require_resource_owner # flag and abscence of realm. # # Clients obtaining an access token will not supply a realm and it will # not be checked. Instead the previously requested realm should be # transferred from the request token to the access token. # # Access to protected resources will always validate the realm but note # that the realm is now tied to the access token and not provided by # the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We delay checking validity until the very end, using dummy values for # calculations and fetching secrets/keys to ensure the flow of every # request remains almost identical regardless of whether valid values # have been supplied. This ensures near constant time execution and # prevents malicious users from guessing sensitive information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v: log.info("[Failure] request verification failed.") log.info("Valid client: %s", valid_client) log.info("Valid token: %s", valid_resource_owner) log.info("Valid realm: %s", valid_realm) log.info("Valid signature: %s", valid_signature) return v, request
mit
batxes/4c2vhic
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models44419.py
4
13927
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "particle_0 geometry" not in marker_sets: s=new_marker_set('particle_0 geometry') marker_sets["particle_0 geometry"]=s s= marker_sets["particle_0 geometry"] mark=s.place_marker((14063, 6927.01, 2765.76), (0.7, 0.7, 0.7), 507.685) if "particle_1 geometry" not in marker_sets: s=new_marker_set('particle_1 geometry') marker_sets["particle_1 geometry"]=s s= marker_sets["particle_1 geometry"] mark=s.place_marker((14744.7, 6698.56, 2767.95), (0.7, 0.7, 0.7), 479.978) if "particle_2 geometry" not in marker_sets: s=new_marker_set('particle_2 geometry') marker_sets["particle_2 geometry"]=s s= marker_sets["particle_2 geometry"] mark=s.place_marker((13072.9, 7333.54, 3522.35), (0.7, 0.7, 0.7), 681.834) if "particle_3 geometry" not in marker_sets: s=new_marker_set('particle_3 geometry') marker_sets["particle_3 geometry"]=s s= marker_sets["particle_3 geometry"] mark=s.place_marker((11050.4, 8111.26, 4410.01), (0.7, 0.7, 0.7), 522.532) if "particle_4 geometry" not in marker_sets: s=new_marker_set('particle_4 geometry') marker_sets["particle_4 geometry"]=s s= marker_sets["particle_4 geometry"] mark=s.place_marker((10410.9, 8340.88, 4701.08), (0, 1, 0), 751.925) if "particle_5 geometry" not in marker_sets: s=new_marker_set('particle_5 geometry') marker_sets["particle_5 geometry"]=s s= marker_sets["particle_5 geometry"] mark=s.place_marker((11852.8, 9339.05, 5877.21), (0.7, 0.7, 0.7), 437.001) if "particle_6 geometry" not in marker_sets: s=new_marker_set('particle_6 geometry') marker_sets["particle_6 geometry"]=s s= marker_sets["particle_6 geometry"] mark=s.place_marker((10194.2, 9740.86, 6803.84), (0.7, 0.7, 0.7), 710.767) if "particle_7 geometry" not in 
marker_sets: s=new_marker_set('particle_7 geometry') marker_sets["particle_7 geometry"]=s s= marker_sets["particle_7 geometry"] mark=s.place_marker((10090.4, 11205.5, 7732.34), (0.7, 0.7, 0.7), 762.077) if "particle_8 geometry" not in marker_sets: s=new_marker_set('particle_8 geometry') marker_sets["particle_8 geometry"]=s s= marker_sets["particle_8 geometry"] mark=s.place_marker((8882.24, 11924.2, 8358.04), (0.7, 0.7, 0.7), 726.799) if "particle_9 geometry" not in marker_sets: s=new_marker_set('particle_9 geometry') marker_sets["particle_9 geometry"]=s s= marker_sets["particle_9 geometry"] mark=s.place_marker((7635.56, 12279.3, 9636.97), (0.7, 0.7, 0.7), 885.508) if "particle_10 geometry" not in marker_sets: s=new_marker_set('particle_10 geometry') marker_sets["particle_10 geometry"]=s s= marker_sets["particle_10 geometry"] mark=s.place_marker((6759.3, 12963.4, 8924.88), (0.7, 0.7, 0.7), 778.489) if "particle_11 geometry" not in marker_sets: s=new_marker_set('particle_11 geometry') marker_sets["particle_11 geometry"]=s s= marker_sets["particle_11 geometry"] mark=s.place_marker((7043.95, 14967.7, 9509.29), (0.7, 0.7, 0.7), 790.333) if "particle_12 geometry" not in marker_sets: s=new_marker_set('particle_12 geometry') marker_sets["particle_12 geometry"]=s s= marker_sets["particle_12 geometry"] mark=s.place_marker((7387.95, 16864.3, 10166), (0.7, 0.7, 0.7), 707.721) if "particle_13 geometry" not in marker_sets: s=new_marker_set('particle_13 geometry') marker_sets["particle_13 geometry"]=s s= marker_sets["particle_13 geometry"] mark=s.place_marker((8615.63, 15991.2, 9554.19), (0.7, 0.7, 0.7), 651.166) if "particle_14 geometry" not in marker_sets: s=new_marker_set('particle_14 geometry') marker_sets["particle_14 geometry"]=s s= marker_sets["particle_14 geometry"] mark=s.place_marker((7638.93, 15401.6, 10900.6), (0.7, 0.7, 0.7), 708.61) if "particle_15 geometry" not in marker_sets: s=new_marker_set('particle_15 geometry') marker_sets["particle_15 geometry"]=s s= 
marker_sets["particle_15 geometry"] mark=s.place_marker((7030.56, 14033.5, 11264.9), (0.7, 0.7, 0.7), 490.595) if "particle_16 geometry" not in marker_sets: s=new_marker_set('particle_16 geometry') marker_sets["particle_16 geometry"]=s s= marker_sets["particle_16 geometry"] mark=s.place_marker((7298.54, 12639, 11011.8), (0.7, 0.7, 0.7), 591.565) if "particle_17 geometry" not in marker_sets: s=new_marker_set('particle_17 geometry') marker_sets["particle_17 geometry"]=s s= marker_sets["particle_17 geometry"] mark=s.place_marker((7377.84, 11113.6, 10593.3), (0.7, 0.7, 0.7), 581.287) if "particle_18 geometry" not in marker_sets: s=new_marker_set('particle_18 geometry') marker_sets["particle_18 geometry"]=s s= marker_sets["particle_18 geometry"] mark=s.place_marker((9064.15, 10654.1, 10833.4), (0.7, 0.7, 0.7), 789.529) if "particle_19 geometry" not in marker_sets: s=new_marker_set('particle_19 geometry') marker_sets["particle_19 geometry"]=s s= marker_sets["particle_19 geometry"] mark=s.place_marker((9117.68, 9237.51, 11201), (0.7, 0.7, 0.7), 623.587) if "particle_20 geometry" not in marker_sets: s=new_marker_set('particle_20 geometry') marker_sets["particle_20 geometry"]=s s= marker_sets["particle_20 geometry"] mark=s.place_marker((8892.47, 7748.11, 12111.4), (0.7, 0.7, 0.7), 1083.56) if "particle_21 geometry" not in marker_sets: s=new_marker_set('particle_21 geometry') marker_sets["particle_21 geometry"]=s s= marker_sets["particle_21 geometry"] mark=s.place_marker((8556.57, 7579.98, 13732.6), (0.7, 0.7, 0.7), 504.258) if "particle_22 geometry" not in marker_sets: s=new_marker_set('particle_22 geometry') marker_sets["particle_22 geometry"]=s s= marker_sets["particle_22 geometry"] mark=s.place_marker((7858.09, 7718.07, 12492.5), (0.7, 0.7, 0.7), 805.519) if "particle_23 geometry" not in marker_sets: s=new_marker_set('particle_23 geometry') marker_sets["particle_23 geometry"]=s s= marker_sets["particle_23 geometry"] mark=s.place_marker((6390.36, 8288.43, 11059.2), (0.7, 
0.7, 0.7), 631.708) if "particle_24 geometry" not in marker_sets: s=new_marker_set('particle_24 geometry') marker_sets["particle_24 geometry"]=s s= marker_sets["particle_24 geometry"] mark=s.place_marker((4571.48, 9148.2, 10292.4), (0.7, 0.7, 0.7), 805.942) if "particle_25 geometry" not in marker_sets: s=new_marker_set('particle_25 geometry') marker_sets["particle_25 geometry"]=s s= marker_sets["particle_25 geometry"] mark=s.place_marker((3657.57, 9580.24, 9994.96), (1, 0.7, 0), 672.697) if "particle_26 geometry" not in marker_sets: s=new_marker_set('particle_26 geometry') marker_sets["particle_26 geometry"]=s s= marker_sets["particle_26 geometry"] mark=s.place_marker((3494.73, 7639.96, 8087.53), (0.7, 0.7, 0.7), 797.863) if "particle_27 geometry" not in marker_sets: s=new_marker_set('particle_27 geometry') marker_sets["particle_27 geometry"]=s s= marker_sets["particle_27 geometry"] mark=s.place_marker((2468.41, 6440.6, 7131.85), (1, 0.7, 0), 735.682) if "particle_28 geometry" not in marker_sets: s=new_marker_set('particle_28 geometry') marker_sets["particle_28 geometry"]=s s= marker_sets["particle_28 geometry"] mark=s.place_marker((2987.7, 5287.5, 7334.11), (0.7, 0.7, 0.7), 602.14) if "particle_29 geometry" not in marker_sets: s=new_marker_set('particle_29 geometry') marker_sets["particle_29 geometry"]=s s= marker_sets["particle_29 geometry"] mark=s.place_marker((3407.73, 3135.39, 8080.85), (0.7, 0.7, 0.7), 954.796) if "particle_30 geometry" not in marker_sets: s=new_marker_set('particle_30 geometry') marker_sets["particle_30 geometry"]=s s= marker_sets["particle_30 geometry"] mark=s.place_marker((3119.14, 3696.72, 8011.85), (0.7, 0.7, 0.7), 1021.88) if "particle_31 geometry" not in marker_sets: s=new_marker_set('particle_31 geometry') marker_sets["particle_31 geometry"]=s s= marker_sets["particle_31 geometry"] mark=s.place_marker((2712.73, 3283.1, 6783.5), (0.7, 0.7, 0.7), 909.323) if "particle_32 geometry" not in marker_sets: s=new_marker_set('particle_32 
geometry') marker_sets["particle_32 geometry"]=s s= marker_sets["particle_32 geometry"] mark=s.place_marker((2358.45, 1509.76, 5398.06), (0.7, 0.7, 0.7), 621.049) if "particle_33 geometry" not in marker_sets: s=new_marker_set('particle_33 geometry') marker_sets["particle_33 geometry"]=s s= marker_sets["particle_33 geometry"] mark=s.place_marker((2863.51, 1742.83, 4037.73), (0.7, 0.7, 0.7), 525.154) if "particle_34 geometry" not in marker_sets: s=new_marker_set('particle_34 geometry') marker_sets["particle_34 geometry"]=s s= marker_sets["particle_34 geometry"] mark=s.place_marker((4222.87, 1744.36, 3391.47), (0.7, 0.7, 0.7), 890.246) if "particle_35 geometry" not in marker_sets: s=new_marker_set('particle_35 geometry') marker_sets["particle_35 geometry"]=s s= marker_sets["particle_35 geometry"] mark=s.place_marker((5523.88, 866.114, 2536.52), (0.7, 0.7, 0.7), 671.216) if "particle_36 geometry" not in marker_sets: s=new_marker_set('particle_36 geometry') marker_sets["particle_36 geometry"]=s s= marker_sets["particle_36 geometry"] mark=s.place_marker((6959.71, -16.043, 2766.25), (0.7, 0.7, 0.7), 662.672) if "particle_37 geometry" not in marker_sets: s=new_marker_set('particle_37 geometry') marker_sets["particle_37 geometry"]=s s= marker_sets["particle_37 geometry"] mark=s.place_marker((6642.98, 130.584, 4334.08), (0.7, 0.7, 0.7), 646.682) if "particle_38 geometry" not in marker_sets: s=new_marker_set('particle_38 geometry') marker_sets["particle_38 geometry"]=s s= marker_sets["particle_38 geometry"] mark=s.place_marker((5159.75, -277.178, 4319.91), (0.7, 0.7, 0.7), 769.945) if "particle_39 geometry" not in marker_sets: s=new_marker_set('particle_39 geometry') marker_sets["particle_39 geometry"]=s s= marker_sets["particle_39 geometry"] mark=s.place_marker((4037.05, 1380.26, 4288.82), (0.7, 0.7, 0.7), 606.92) if "particle_40 geometry" not in marker_sets: s=new_marker_set('particle_40 geometry') marker_sets["particle_40 geometry"]=s s= marker_sets["particle_40 geometry"] 
mark=s.place_marker((3414.37, 1232.3, 3206.66), (0.7, 0.7, 0.7), 622.571) if "particle_41 geometry" not in marker_sets: s=new_marker_set('particle_41 geometry') marker_sets["particle_41 geometry"]=s s= marker_sets["particle_41 geometry"] mark=s.place_marker((3935.26, 2174.76, 4038.82), (0.7, 0.7, 0.7), 466.865) if "particle_42 geometry" not in marker_sets: s=new_marker_set('particle_42 geometry') marker_sets["particle_42 geometry"]=s s= marker_sets["particle_42 geometry"] mark=s.place_marker((4812.35, 2452.13, 3340.2), (0.7, 0.7, 0.7), 682.933) if "particle_43 geometry" not in marker_sets: s=new_marker_set('particle_43 geometry') marker_sets["particle_43 geometry"]=s s= marker_sets["particle_43 geometry"] mark=s.place_marker((3981.15, 2037.12, 3917.71), (0.7, 0.7, 0.7), 809.326) if "particle_44 geometry" not in marker_sets: s=new_marker_set('particle_44 geometry') marker_sets["particle_44 geometry"]=s s= marker_sets["particle_44 geometry"] mark=s.place_marker((2849.89, 2530.17, 5278.02), (0.7, 0.7, 0.7), 796.72) if "particle_45 geometry" not in marker_sets: s=new_marker_set('particle_45 geometry') marker_sets["particle_45 geometry"]=s s= marker_sets["particle_45 geometry"] mark=s.place_marker((2392.24, 5350.29, 5666.86), (0.7, 0.7, 0.7), 870.026) if "particle_46 geometry" not in marker_sets: s=new_marker_set('particle_46 geometry') marker_sets["particle_46 geometry"]=s s= marker_sets["particle_46 geometry"] mark=s.place_marker((1792.02, 6814.82, 4716.16), (0.7, 0.7, 0.7), 909.577) if "particle_47 geometry" not in marker_sets: s=new_marker_set('particle_47 geometry') marker_sets["particle_47 geometry"]=s s= marker_sets["particle_47 geometry"] mark=s.place_marker((1894.86, 7475.5, 3781.06), (0, 1, 0), 500.536) if "particle_48 geometry" not in marker_sets: s=new_marker_set('particle_48 geometry') marker_sets["particle_48 geometry"]=s s= marker_sets["particle_48 geometry"] mark=s.place_marker((1189.92, 7316.61, 1909.55), (0.7, 0.7, 0.7), 725.276) if "particle_49 
geometry" not in marker_sets: s=new_marker_set('particle_49 geometry') marker_sets["particle_49 geometry"]=s s= marker_sets["particle_49 geometry"] mark=s.place_marker((-527.067, 6807.99, -82.1603), (0.7, 0.7, 0.7), 570.331) if "particle_50 geometry" not in marker_sets: s=new_marker_set('particle_50 geometry') marker_sets["particle_50 geometry"]=s s= marker_sets["particle_50 geometry"] mark=s.place_marker((-265.232, 5131.16, 122.854), (0.7, 0.7, 0.7), 492.203) if "particle_51 geometry" not in marker_sets: s=new_marker_set('particle_51 geometry') marker_sets["particle_51 geometry"]=s s= marker_sets["particle_51 geometry"] mark=s.place_marker((-577.465, 5072.89, 3003.4), (0, 1, 0), 547.7) if "particle_52 geometry" not in marker_sets: s=new_marker_set('particle_52 geometry') marker_sets["particle_52 geometry"]=s s= marker_sets["particle_52 geometry"] mark=s.place_marker((164.002, 4946.79, 2821.03), (0.7, 0.7, 0.7), 581.921) if "particle_53 geometry" not in marker_sets: s=new_marker_set('particle_53 geometry') marker_sets["particle_53 geometry"]=s s= marker_sets["particle_53 geometry"] mark=s.place_marker((1078.12, 3540.24, 1916.52), (0.7, 0.7, 0.7), 555.314) if "particle_54 geometry" not in marker_sets: s=new_marker_set('particle_54 geometry') marker_sets["particle_54 geometry"]=s s= marker_sets["particle_54 geometry"] mark=s.place_marker((2370.3, 2982.46, 1258.45), (0.7, 0.7, 0.7), 404.219) if "particle_55 geometry" not in marker_sets: s=new_marker_set('particle_55 geometry') marker_sets["particle_55 geometry"]=s s= marker_sets["particle_55 geometry"] mark=s.place_marker((3890.71, 3803.84, 1856.76), (0.7, 0.7, 0.7), 764.234) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
gpl-3.0
jctanner/ansible
test/support/integration/plugins/modules/selogin.py
85
7779
#!/usr/bin/python # (c) 2017, Petr Lautrbach <plautrba@redhat.com> # Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: selogin short_description: Manages linux user to SELinux user mapping description: - Manages linux user to SELinux user mapping version_added: "2.8" options: login: description: - a Linux user required: true seuser: description: - SELinux user name required: true selevel: aliases: [ serange ] description: - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. default: s0 state: description: - Desired mapping value. required: true default: present choices: [ 'present', 'absent' ] reload: description: - Reload SELinux policy after commit. 
default: yes ignore_selinux_state: description: - Run independent of selinux runtime state type: bool default: false notes: - The changes are persistent across reboots - Not tested on any debian based system requirements: [ 'libselinux', 'policycoreutils' ] author: - Dan Keder (@dankeder) - Petr Lautrbach (@bachradsusi) - James Cassell (@jamescassell) ''' EXAMPLES = ''' # Modify the default user on the system to the guest_u user - selogin: login: __default__ seuser: guest_u state: present # Assign gijoe user on an MLS machine a range and to the staff_u user - selogin: login: gijoe seuser: staff_u serange: SystemLow-Secret state: present # Assign all users in the engineering group to the staff_u user - selogin: login: '%engineering' seuser: staff_u state: present ''' RETURN = r''' # Default return values ''' import traceback SELINUX_IMP_ERR = None try: import selinux HAVE_SELINUX = True except ImportError: SELINUX_IMP_ERR = traceback.format_exc() HAVE_SELINUX = False SEOBJECT_IMP_ERR = None try: import seobject HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() HAVE_SEOBJECT = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils._text import to_native def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): """ Add linux user to SELinux user mapping :type module: AnsibleModule :param module: Ansible module :type login: str :param login: a Linux User or a Linux group if it begins with % :type seuser: str :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' :type serange: str :param serange: SELinux MLS/MCS range (defaults to 's0') :type do_reload: bool :param do_reload: Whether to reload SELinux policy after commit :type sestore: str :param sestore: SELinux store :rtype: bool :return: True if the policy was changed, otherwise False """ try: selogin = seobject.loginRecords(sestore) 
selogin.set_reload(do_reload) change = False all_logins = selogin.get_all() # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) # for local_login in all_logins: if login not in all_logins.keys(): change = True if not module.check_mode: selogin.add(login, seuser, serange) else: if all_logins[login][0] != seuser or all_logins[login][1] != serange: change = True if not module.check_mode: selogin.modify(login, seuser, serange) except (ValueError, KeyError, OSError, RuntimeError) as e: module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) return change def semanage_login_del(module, login, seuser, do_reload, sestore=''): """ Delete linux user to SELinux user mapping :type module: AnsibleModule :param module: Ansible module :type login: str :param login: a Linux User or a Linux group if it begins with % :type seuser: str :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' :type do_reload: bool :param do_reload: Whether to reload SELinux policy after commit :type sestore: str :param sestore: SELinux store :rtype: bool :return: True if the policy was changed, otherwise False """ try: selogin = seobject.loginRecords(sestore) selogin.set_reload(do_reload) change = False all_logins = selogin.get_all() # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) if login in all_logins.keys(): change = True if not module.check_mode: selogin.delete(login) except (ValueError, KeyError, OSError, RuntimeError) as e: module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) return change def get_runtime_status(ignore_selinux_state=False): return True if ignore_selinux_state is True else selinux.is_selinux_enabled() def main(): module = AnsibleModule( argument_spec=dict( ignore_selinux_state=dict(type='bool', default=False), login=dict(type='str', required=True), seuser=dict(type='str'), selevel=dict(type='str', 
aliases=['serange'], default='s0'), state=dict(type='str', default='present', choices=['absent', 'present']), reload=dict(type='bool', default=True), ), required_if=[ ["state", "present", ["seuser"]] ], supports_check_mode=True ) if not HAVE_SELINUX: module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) if not HAVE_SEOBJECT: module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) ignore_selinux_state = module.params['ignore_selinux_state'] if not get_runtime_status(ignore_selinux_state): module.fail_json(msg="SELinux is disabled on this host.") login = module.params['login'] seuser = module.params['seuser'] serange = module.params['selevel'] state = module.params['state'] do_reload = module.params['reload'] result = { 'login': login, 'seuser': seuser, 'serange': serange, 'state': state, } if state == 'present': result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) elif state == 'absent': result['changed'] = semanage_login_del(module, login, seuser, do_reload) else: module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
xenserver/xs-cim
test/pywbem-tests/RequestStateChangeTest.py
1
11817
#!/usr/bin/env python '''Copyright (C) 2008 Citrix Systems Inc. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ========================================================================= ''' import sys import pywbem import time import getpass import os from xen_cim_operations import * from TestSetUp import * ''' Exercises the RequestStateChange method of the Xen_ComputerSystem class. Allows caller to change the states of a VM (running, stopped etc) ''' class RequestStateChange(TestSetUp): def __init__(self, Ip, userName, password): TestSetUp.__init__(self, Ip, userName, password) self.testVM = self.GetTargetVM() association_class = 'Xen_ComputerSystemElementCapabilities' # association to traverse via Xen_ComputerSystem result_class = 'Xen_ComputerSystemCapabilities' # class we are looking for in_params = {'ResultClass': result_class, 'AssocClass': association_class } elements = self.conn.AssociatorNames(self.testVM, **in_params) self.supported_states = None for element in elements: element_inst = self.conn.GetInstance(element) self.supported_states = element_inst['RequestedStatesSupported'] ############################################################################## def state_is_supported (self, state): for item in self.supported_states: if item == state: print 'State %d is supported by VM.' 
% state return True print 'State %d is not supported by VM.' % state return False def getCurrentState(self, vm_ref): vm_inst = self.conn.GetInstance(vm_ref) return vm_inst['EnabledState'] def changeVMState_PowerOn(self): self.TestBegin() result = 1 if self.state_is_supported(2): self.VM_PowerOn() state = self.getCurrentState(self.testVM) result = 0 if (str(state) == '2'): result = 1 else: result = 0 self.TestEnd(result) def xen_tools_are_up(self, testVM): # get the IP address of the VM, whih is available only when the tools are up network_ports = GetNetworkPortsForVM (self.conn, testVM) for network_port in network_ports: inst = self.conn.GetInstance(network_port) if 'NetworkAddresses' in inst: if inst['NetworkAddresses'] != None: if len(inst['NetworkAddresses']) != 0: print 'found networkAddress: %s, xen-tools must be up and running...' % inst['NetworkAddresses'][0] break # finding one IP address is good enough def VM_PowerOn(self): state = self.getCurrentState(self.testVM) if (str(state) == '2'): print "Already started" time.sleep(5) else: print 'starting the VM' in_params = {'RequestedState':'2'} # Start the VM ChangeVMState(self.conn, self.testVM, in_params, True, '2') i = 1 while i < 12: time.sleep(5) # xen-tools services take a while to start up if (self.xen_tools_are_up(self.testVM)): break i = i + 1 def changeVMState_Shutdown(self): self.TestBegin() result = 1 if self.state_is_supported(4): self.VM_PowerOn() in_params = {'RequestedState':'4'} # safe shutdown ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) #state = self.getCurrentState(self.testVM) vm_inst = self.conn.GetInstance(self.testVM) result = 0 if (vm_inst['status'] == 'Stopped'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Reboot(self): self.TestBegin() result = 1 if self.state_is_supported(10): self.VM_PowerOn() in_params = {'RequestedState':'10'} # safe reboot ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = 
self.getCurrentState(self.testVM) result = 0 if (str(state) == '2'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Queisce(self): self.TestBegin() result = 1 if self.state_is_supported(9): self.VM_PowerOn() in_params = {'RequestedState':'9'} # pause the VM ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if (str(state) == '9'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Disabled(self): self.TestBegin() result = 1 if self.state_is_supported(3): self.VM_PowerOn() in_params = {'RequestedState':'3'} # shut it down ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if (str(state) == '3'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Reset(self): self.TestBegin() result = 1 if self.state_is_supported(11): self.VM_PowerOn() in_params = {'RequestedState':'11'} # hard shutdown/power reset ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if (str(state) == '2'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_HardShutdown(self): self.TestBegin() result = 1 if self.state_is_supported(32768): self.VM_PowerOn() in_params = {'RequestedState':'32768'} # hard shutdown/power reset ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if (str(state) != '2'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_HardReboot(self): self.TestBegin() result = 1 if self.state_is_supported(32769): self.VM_PowerOn() in_params = {'RequestedState':'32769'} # hard reboot ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if (str(state) == '2'): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Defer(self): self.TestBegin() result = 1 if 
self.state_is_supported(8): self.VM_PowerOn() in_params = {'RequestedState':'8'} # not supported n = ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) state = self.getCurrentState(self.testVM) result = 0 if ((str(state) == '8') and (n == 1)): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Test(self): self.TestBegin() result = 1 if self.state_is_supported(7): self.VM_PowerOn() in_params = {'RequestedState':'7'} # not supported n = ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) result = 0 state = self.getCurrentState(self.testVM) if ((str(state) == '7') and (n == 1)): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_Offline(self): self.TestBegin() result = 1 if self.state_is_supported(6): self.VM_PowerOn() in_params = {'RequestedState':'6'} # suspend n = ChangeVMState(self.conn, self.testVM, in_params) time.sleep(5) result = 0 state = self.getCurrentState(self.testVM) if ((str(state) == '6') and (n == 1)): result = 1 else: result = 0 self.TestEnd(result) def changeVMState_NoParams (self): self.TestBegin() result = 1 if self.state_is_supported(7): self.VM_PowerOn() in_params = {} n = ChangeVMState(self.conn, self.testVM, in_params) if n == 1: print 'Success returned while expecting failure' result = 1 self.TestEnd(result) def GetTargetVM(self): vssd = CIMInstance('Xen_ComputerSystemSettingData') vssd['ElementName'] = 'RequestStateChangeTestCommonTargetVM' vssd['Description'] = "VM to test state changes" vssd['Other_Config'] = ['HideFromXenCenter=false'] return CreateVMBasedOnTemplateName(self.conn, self.vsms[0], "XenServer Transfer VM", vssd) def LocalCleanup (self): print 'Deleting local VM' + str(self.testVM.items()) DeleteVM(self.conn, self.vsms[0], self.testVM) ######################################################## if __name__ == '__main__': #Ip = raw_input("Server IP Address: ") #username = raw_input("User Name: ") #password = getpass.getpass("Password: ") count = len(sys.argv[1:]) if (count != 
3): print "Wrong arg count: Must pass Ip, username and password as arguments " print "Count is "+str(count) sys.exit(0) Ip = sys.argv[1] username = sys.argv[2] password = sys.argv[3] cd = RequestStateChange(Ip, username, password) try: # Exercises the states of the VM using the DMTF specified states. cd.changeVMState_PowerOn() # Power on a VM cd.changeVMState_Queisce() # Pause a VM cd.changeVMState_Disabled() # Disable a VM cd.changeVMState_Shutdown() # Shutdown a VM cd.changeVMState_Reboot() # Reboot the VM cd.changeVMState_Reset() # Reset the VM cd.changeVMState_Offline() # Suspend the VM cd.changeVMState_HardReboot() # Xen specific: HardReboot the VM cd.changeVMState_HardShutdown() # Xen specific: HardShutdown the VM #Error scenarios cd.changeVMState_Defer() # Deferred is not a state Xen supports cd.changeVMState_Test() # Test is not a state Xen supports cd.changeVMState_NoParams() # Test for erros when no parameters are sent print '' #+++++++++++++++++++++++++++++++++++++++ finally: cd.LocalCleanup() cd.TestCleanup() sys.exit(0)
lgpl-2.1
ebrehault/diazo
docs/conf.py
3
6987
# -*- coding: utf-8 -*- # # Diazo documentation build configuration file, created by # sphinx-quickstart on Tue Nov 2 18:58:07 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Diazo' copyright = u'2011, Plone Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0b1' # The full version, including alpha/beta/rc tags. release = '1.0b1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Diazo" # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = images/logo.jpg # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Diazodoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). 
#latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Diazo.tex', u'Diazo Documentation', u'Plone Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'diazo', u'Diazo Documentation', [u'Plone Foundation'], 1) ]
bsd-3-clause
oxagast/hashnet
stratum/stratum/server.py
1
5375
def setup(setup_event=None): try: from twisted.internet import epollreactor epollreactor.install() except ImportError: print "Failed to install epoll reactor, default reactor will be used instead." try: import settings except ImportError: print "***** Is configs.py missing? Maybe you want to copy and customize config_default.py?" from twisted.application import service application = service.Application("stratum-server") # Setting up logging from twisted.python.log import ILogObserver, FileLogObserver from twisted.python.logfile import DailyLogFile #logfile = DailyLogFile(settings.LOGFILE, settings.LOGDIR) #application.setComponent(ILogObserver, FileLogObserver(logfile).emit) if settings.ENABLE_EXAMPLE_SERVICE: import stratum.example_service if setup_event == None: setup_finalize(None, application) else: setup_event.addCallback(setup_finalize, application) return application def setup_finalize(event, application): from twisted.application import service, internet from twisted.internet import reactor, ssl from twisted.web.server import Site from twisted.python import log #from twisted.enterprise import adbapi import OpenSSL.SSL from services import ServiceEventHandler import socket_transport import http_transport import websocket_transport import irc from stratum import settings try: import signature signing_key = signature.load_privkey_pem(settings.SIGNING_KEY) except: print "Loading of signing key '%s' failed, protocol messages cannot be signed." % settings.SIGNING_KEY signing_key = None # Attach HTTPS Poll Transport service to application try: sslContext = ssl.DefaultOpenSSLContextFactory(settings.SSL_PRIVKEY, settings.SSL_CACERT) except OpenSSL.SSL.Error: sslContext = None print "Cannot initiate SSL context, are SSL_PRIVKEY or SSL_CACERT missing?" print "This will skip all SSL-based transports." 
# Set up thread pool size for service threads reactor.suggestThreadPoolSize(settings.THREAD_POOL_SIZE) if settings.LISTEN_SOCKET_TRANSPORT: # Attach Socket Transport service to application socket = internet.TCPServer(settings.LISTEN_SOCKET_TRANSPORT, socket_transport.SocketTransportFactory(debug=settings.DEBUG, signing_key=signing_key, signing_id=settings.SIGNING_ID, event_handler=ServiceEventHandler, tcp_proxy_protocol_enable=settings.TCP_PROXY_PROTOCOL)) socket.setServiceParent(application) # Build the HTTP interface httpsite = Site(http_transport.Root(debug=settings.DEBUG, signing_key=signing_key, signing_id=settings.SIGNING_ID, event_handler=ServiceEventHandler)) httpsite.sessionFactory = http_transport.HttpSession if settings.LISTEN_HTTP_TRANSPORT: # Attach HTTP Poll Transport service to application http = internet.TCPServer(settings.LISTEN_HTTP_TRANSPORT, httpsite) http.setServiceParent(application) if settings.LISTEN_HTTPS_TRANSPORT and sslContext: https = internet.SSLServer(settings.LISTEN_HTTPS_TRANSPORT, httpsite, contextFactory = sslContext) https.setServiceParent(application) if settings.LISTEN_WS_TRANSPORT: from autobahn.websocket import listenWS log.msg("Starting WS transport on %d" % settings.LISTEN_WS_TRANSPORT) ws = websocket_transport.WebsocketTransportFactory(settings.LISTEN_WS_TRANSPORT, debug=settings.DEBUG, signing_key=signing_key, signing_id=settings.SIGNING_ID, event_handler=ServiceEventHandler) listenWS(ws) if settings.LISTEN_WSS_TRANSPORT and sslContext: from autobahn.websocket import listenWS log.msg("Starting WSS transport on %d" % settings.LISTEN_WSS_TRANSPORT) wss = websocket_transport.WebsocketTransportFactory(settings.LISTEN_WSS_TRANSPORT, is_secure=True, debug=settings.DEBUG, signing_key=signing_key, signing_id=settings.SIGNING_ID, event_handler=ServiceEventHandler) listenWS(wss, contextFactory=sslContext) if settings.IRC_NICK: reactor.connectTCP(settings.IRC_SERVER, settings.IRC_PORT, irc.IrcLurkerFactory(settings.IRC_ROOM, 
settings.IRC_NICK, settings.IRC_HOSTNAME)) return event if __name__ == '__main__': print "This is not executable script. Try 'twistd -ny launcher.tac instead!"
gpl-2.0
tjhei/burnman-original
burnman/equation_of_state.py
1
2790
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.


class equation_of_state(object):
    """Abstract interface for an equation of state (EOS).

    A mineral uses an implementation of this interface to determine its
    thermoelastic properties at a given pressure P and temperature T.
    To define a new equation of state, subclass this class and override
    the methods below.  All quantities are in SI units.

    Most properties are functions of pressure, temperature and volume,
    plus a ``params`` dict holding the material constants (reference
    volume, Debye temperature, molar mass, ...).  The exceptions are
    :meth:`volume` and :meth:`density`, which depend on pressure and
    temperature only.

    Note: now derives from ``object`` so that on Python 2 it is a
    new-style class (``super()``, properties, etc. work in subclasses);
    this is fully backward compatible.
    """

    def volume(self, pressure, temperature, params):
        """Return the molar volume at ``pressure`` and ``temperature`` [m^3]."""
        raise NotImplementedError("")

    def density(self, pressure, temperature, params):
        """Return the density at ``pressure`` and ``temperature`` [kg/m^3].

        Derived from ``params["molar_mass"]`` and :meth:`volume`, so
        subclasses normally do not need to override it.
        """
        return params["molar_mass"] / self.volume(pressure, temperature, params)

    def grueneisen_parameter(self, pressure, temperature, volume, params):
        """Return the Grueneisen parameter [unitless]."""
        raise NotImplementedError("")

    def isothermal_bulk_modulus(self, pressure, temperature, volume, params):
        """Return the isothermal bulk modulus [Pa]."""
        raise NotImplementedError("")

    def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
        """Return the adiabatic bulk modulus [Pa]."""
        raise NotImplementedError("")

    def shear_modulus(self, pressure, temperature, volume, params):
        """Return the shear modulus [Pa]."""
        raise NotImplementedError("")

    def heat_capacity_v(self, pressure, temperature, volume, params):
        """Return the heat capacity at constant volume [J/K/mol]."""
        raise NotImplementedError("")

    def heat_capacity_p(self, pressure, temperature, volume, params):
        """Return the heat capacity at constant pressure [J/K/mol]."""
        raise NotImplementedError("")

    def thermal_expansivity(self, pressure, temperature, volume, params):
        """Return the thermal expansivity [1/K]."""
        raise NotImplementedError("")
gpl-2.0
dot-project/dot
share/qt/make_spinner.py
4415
1035
#!/usr/bin/env python # W.J. van der Laan, 2011 # Make spinning .mng animation from a .png # Requires imagemagick 6.7+ from __future__ import division from os import path from PIL import Image from subprocess import Popen SRC='img/reload_scaled.png' DST='../../src/qt/res/movies/update_spinner.mng' TMPDIR='/tmp' TMPNAME='tmp-%03i.png' NUMFRAMES=35 FRAMERATE=10.0 CONVERT='convert' CLOCKWISE=True DSIZE=(16,16) im_src = Image.open(SRC) if CLOCKWISE: im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT) def frame_to_filename(frame): return path.join(TMPDIR, TMPNAME % frame) frame_files = [] for frame in xrange(NUMFRAMES): rotation = (frame + 0.5) / NUMFRAMES * 360.0 if CLOCKWISE: rotation = -rotation im_new = im_src.rotate(rotation, Image.BICUBIC) im_new.thumbnail(DSIZE, Image.ANTIALIAS) outfile = frame_to_filename(frame) im_new.save(outfile, 'png') frame_files.append(outfile) p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST]) p.communicate()
mit
mtils/ems
ems/converter/readers/dbdump.py
1
4101
'''
Created on 24.10.2010

@author: michi
'''
from xml.etree import ElementTree as et

from ems.converter.inputreader import InputReader
from ems.core.mimetype import MimeTypeDB
from ems.core.mimetype import MimeType


class DBDump(InputReader):
    """Streaming XML input reader for database dumps.

    Walks an XML file with :func:`ElementTree.iterparse` and exposes a
    tiny XPath-like ``select()`` restricted to the patterns the
    converter uses: ``/`` (root), ``//tag`` (iterate matching
    elements), ``node-name(.)`` and relative attribute access like
    ``./@id``.
    """

    def select(self, xpath):
        """Evaluate the supported XPath subset against the cursor.

        Returns ``self`` for navigation queries, a string (tag name or
        attribute value) for value queries, or None when the query does
        not resolve.
        """
        pathMode = None
        if xpath.startswith("//"):
            query = xpath[2:]
            pathMode = 'all'
        elif xpath.startswith("/"):
            query = xpath[1:]
            pathMode = 'abs'
        elif xpath.startswith('.'):
            query = xpath
            pathMode = 'rel'
        else:
            query = xpath
            pathMode = 'rel'

        # "/": forward to the root node (advance once if not yet started).
        if query == '' and pathMode == 'abs':
            if self.__nextCount < 1:
                self.next()
            return self

        # "//tag": install a node test; iteration happens via next().
        if pathMode == 'all':
            if query != '':
                self.__nodeTest['query'] = "node-name(.)"
                self.__nodeTest['result'] = query
            return self

        if pathMode == 'rel':
            if query.endswith(')'):
                # Function-call syntax, e.g. node-name(.)
                functionSplit = query.rstrip(')').split("(")
                func = functionSplit[0]
                par = functionSplit[1]
                elem = self.getNodeTestElement(par)
                if elem is not None:
                    if func == 'node-name':
                        return elem.tag
            else:
                # Path ending in an attribute access, e.g. ./@id
                nodePath = query[:query.rfind('/')]
                elem = self.getNodeTestElement(nodePath)
                if elem is not None:
                    nodeQuery = query[query.rfind('/')+1:]
                    if nodeQuery.startswith('@'):
                        try:
                            return elem.attrib[nodeQuery[1:]]
                        except KeyError:
                            return None

    def getNodeTestElement(self, nodeTest):
        """Resolve '.'/'..' against the open-element stack, else None."""
        stackLength = len(self.__elementStack)
        if nodeTest == '.':
            if stackLength > 0:
                return self.__elementStack[stackLength - 1]
            return None
        if nodeTest == '..':
            if stackLength > 1:
                return self.__elementStack[stackLength - 2]
            return None

    def getType(self):
        # NOTE(review): presumably a type constant provided by the
        # InputReader base class -- confirm self.file is set there.
        return self.file

    def getCurrentPosition(self):
        """Return the index of the current (matched) element."""
        return self.currentIndex

    def notify(self, eventType):
        """Reset cursor state when a new conversion process starts."""
        if eventType == self.startProcess:
            self.currentIndex = -1
            self.__iterator = None
            self.__nextCount = 0
            self.__elementStack = []
            self.__nodeTest = {}
        super(DBDump, self).notify(eventType)

    def __iter__(self):
        return self

    def next(self):
        """Advance to the next element satisfying the node test."""
        self.__nextCount += 1
        event, node = self.__getIterator().next()
        if event == 'start':
            self.__elementStack.append(node)
            # 'in' replaces the Python-2-only dict.has_key().
            if 'query' in self.__nodeTest:
                result = self.select(self.__nodeTest['query'])
                if result != self.__nodeTest['result']:
                    # BUGFIX: propagate the recursive advance.  Without
                    # this return, the code fell through and incremented
                    # currentIndex (and fired notifyProgress) once more
                    # for every element skipped by the node test.
                    return self.next()
        if event == 'end':
            self.__elementStack.pop()
            return self.next()
        self.currentIndex += 1
        if self._plugin is not None:
            self._plugin.notifyProgress()
        return self

    def __getIterator(self):
        """Lazily create the iterparse iterator over the source file.

        NOTE(review): the file handle opened here is never closed
        explicitly; it lives as long as the iterator does.
        """
        if self.__iterator is None:
            self.__iterator = et.iterparse(open(self.source),
                                           events=("start", "end"))
        return self.__iterator

    def getSupportedMimeTypes(self):
        """Return the XML mime type(s) this reader accepts."""
        if not len(self.supportedMimeTypes):
            self.supportedMimeTypes = []
            try:
                self.supportedMimeTypes.append(MimeTypeDB.get(suffix='.xml'))
            except KeyError:
                # Fall back to a hand-built mime type if the DB has no entry.
                self.supportedMimeTypes.append(MimeType('text/xml', ['.xml', ]))
        return self.supportedMimeTypes

    def getFieldNames(self):
        return ["id", "name"]

    def __len__(self):
        # The record count of a streamed dump is unknown up front.
        return 0
mit
amperser/proselint
tests/test_redundancy_misc.py
1
1445
"""Tests for redundancy.misc check.""" from __future__ import absolute_import from proselint.checks.redundancy import misc as chk from .check import Check class TestCheck(Check): """The test class for redundancy.misc.""" __test__ = True @property def this_check(self): """Boilerplate.""" return chk def test_smoke_check(self): """Basic smoke test for redundancy.misc.check.""" assert self.passes("""Smoke phrase with nothing flagged.""") assert not self.passes("""The table was rectangular in shape.""") def test_smoke_garner(self): """Basic smoke test for redundancy.misc.check_garner.""" assert chk.check_garner( """Smoke phrase with nothing flagged.""") == [] assert chk.check_garner( """It was blatantly obvious what to do next.""") != [] def test_smoke_nordquist(self): """Basic smoke test for redundancy.misc.check_norquist.""" assert chk.check_nordquist( """Smoke phrase with nothing flagged.""") == [] assert chk.check_nordquist( """Taking the package was absolutely essential.""") != [] def test_smoke_atd(self): """Basic smoke test for redundancy.misc.check_norquist.""" assert chk.check_atd( """Smoke phrase with nothing flagged.""") == [] assert chk.check_atd( """He often repeated the old adage.""") != []
bsd-3-clause
LumaPictures/rez
src/rez/vendor/pygraph/readwrite/dot.py
10
9068
# Copyright (c) 2007-2009 Pedro Matiello <pmatiello@gmail.com> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """ Functions for reading and writing graphs in Dot language. @sort: read, read_hypergraph, write, write_hypergraph """ # Imports from rez.vendor.pygraph.classes.digraph import digraph from rez.vendor.pygraph.classes.exceptions import InvalidGraphType from rez.vendor.pygraph.classes.graph import graph from rez.vendor.pygraph.classes.hypergraph import hypergraph from rez.vendor.pydot import pydot # Values colors = ['aquamarine4', 'blue4', 'brown4', 'cornflowerblue', 'cyan4', 'darkgreen', 'darkorange3', 'darkorchid4', 'darkseagreen4', 'darkslategray', 'deeppink4', 'deepskyblue4', 'firebrick3', 'hotpink3', 'indianred3', 'indigo', 'lightblue4', 'lightseagreen', 'lightskyblue4', 'magenta4', 'maroon', 'palevioletred3', 'steelblue', 'violetred3'] def read(string): """ Read a graph from a string in Dot language and return it. 
Nodes and edges specified in the input will be added to the current graph. @type string: string @param string: Input string in Dot format specifying a graph. @rtype: graph @return: Graph """ dotG = pydot.graph_from_dot_data(string) if (dotG.get_type() == "graph"): G = graph() elif (dotG.get_type() == "digraph"): G = digraph() elif (dotG.get_type() == "hypergraph"): return read_hypergraph(string) else: raise InvalidGraphType # Read nodes... # Note: If the nodes aren't explicitly listed, they need to be for each_node in dotG.get_nodes(): G.add_node(each_node.get_name()) for each_attr_key, each_attr_val in each_node.get_attributes().items(): G.add_node_attribute(each_node.get_name(), (each_attr_key, each_attr_val)) # Read edges... for each_edge in dotG.get_edges(): # Check if the nodes have been added if not G.has_node(each_edge.get_source()): G.add_node(each_edge.get_source()) if not G.has_node(each_edge.get_destination()): G.add_node(each_edge.get_destination()) # See if there's a weight if 'weight' in each_edge.get_attributes().keys(): _wt = each_edge.get_attributes()['weight'] else: _wt = 1 # See if there is a label if 'label' in each_edge.get_attributes().keys(): _label = each_edge.get_attributes()['label'] else: _label = '' G.add_edge((each_edge.get_source(), each_edge.get_destination()), wt = _wt, label = _label) for each_attr_key, each_attr_val in each_edge.get_attributes().items(): if not each_attr_key in ['weight', 'label']: G.add_edge_attribute((each_edge.get_source(), each_edge.get_destination()), \ (each_attr_key, each_attr_val)) return G def write(G, weighted=False): """ Return a string specifying the given graph in Dot language. @type G: graph @param G: Graph. @type weighted: boolean @param weighted: Whether edges should be labelled with their weight. @rtype: string @return: String specifying the graph in Dot Language. 
""" dotG = pydot.Dot() if not 'name' in dir(G): dotG.set_name('graphname') else: dotG.set_name(G.name) if (isinstance(G, graph)): dotG.set_type('graph') directed = False elif (isinstance(G, digraph)): dotG.set_type('digraph') directed = True elif (isinstance(G, hypergraph)): return write_hypergraph(G) else: raise InvalidGraphType("Expected graph or digraph, got %s" % repr(G) ) for node in G.nodes(): attr_list = {} for attr in G.node_attributes(node): attr_list[str(attr[0])] = str(attr[1]) newNode = pydot.Node(str(node), **attr_list) dotG.add_node(newNode) # Pydot doesn't work properly with the get_edge, so we use # our own set to keep track of what's been added or not. seen_edges = set([]) for edge_from, edge_to in G.edges(): if (str(edge_from) + "-" + str(edge_to)) in seen_edges: continue if (not directed) and (str(edge_to) + "-" + str(edge_from)) in seen_edges: continue attr_list = {} for attr in G.edge_attributes((edge_from, edge_to)): attr_list[str(attr[0])] = str(attr[1]) if str(G.edge_label((edge_from, edge_to))): attr_list['label'] = str(G.edge_label((edge_from, edge_to))) elif weighted: attr_list['label'] = str(G.edge_weight((edge_from, edge_to))) if weighted: attr_list['weight'] = str(G.edge_weight((edge_from, edge_to))) newEdge = pydot.Edge(str(edge_from), str(edge_to), **attr_list) dotG.add_edge(newEdge) seen_edges.add(str(edge_from) + "-" + str(edge_to)) return dotG.to_string() def read_hypergraph(string): """ Read a hypergraph from a string in dot format. Nodes and edges specified in the input will be added to the current hypergraph. @type string: string @param string: Input string in dot format specifying a graph. @rtype: hypergraph @return: Hypergraph """ hgr = hypergraph() dotG = pydot.graph_from_dot_data(string) # Read the hypernode nodes... 
# Note 1: We need to assume that all of the nodes are listed since we need to know if they # are a hyperedge or a normal node # Note 2: We should read in all of the nodes before putting in the links for each_node in dotG.get_nodes(): if 'hypernode' == each_node.get('hyper_node_type'): hgr.add_node(each_node.get_name()) elif 'hyperedge' == each_node.get('hyper_node_type'): hgr.add_hyperedge(each_node.get_name()) # Now read in the links to connect the hyperedges for each_link in dotG.get_edges(): if hgr.has_node(each_link.get_source()): link_hypernode = each_link.get_source() link_hyperedge = each_link.get_destination() elif hgr.has_node(each_link.get_destination()): link_hypernode = each_link.get_destination() link_hyperedge = each_link.get_source() hgr.link(link_hypernode, link_hyperedge) return hgr def write_hypergraph(hgr, colored = False): """ Return a string specifying the given hypergraph in DOT Language. @type hgr: hypergraph @param hgr: Hypergraph. @type colored: boolean @param colored: Whether hyperedges should be colored. @rtype: string @return: String specifying the hypergraph in DOT Language. """ dotG = pydot.Dot() if not 'name' in dir(hgr): dotG.set_name('hypergraph') else: dotG.set_name(hgr.name) colortable = {} colorcount = 0 # Add all of the nodes first for node in hgr.nodes(): newNode = pydot.Node(str(node), hyper_node_type = 'hypernode') dotG.add_node(newNode) for hyperedge in hgr.hyperedges(): if (colored): colortable[hyperedge] = colors[colorcount % len(colors)] colorcount += 1 newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge', \ color = str(colortable[hyperedge]), \ shape = 'point') else: newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge') dotG.add_node(newNode) for link in hgr.links(hyperedge): newEdge = pydot.Edge(str(hyperedge), str(link)) dotG.add_edge(newEdge) return dotG.to_string()
lgpl-3.0
KingxBanana/zulip
zproject/wsgi.py
25
1415
""" WSGI config for zulip project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os from os.path import dirname, abspath import sys BASE_DIR = dirname(dirname(abspath(__file__))) sys.path.append(BASE_DIR) import scripts.lib.setup_path_on_import os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings") import django django.setup() # We need to call setup to load applications. # Because import_module does not correctly handle safe circular imports we # need to import zerver.models first before the middleware tries to import it. import zerver.models # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
apache-2.0
gauribhoite/personfinder
env/google_appengine/lib/django-1.5/django/contrib/gis/tests/utils.py
102
1378
from django.conf import settings from django.db import DEFAULT_DB_ALIAS # function that will pass a test. def pass_test(*args): return def no_backend(test_func, backend): "Use this decorator to disable test on specified backend." if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend: return pass_test else: return test_func # Decorators to disable entire test functions for specific # spatial backends. def no_oracle(func): return no_backend(func, 'oracle') def no_postgis(func): return no_backend(func, 'postgis') def no_mysql(func): return no_backend(func, 'mysql') def no_spatialite(func): return no_backend(func, 'spatialite') # Shortcut booleans to omit only portions of tests. _default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] oracle = _default_db == 'oracle' postgis = _default_db == 'postgis' mysql = _default_db == 'mysql' spatialite = _default_db == 'spatialite' HAS_SPATIALREFSYS = True if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']: from django.contrib.gis.db.backends.oracle.models import SpatialRefSys elif postgis: from django.contrib.gis.db.backends.postgis.models import SpatialRefSys elif spatialite: from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys else: HAS_SPATIALREFSYS = False SpatialRefSys = None
apache-2.0
andmos/ansible
lib/ansible/modules/files/blockinfile.py
18
11773
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: blockinfile short_description: Insert/update/remove a text block surrounded by marker lines version_added: '2.0' description: - This module will insert/update/remove a block of multi-line text surrounded by customizable marker lines. author: - Yaegashi Takeshi (@yaegashi) options: path: description: - The file to modify. - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name). type: path required: yes aliases: [ dest, destfile, name ] state: description: - Whether the block should be there or not. type: str choices: [ absent, present ] default: present marker: description: - The marker line template. - C({mark}) will be replaced with the values C(in marker_begin) (default="BEGIN") and C(marker_end) (default="END"). - Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs. type: str default: '# {mark} ANSIBLE MANAGED BLOCK' block: description: - The text to insert inside the marker lines. - If it is missing or an empty string, the block will be removed as if C(state) were specified to C(absent). type: str default: '' aliases: [ content ] insertafter: description: - If specified, the block will be inserted after the last match of specified regular expression. - A special value is available; C(EOF) for inserting the block at the end of the file. - If specified regular expression has no matches, C(EOF) will be used instead. 
type: str choices: [ EOF, '*regex*' ] default: EOF insertbefore: description: - If specified, the block will be inserted before the last match of specified regular expression. - A special value is available; C(BOF) for inserting the block at the beginning of the file. - If specified regular expression has no matches, the block will be inserted at the end of the file. type: str choices: [ BOF, '*regex*' ] create: description: - Create a new file if it does not exist. type: bool default: no backup: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: no marker_begin: description: - This will be inserted at C({mark}) in the opening ansible block marker. type: str default: BEGIN version_added: '2.5' marker_end: required: false description: - This will be inserted at C({mark}) in the closing ansible block marker. type: str default: END version_added: '2.5' notes: - This module supports check mode. - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense. - When more then one block should be handled in one file you must change the I(marker) per task. 
extends_documentation_fragment: - files - validate ''' EXAMPLES = r''' # Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path' - name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config blockinfile: path: /etc/ssh/sshd_config block: | Match User ansible-agent PasswordAuthentication no - name: Insert/Update eth0 configuration stanza in /etc/network/interfaces (it might be better to copy files into /etc/network/interfaces.d/) blockinfile: path: /etc/network/interfaces block: | iface eth0 inet static address 192.0.2.23 netmask 255.255.255.0 - name: Insert/Update configuration using a local file and validate it blockinfile: block: "{{ lookup('file', './local/ssh_config') }}" dest: /etc/ssh/ssh_config backup: yes validate: /usr/sbin/sshd -T -f %s - name: Insert/Update HTML surrounded by custom markers after <body> line blockinfile: path: /var/www/html/index.html marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->" insertafter: "<body>" block: | <h1>Welcome to {{ ansible_hostname }}</h1> <p>Last updated on {{ ansible_date_time.iso8601 }}</p> - name: Remove HTML as well as surrounding markers blockinfile: path: /var/www/html/index.html marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->" block: "" - name: Add mappings to /etc/hosts blockinfile: path: /etc/hosts block: | {{ item.ip }} {{ item.name }} marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}" with_items: - { name: host1, ip: 10.10.1.10 } - { name: host2, ip: 10.10.1.11 } - { name: host3, ip: 10.10.1.12 } ''' import re import os import tempfile from ansible.module_utils.six import b from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_bytes def write_changes(module, contents, path): tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) f = os.fdopen(tmpfd, 'wb') f.write(contents) f.close() validate = module.params.get('validate', None) valid = not validate if validate: if "%s" not in validate: module.fail_json(msg="validate must contain %%s: 
%s" % (validate)) (rc, out, err) = module.run_command(validate % tmpfile) valid = rc == 0 if rc != 0: module.fail_json(msg='failed to validate: ' 'rc:%s error:%s' % (rc, err)) if valid: module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes']) def check_file_attrs(module, changed, message, diff): file_args = module.load_file_common_arguments(module.params) if module.set_file_attributes_if_different(file_args, False, diff=diff): if changed: message += " and " changed = True message += "ownership, perms or SE linux context changed" return message, changed def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']), state=dict(type='str', default='present', choices=['absent', 'present']), marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'), block=dict(type='str', default='', aliases=['content']), insertafter=dict(type='str'), insertbefore=dict(type='str'), create=dict(type='bool', default=False), backup=dict(type='bool', default=False), validate=dict(type='str'), marker_begin=dict(type='str', default='BEGIN'), marker_end=dict(type='str', default='END'), ), mutually_exclusive=[['insertbefore', 'insertafter']], add_file_common_args=True, supports_check_mode=True ) params = module.params path = params['path'] if os.path.isdir(path): module.fail_json(rc=256, msg='Path %s is a directory !' % path) path_exists = os.path.exists(path) if not path_exists: if not module.boolean(params['create']): module.fail_json(rc=257, msg='Path %s does not exist !' 
% path) destpath = os.path.dirname(path) if not os.path.exists(destpath) and not module.check_mode: try: os.makedirs(destpath) except Exception as e: module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (destpath, e[0], e[1])) original = None lines = [] else: f = open(path, 'rb') original = f.read() f.close() lines = original.splitlines() diff = {'before': '', 'after': '', 'before_header': '%s (content)' % path, 'after_header': '%s (content)' % path} if module._diff and original: diff['before'] = original insertbefore = params['insertbefore'] insertafter = params['insertafter'] block = to_bytes(params['block']) marker = to_bytes(params['marker']) present = params['state'] == 'present' if not present and not path_exists: module.exit_json(changed=False, msg="File %s not present" % path) if insertbefore is None and insertafter is None: insertafter = 'EOF' if insertafter not in (None, 'EOF'): insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: insertre = None marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) if present and block: # Escape seqeuences like '\n' need to be handled in Ansible 1.x if module.ansible_version.startswith('1.'): block = re.sub('', block, '') blocklines = [marker0] + block.splitlines() + [marker1] else: blocklines = [] n0 = n1 = None for i, line in enumerate(lines): if line == marker0: n0 = i if line == marker1: n1 = i if None in (n0, n1): n0 = None if insertre is not None: for i, line in enumerate(lines): if insertre.search(line): n0 = i if n0 is None: n0 = len(lines) elif insertafter is not None: n0 += 1 elif insertbefore is not None: n0 = 0 # insertbefore=BOF else: n0 = len(lines) # insertafter=EOF elif n0 < n1: lines[n0:n1 + 1] = [] else: lines[n1:n0 + 1] = [] n0 = n1 lines[n0:n0] = blocklines 
if lines: result = b('\n').join(lines) if original is None or original.endswith(b('\n')): result += b('\n') else: result = b'' if module._diff: diff['after'] = result if original == result: msg = '' changed = False elif original is None: msg = 'File created' changed = True elif not blocklines: msg = 'Block removed' changed = True else: msg = 'Block inserted' changed = True if changed and not module.check_mode: if module.boolean(params['backup']) and path_exists: module.backup_local(path) # We should always follow symlinks so that we change the real file real_path = os.path.realpath(params['path']) write_changes(module, result, real_path) if module.check_mode and not path_exists: module.exit_json(changed=changed, msg=msg, diff=diff) attr_diff = {} msg, changed = check_file_attrs(module, changed, msg, attr_diff) attr_diff['before_header'] = '%s (file attributes)' % path attr_diff['after_header'] = '%s (file attributes)' % path difflist = [diff, attr_diff] module.exit_json(changed=changed, msg=msg, diff=difflist) if __name__ == '__main__': main()
gpl-3.0
TNT-Samuel/Coding-Projects
DNS Server/Source - Copy/Lib/site-packages/dask/bag/tests/test_bag.py
2
40727
# coding=utf-8 from __future__ import absolute_import, division, print_function import pytest import math import os import random import sys from collections import Iterator from itertools import repeat import partd from toolz import merge, join, filter, identity, valmap, groupby, pluck import dask import dask.bag as db from dask.bag.core import (Bag, lazify, lazify_task, map, collect, reduceby, reify, partition, inline_singleton_lists, optimize, from_delayed) from dask.bag.utils import assert_eq from dask.compatibility import BZ2File, GzipFile, PY2 from dask.delayed import Delayed from dask.utils import filetexts, tmpfile, tmpdir from dask.utils_test import inc, add dsk = {('x', 0): (range, 5), ('x', 1): (range, 5), ('x', 2): (range, 5)} L = list(range(5)) * 3 b = Bag(dsk, 'x', 3) def iseven(x): return x % 2 == 0 def isodd(x): return x % 2 == 1 def test_Bag(): assert b.name == 'x' assert b.npartitions == 3 def test_keys(): assert b.__dask_keys__() == sorted(dsk.keys()) def test_bag_map(): b = db.from_sequence(range(100), npartitions=10) b2 = db.from_sequence(range(100, 200), npartitions=10) x = b.compute() x2 = b2.compute() def myadd(a=1, b=2, c=3): return a + b + c assert db.map(myadd, b).compute() == list(map(myadd, x)) assert db.map(myadd, a=b).compute() == list(map(myadd, x)) assert db.map(myadd, b, b2).compute() == list(map(myadd, x, x2)) assert db.map(myadd, b, 10).compute() == [myadd(i, 10) for i in x] assert db.map(myadd, 10, b=b).compute() == [myadd(10, b=i) for i in x] sol = [myadd(i, b=j, c=100) for (i, j) in zip(x, x2)] assert db.map(myadd, b, b=b2, c=100).compute() == sol sol = [myadd(i, c=100) for (i, j) in zip(x, x2)] assert db.map(myadd, b, c=100).compute() == sol x_sum = sum(x) sol = [myadd(x_sum, b=i, c=100) for i in x2] assert db.map(myadd, b.sum(), b=b2, c=100).compute() == sol sol = [myadd(i, b=x_sum, c=100) for i in x2] assert db.map(myadd, b2, b.sum(), c=100).compute() == sol sol = [myadd(a=100, b=x_sum, c=i) for i in x2] assert 
db.map(myadd, a=100, b=b.sum(), c=b2).compute() == sol a = dask.delayed(10) assert db.map(myadd, b, a).compute() == [myadd(i, 10) for i in x] assert db.map(myadd, b, b=a).compute() == [myadd(i, b=10) for i in x] # Mispatched npartitions fewer_parts = db.from_sequence(range(100), npartitions=5) with pytest.raises(ValueError): db.map(myadd, b, fewer_parts) # No bags with pytest.raises(ValueError): db.map(myadd, b.sum(), 1, 2) # Unequal partitioning unequal = db.from_sequence(range(110), npartitions=10) with pytest.raises(ValueError): db.map(myadd, b, unequal, c=b2).compute() with pytest.raises(ValueError): db.map(myadd, b, b=unequal, c=b2).compute() def test_map_method(): b = db.from_sequence(range(100), npartitions=10) b2 = db.from_sequence(range(100, 200), npartitions=10) x = b.compute() x2 = b2.compute() def myadd(a, b=2, c=3): return a + b + c assert b.map(myadd).compute() == list(map(myadd, x)) assert b.map(myadd, b2).compute() == list(map(myadd, x, x2)) assert b.map(myadd, 10).compute() == [myadd(i, 10) for i in x] assert b.map(myadd, b=10).compute() == [myadd(i, b=10) for i in x] assert (b.map(myadd, b2, c=10).compute() == [myadd(i, j, 10) for (i, j) in zip(x, x2)]) x_sum = sum(x) assert (b.map(myadd, b.sum(), c=10).compute() == [myadd(i, x_sum, 10) for i in x]) # check that map works with multiarg functions. Can be removed after # deprecated behavior is removed assert b.map(add, b2).compute() == list(map(add, x, x2)) # check that map works with vararg functions. 
Can be removed after # deprecated behavior is removed def vararg_inc(*args): return inc(*args) assert_eq(b.map(vararg_inc), list(map(inc, x))) def test_starmap(): data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)] b = db.from_sequence(data, npartitions=2) def myadd(a, b, c=0): return a + b + c assert b.starmap(myadd).compute() == [myadd(*a) for a in data] assert b.starmap(myadd, c=10).compute() == [myadd(*a, c=10) for a in data] max_second = b.pluck(1).max() assert (b.starmap(myadd, c=max_second).compute() == [myadd(*a, c=max_second.compute()) for a in data]) c = dask.delayed(10) assert b.starmap(myadd, c=c).compute() == [myadd(*a, c=10) for a in data] def test_filter(): c = b.filter(iseven) expected = merge(dsk, dict(((c.name, i), (reify, (filter, iseven, (b.name, i)))) for i in range(b.npartitions))) assert c.dask == expected assert c.name == b.filter(iseven).name def test_remove(): f = lambda x: x % 2 == 0 c = b.remove(f) assert list(c) == [1, 3] * 3 assert c.name == b.remove(f).name def test_iter(): assert sorted(list(b)) == sorted(L) assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3) @pytest.mark.parametrize('func', [str, repr]) def test_repr(func): assert str(b.npartitions) in func(b) assert b.name[:5] in func(b) def test_pluck(): d = {('x', 0): [(1, 10), (2, 20)], ('x', 1): [(3, 30), (4, 40)]} b = Bag(d, 'x', 2) assert set(b.pluck(0)) == set([1, 2, 3, 4]) assert set(b.pluck(1)) == set([10, 20, 30, 40]) assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)]) assert b.pluck([1, 0]).name == b.pluck([1, 0]).name def test_pluck_with_default(): b = db.from_sequence(['Hello', '', 'World']) pytest.raises(IndexError, lambda: list(b.pluck(0))) assert list(b.pluck(0, None)) == ['H', None, 'W'] assert b.pluck(0, None).name == b.pluck(0, None).name assert b.pluck(0).name != b.pluck(0, None).name def test_unzip(): b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2)) one, two, three = b.unzip(3) assert list(one) == list(range(100)) 
assert list(three) == [i + 2 for i in range(100)] assert one.name == b.unzip(3)[0].name assert one.name != two.name def test_fold(): c = b.fold(add) assert c.compute() == sum(L) assert c.key == b.fold(add).key c2 = b.fold(add, initial=10) assert c2.key != c.key assert c2.compute() == sum(L) + 10 * b.npartitions assert c2.key == b.fold(add, initial=10).key c = db.from_sequence(range(5), npartitions=3) def binop(acc, x): acc = acc.copy() acc.add(x) return acc d = c.fold(binop, set.union, initial=set()) assert d.compute() == set(c) assert d.key == c.fold(binop, set.union, initial=set()).key d = db.from_sequence('hello') assert set(d.fold(lambda a, b: ''.join([a, b]), initial='').compute()) == set('hello') e = db.from_sequence([[1], [2], [3]], npartitions=2) assert set(e.fold(add, initial=[]).compute(scheduler='sync')) == set([1, 2, 3]) def test_distinct(): assert sorted(b.distinct()) == [0, 1, 2, 3, 4] assert b.distinct().name == b.distinct().name assert 'distinct' in b.distinct().name assert b.distinct().count().compute() == 5 bag = db.from_sequence([0] * 50, npartitions=50) assert bag.filter(None).distinct().compute() == [] def test_frequencies(): c = b.frequencies() assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3} c2 = b.frequencies(split_every=2) assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3} assert c.name == b.frequencies().name assert c.name != c2.name assert c2.name == b.frequencies(split_every=2).name # test bag with empty partitions b2 = db.from_sequence(range(20), partition_size=2) b2 = b2.filter(lambda x: x < 10) d = b2.frequencies() assert dict(d) == dict(zip(range(10), [1] * 10)) bag = db.from_sequence([0, 0, 0, 0], npartitions=4) bag2 = bag.filter(None).frequencies(split_every=2) assert_eq(bag2, []) def test_topk(): assert list(b.topk(4)) == [4, 4, 4, 3] c = b.topk(4, key=lambda x: -x) assert list(c) == [0, 0, 0, 1] c2 = b.topk(4, key=lambda x: -x, split_every=2) assert list(c2) == [0, 0, 0, 1] assert c.name != c2.name assert b.topk(4).name == 
b.topk(4).name @pytest.mark.parametrize('npartitions', [1, 2]) def test_topk_with_non_callable_key(npartitions): b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=npartitions) assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)] assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)] assert b.topk(2, key=1).name == b.topk(2, key=1).name def test_topk_with_multiarg_lambda(): b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2) assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)] def test_lambdas(): assert list(b.map(lambda x: x + 1)) == list(b.map(inc)) def test_reductions(): assert int(b.count()) == 15 assert int(b.sum()) == 30 assert int(b.max()) == 4 assert int(b.min()) == 0 assert b.any().compute() is True assert b.all().compute() is False assert b.all().key == b.all().key assert b.all().key != b.any().key def test_reduction_names(): assert b.sum().name.startswith('sum') assert b.reduction(sum, sum).name.startswith('sum') assert any(isinstance(k, str) and k.startswith('max') for k in b.reduction(sum, max).dask) assert b.reduction(sum, sum, name='foo').name.startswith('foo') def test_tree_reductions(): b = db.from_sequence(range(12)) c = b.reduction(sum, sum, split_every=2) d = b.reduction(sum, sum, split_every=6) e = b.reduction(sum, sum, split_every=5) assert c.compute() == d.compute() == e.compute() assert len(c.dask) > len(d.dask) c = b.sum(split_every=2) d = b.sum(split_every=5) assert c.compute() == d.compute() assert len(c.dask) > len(d.dask) assert c.key != d.key assert c.key == b.sum(split_every=2).key assert c.key != b.sum().key @pytest.mark.parametrize('npartitions', [1, 3, 4]) def test_aggregation(npartitions): L = list(range(15)) b = db.range(15, npartitions=npartitions) assert_eq(b.mean(), sum(L) / len(L)) assert_eq(b.sum(), sum(L)) assert_eq(b.count(), len(L)) @pytest.mark.parametrize('npartitions', [1, 10]) def test_non_splittable_reductions(npartitions): np = pytest.importorskip('numpy') data = list(range(100)) c = 
db.from_sequence(data, npartitions=npartitions) assert_eq(c.mean(), np.mean(data)) assert_eq(c.std(), np.std(data)) def test_std(): assert_eq(b.std(), math.sqrt(2.0)) assert float(b.std()) == math.sqrt(2.0) def test_var(): assert_eq(b.var(), 2.0) assert float(b.var()) == 2.0 @pytest.mark.parametrize('transform', [ identity, dask.delayed, lambda x: db.from_sequence(x, npartitions=1) ]) def test_join(transform): other = transform([1, 2, 3]) c = b.join(other, on_self=isodd, on_other=iseven) assert_eq(c, list(join(iseven, [1, 2, 3], isodd, list(b)))) assert_eq(b.join(other, isodd), list(join(isodd, [1, 2, 3], isodd, list(b)))) assert c.name == b.join(other, on_self=isodd, on_other=iseven).name def test_foldby(): c = b.foldby(iseven, add, 0, add, 0) assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values()) assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items()) assert c.name == b.foldby(iseven, add, 0, add, 0).name c = b.foldby(iseven, lambda acc, x: acc + x) assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items()) def test_foldby_tree_reduction(): dsk = list() for n in [1, 7, 32]: b = db.from_sequence(range(100), npartitions=n) c = b.foldby(iseven, add) dsk += [c] for m in [False, None, 2, 3]: d = b.foldby(iseven, add, split_every=m) e = b.foldby(iseven, add, 0, split_every=m) f = b.foldby(iseven, add, 0, add, split_every=m) g = b.foldby(iseven, add, 0, add, 0, split_every=m) dsk += [d,e,f,g] results = dask.compute(dsk) first = results[0] assert all([r == first for r in results]) def test_map_partitions(): assert list(b.map_partitions(len)) == [5, 5, 5] assert b.map_partitions(len).name == b.map_partitions(len).name assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name def test_map_partitions_args_kwargs(): x = [random.randint(-100, 100) for i in range(100)] y = [random.randint(-100, 100) for i in range(100)] dx = db.from_sequence(x, npartitions=10) dy = db.from_sequence(y, 
npartitions=10) def maximum(x, y=0): y = repeat(y) if isinstance(y, int) else y return [max(a, b) for (a, b) in zip(x, y)] sol = maximum(x, y=10) assert db.map_partitions(maximum, dx, y=10).compute() == sol assert dx.map_partitions(maximum, y=10).compute() == sol assert dx.map_partitions(maximum, 10).compute() == sol sol = maximum(x, y) assert db.map_partitions(maximum, dx, dy).compute() == sol assert dx.map_partitions(maximum, y=dy).compute() == sol assert dx.map_partitions(maximum, dy).compute() == sol dy_mean = dy.mean().apply(int) sol = maximum(x, int(sum(y) / len(y))) assert dx.map_partitions(maximum, y=dy_mean).compute() == sol assert dx.map_partitions(maximum, dy_mean).compute() == sol dy_mean = dask.delayed(dy_mean) assert dx.map_partitions(maximum, y=dy_mean).compute() == sol assert dx.map_partitions(maximum, dy_mean).compute() == sol def test_random_sample_size(): """ Number of randomly sampled elements are in the expected range. """ a = db.from_sequence(range(1000), npartitions=5) # we expect a size of approx. 100, but leave large margins to avoid # random failures assert 10 < len(list(a.random_sample(0.1, 42))) < 300 def test_random_sample_prob_range(): """ Specifying probabilities outside the range [0, 1] raises ValueError. """ a = db.from_sequence(range(50), npartitions=5) with pytest.raises(ValueError): a.random_sample(-1) with pytest.raises(ValueError): a.random_sample(1.1) def test_random_sample_repeated_computation(): """ Repeated computation of a defined random sampling operation generates identical results. """ a = db.from_sequence(range(50), npartitions=5) b = a.random_sample(0.2) assert list(b) == list(b) # computation happens here def test_random_sample_different_definitions(): """ Repeatedly defining a random sampling operation yields different results upon computation if no random seed is specified. 
""" a = db.from_sequence(range(50), npartitions=5) assert list(a.random_sample(0.5)) != list(a.random_sample(0.5)) assert a.random_sample(0.5).name != a.random_sample(0.5).name def test_random_sample_random_state(): """ Sampling with fixed random seed generates identical results. """ a = db.from_sequence(range(50), npartitions=5) b = a.random_sample(0.5, 1234) c = a.random_sample(0.5, 1234) assert list(b) == list(c) def test_lazify_task(): task = (sum, (reify, (map, inc, [1, 2, 3]))) assert lazify_task(task) == (sum, (map, inc, [1, 2, 3])) task = (reify, (map, inc, [1, 2, 3])) assert lazify_task(task) == task a = (reify, (map, inc, (reify, (filter, iseven, 'y')))) b = (reify, (map, inc, (filter, iseven, 'y'))) assert lazify_task(a) == b f = lambda x: x def test_lazify(): a = {'x': (reify, (map, inc, (reify, (filter, iseven, 'y')))), 'a': (f, 'x'), 'b': (f, 'x')} b = {'x': (reify, (map, inc, (filter, iseven, 'y'))), 'a': (f, 'x'), 'b': (f, 'x')} assert lazify(a) == b def test_inline_singleton_lists(): inp = {'b': (list, 'a'), 'c': (f, 'b', 1)} out = {'c': (f, (list, 'a'), 1)} assert inline_singleton_lists(inp) == out out = {'c': (f, 'a', 1)} assert optimize(inp, ['c'], rename_fused_keys=False) == out inp = {'b': (list, 'a'), 'c': (f, 'b', 1), 'd': (f, 'b', 2)} assert inline_singleton_lists(inp) == inp inp = {'b': (4, 5)} # doesn't inline constants assert inline_singleton_lists(inp) == inp def test_take(): assert list(b.take(2)) == [0, 1] assert b.take(2) == (0, 1) assert isinstance(b.take(2, compute=False), Bag) def test_take_npartitions(): assert list(b.take(6, npartitions=2)) == [0, 1, 2, 3, 4, 0] assert b.take(6, npartitions=-1) == (0, 1, 2, 3, 4, 0) assert b.take(3, npartitions=-1) == (0, 1, 2) with pytest.raises(ValueError): b.take(1, npartitions=5) def test_take_npartitions_warn(): # Use single-threaded scheduler so warnings are properly captured in the # same process with dask.config.set(scheduler='sync'): with pytest.warns(UserWarning): b.take(100) with 
pytest.warns(UserWarning): b.take(7) with pytest.warns(None) as rec: b.take(7, npartitions=2) assert len(rec) == 0 with pytest.warns(None) as rec: b.take(7, warn=False) assert len(rec) == 0 def test_map_is_lazy(): from dask.bag.core import map assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator) def test_can_use_dict_to_make_concrete(): assert isinstance(dict(b.frequencies()), dict) @pytest.mark.slow @pytest.mark.network @pytest.mark.skip(reason="Hangs") def test_from_url(): a = db.from_url(['http://google.com', 'http://github.com']) assert a.npartitions == 2 b = db.from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') assert b.npartitions == 1 assert b'Dask\n' in b.take(10) def test_read_text(): with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns: assert (set(line.strip() for line in db.read_text(fns)) == set('ABCD')) assert (set(line.strip() for line in db.read_text('a*.log')) == set('ABCD')) pytest.raises(ValueError, lambda: db.read_text('non-existent-*-path')) def test_read_text_large(): with tmpfile() as fn: with open(fn, 'wb') as f: f.write(('Hello, world!' + os.linesep).encode() * 100) b = db.read_text(fn, blocksize=100) c = db.read_text(fn) assert len(b.dask) > 5 assert list(map(str, b.str.strip())) == list(map(str, c.str.strip())) d = db.read_text([fn], blocksize=100) assert list(b) == list(d) def test_read_text_encoding(): with tmpfile() as fn: with open(fn, 'wb') as f: f.write((u'你好!' 
+ os.linesep).encode('gb18030') * 100) b = db.read_text(fn, blocksize=100, encoding='gb18030') c = db.read_text(fn, encoding='gb18030') assert len(b.dask) > 5 assert (list(b.str.strip().map(lambda x: x.encode('utf-8'))) == list(c.str.strip().map(lambda x: x.encode('utf-8')))) d = db.read_text([fn], blocksize=100, encoding='gb18030') assert list(b) == list(d) def test_read_text_large_gzip(): with tmpfile('gz') as fn: f = GzipFile(fn, 'wb') f.write(b'Hello, world!\n' * 100) f.close() with pytest.raises(ValueError): db.read_text(fn, blocksize=50, linedelimiter='\n') c = db.read_text(fn) assert c.npartitions == 1 @pytest.mark.slow @pytest.mark.network def test_from_s3(): # note we don't test connection modes with aws_access_key and # aws_secret_key because these are not on travis-ci pytest.importorskip('s3fs') five_tips = (u'total_bill,tip,sex,smoker,day,time,size\n', u'16.99,1.01,Female,No,Sun,Dinner,2\n', u'10.34,1.66,Male,No,Sun,Dinner,3\n', u'21.01,3.5,Male,No,Sun,Dinner,3\n', u'23.68,3.31,Male,No,Sun,Dinner,2\n') # test compressed data e = db.read_text('s3://tip-data/t*.gz', storage_options=dict(anon=True)) assert e.take(5) == five_tips # test multiple keys in bucket c = db.read_text(['s3://tip-data/tips.gz', 's3://tip-data/tips.json', 's3://tip-data/tips.csv'], storage_options=dict(anon=True)) assert c.npartitions == 3 def test_from_sequence(): b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3) assert len(b.dask) == 3 assert set(b) == set([1, 2, 3, 4, 5]) def test_from_long_sequence(): L = list(range(1001)) b = db.from_sequence(L) assert set(b) == set(L) def test_product(): b2 = b.product(b) assert b2.npartitions == b.npartitions**2 assert set(b2) == set([(i, j) for i in L for j in L]) x = db.from_sequence([1, 2, 3, 4]) y = db.from_sequence([10, 20, 30]) z = x.product(y) assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]]) assert z.name != b2.name assert z.name == x.product(y).name def test_partition_collect(): with partd.Pickle() as p: 
partition(identity, range(6), 3, p) assert set(p.get(0)) == set([0, 3]) assert set(p.get(1)) == set([1, 4]) assert set(p.get(2)) == set([2, 5]) assert sorted(collect(identity, 0, p, '')) == [(0, [0]), (3, [3])] def test_groupby(): c = b.groupby(identity) result = dict(c) assert result == {0: [0, 0 ,0], 1: [1, 1, 1], 2: [2, 2, 2], 3: [3, 3, 3], 4: [4, 4, 4]} assert c.npartitions == b.npartitions assert c.name == b.groupby(identity).name assert c.name != b.groupby(lambda x: x + 1).name def test_groupby_with_indexer(): b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]]) result = dict(b.groupby(0)) assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]], 2: [[2, 3, 4]]} def test_groupby_with_npartitions_changed(): result = b.groupby(lambda x: x, npartitions=1) result2 = dict(result) assert result2 == {0: [0, 0 ,0], 1: [1, 1, 1], 2: [2, 2, 2], 3: [3, 3, 3], 4: [4, 4, 4]} assert result.npartitions == 1 def test_concat(): a = db.from_sequence([1, 2, 3]) b = db.from_sequence([4, 5, 6]) c = db.concat([a, b]) assert list(c) == [1, 2, 3, 4, 5, 6] assert c.name == db.concat([a, b]).name def test_flatten(): b = db.from_sequence([[1], [2, 3]]) assert list(b.flatten()) == [1, 2, 3] assert b.flatten().name == b.flatten().name def test_concat_after_map(): a = db.from_sequence([1, 2]) b = db.from_sequence([4, 5]) result = db.concat([a.map(inc), b]) assert list(result) == [2, 3, 4, 5] def test_args(): c = b.map(lambda x: x + 1) d = Bag(*c._args) assert list(c) == list(d) assert c.npartitions == d.npartitions def test_to_dataframe(): dd = pytest.importorskip('dask.dataframe') pd = pytest.importorskip('pandas') def check_parts(df, sol): assert all((p.dtypes == sol.dtypes).all() for p in dask.compute(*df.to_delayed())) dsk = {('test', 0): [(1, 2)], ('test', 1): [], ('test', 2): [(10, 20), (100, 200)]} b = Bag(dsk, 'test', 3) sol = pd.DataFrame(b.compute(), columns=['a', 'b']) # Elements are tuples df = b.to_dataframe() dd.utils.assert_eq(df, sol.rename(columns={'a': 0, 'b': 1}), 
check_index=False) df = b.to_dataframe(columns=['a', 'b']) dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) df = b.to_dataframe(meta=[('a', 'i8'), ('b', 'i8')]) dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) # Elements are dictionaries b = b.map(lambda x: dict(zip(['a', 'b'], x))) df = b.to_dataframe() dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) assert df._name == b.to_dataframe()._name # With metadata specified for meta in [sol, [('a', 'i8'), ('b', 'i8')]]: df = b.to_dataframe(meta=meta) dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) # Error to specify both columns and meta with pytest.raises(ValueError): b.to_dataframe(columns=['a', 'b'], meta=sol) # Inference fails if empty first partition b2 = b.filter(lambda x: x['a'] > 200) with pytest.raises(ValueError): b2.to_dataframe() # Single column b = b.pluck('a') sol = sol[['a']] df = b.to_dataframe(meta=sol) dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) # Works with iterators and tuples sol = pd.DataFrame({'a': range(100)}) b = db.from_sequence(range(100), npartitions=5) for f in [iter, tuple]: df = b.map_partitions(f).to_dataframe(meta=sol) dd.utils.assert_eq(df, sol, check_index=False) check_parts(df, sol) ext_open = [('gz', GzipFile), ('', open)] if not PY2: ext_open.append(('bz2', BZ2File)) @pytest.mark.parametrize('ext,myopen', ext_open) def test_to_textfiles(ext, myopen): b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2) with tmpdir() as dir: c = b.to_textfiles(os.path.join(dir, '*.' + ext), compute=False) dask.compute(*c, scheduler='sync') assert os.path.exists(os.path.join(dir, '1.' + ext)) f = myopen(os.path.join(dir, '1.' 
+ ext), 'rb') text = f.read() if hasattr(text, 'decode'): text = text.decode() assert 'xyz' in text f.close() def test_to_textfiles_name_function_preserves_order(): seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'] b = db.from_sequence(seq, npartitions=16) with tmpdir() as dn: b.to_textfiles(dn) out = db.read_text(os.path.join(dn, "*"), encoding='ascii').map(str).map(str.strip).compute() assert seq == out @pytest.mark.skipif(sys.version_info[:2] == (3,3), reason="Python3.3 uses pytest2.7.2, w/o warns method") def test_to_textfiles_name_function_warn(): seq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'] a = db.from_sequence(seq, npartitions=16) with tmpdir() as dn: with pytest.warns(None): a.to_textfiles(dn, name_function=str) def test_to_textfiles_encoding(): b = db.from_sequence([u'汽车', u'苹果', u'天气'], npartitions=2) for ext, myopen in [('gz', GzipFile), ('bz2', BZ2File), ('', open)]: if ext == 'bz2' and PY2: continue with tmpdir() as dir: c = b.to_textfiles(os.path.join(dir, '*.' + ext), encoding='gb18030', compute=False) dask.compute(*c) assert os.path.exists(os.path.join(dir, '1.' + ext)) f = myopen(os.path.join(dir, '1.' 
+ ext), 'rb') text = f.read() if hasattr(text, 'decode'): text = text.decode('gb18030') assert u'天气' in text f.close() def test_to_textfiles_inputs(): B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2) with tmpfile() as a: with tmpfile() as b: B.to_textfiles([a, b]) assert os.path.exists(a) assert os.path.exists(b) with tmpdir() as dirname: B.to_textfiles(dirname) assert os.path.exists(dirname) assert os.path.exists(os.path.join(dirname, '0.part')) with pytest.raises(TypeError): B.to_textfiles(5) def test_to_textfiles_endlines(): b = db.from_sequence(['a', 'b', 'c'], npartitions=1) with tmpfile() as fn: for last_endline in False, True: b.to_textfiles([fn], last_endline=last_endline) with open(fn, 'r') as f: result = f.readlines() assert result == ['a\n', 'b\n', 'c\n' if last_endline else 'c'] def test_string_namespace(): b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'], npartitions=2) assert 'split' in dir(b.str) assert 'match' in dir(b.str) assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith'] assert list(b.str.split(' ')) == [['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']] assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith'] pytest.raises(AttributeError, lambda: b.str.sfohsofhf) assert b.str.match('*Smith').name == b.str.match('*Smith').name assert b.str.match('*Smith').name != b.str.match('*John').name def test_string_namespace_with_unicode(): b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'], npartitions=2) assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith'] def test_str_empty_split(): b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'], npartitions=2) assert list(b.str.split()) == [['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']] def test_map_with_iterator_function(): b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2) def f(L): for x in L: yield x + 1 c = b.map(f) assert list(c) == [[2, 3, 4], [5, 6, 7]] 
def test_ensure_compute_output_is_concrete(): b = db.from_sequence([1, 2, 3]) result = b.map(lambda x: x + 1).compute() assert not isinstance(result, Iterator) class BagOfDicts(db.Bag): def get(self, key, default=None): return self.map(lambda d: d.get(key, default)) def set(self, key, value): def setter(d): d[key] = value return d return self.map(setter) def test_bag_class_extend(): dictbag = BagOfDicts(*db.from_sequence([{'a': {'b': 'c'}}])._args) assert dictbag.get('a').get('b').compute()[0] == 'c' assert dictbag.get('a').set('d', 'EXTENSIBILITY!!!').compute()[0] == \ {'b': 'c', 'd': 'EXTENSIBILITY!!!'} assert isinstance(dictbag.get('a').get('b'), BagOfDicts) def test_gh715(): bin_data = u'\u20ac'.encode('utf-8') with tmpfile() as fn: with open(fn, 'wb') as f: f.write(bin_data) a = db.read_text(fn) assert a.compute()[0] == bin_data.decode('utf-8') def test_bag_compute_forward_kwargs(): x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1) x.compute(bogus_keyword=10) def test_to_delayed(): b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3) a, b, c = b.map(inc).to_delayed() assert all(isinstance(x, Delayed) for x in [a, b, c]) assert b.compute() == [4, 5] b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3) t = b.sum().to_delayed() assert isinstance(t, Delayed) assert t.compute() == 21 def test_to_delayed_optimize_graph(): b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1) b2 = b.map(inc).map(inc).map(inc) [d] = b2.to_delayed() text = str(dict(d.dask)) assert text.count('reify') == 1 [d2] = b2.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(b2.dask) assert d.compute() == d2.compute() x = b2.sum() d = x.to_delayed() text = str(dict(d.dask)) assert text.count('reify') == 0 d2 = x.to_delayed(optimize_graph=False) assert dict(d2.dask) == dict(x.dask) assert d.compute() == d2.compute() [d] = b2.to_textfiles('foo.txt', compute=False) text = str(dict(d.dask)) assert text.count('reify') <= 0 def test_from_delayed(): from dask.delayed import 
delayed a, b, c = delayed([1, 2, 3]), delayed([4, 5, 6]), delayed([7, 8, 9]) bb = from_delayed([a, b, c]) assert bb.name == from_delayed([a, b, c]).name assert isinstance(bb, Bag) assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9] asum_value = delayed(lambda X: sum(X))(a) asum_item = db.Item.from_delayed(asum_value) assert asum_value.compute() == asum_item.compute() == 6 def test_from_delayed_iterator(): from dask.delayed import delayed def lazy_records(n): return ({'operations': [1, 2]} for _ in range(n)) delayed_records = delayed(lazy_records, pure=False) bag = db.from_delayed([delayed_records(5) for _ in range(5)]) assert db.compute( bag.count(), bag.pluck('operations').count(), bag.pluck('operations').flatten().count(), scheduler='sync', ) == (25, 25, 50) def test_range(): for npartitions in [1, 7, 10, 28]: b = db.range(100, npartitions=npartitions) assert len(b.dask) == npartitions assert b.npartitions == npartitions assert list(b) == list(range(100)) @pytest.mark.parametrize("npartitions", [1, 7, 10, 28]) def test_zip(npartitions, hi=1000): evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions) odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions) pairs = db.zip(evens, odds) assert pairs.npartitions == npartitions assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2))) @pytest.mark.parametrize('nin', [1, 2, 7, 11, 23]) @pytest.mark.parametrize('nout', [1, 2, 5, 12, 23]) def test_repartition(nin, nout): b = db.from_sequence(range(100), npartitions=nin) c = b.repartition(npartitions=nout) assert c.npartitions == nout assert_eq(b, c) results = dask.get(c.dask, c.__dask_keys__()) assert all(results) def test_repartition_names(): b = db.from_sequence(range(100), npartitions=5) c = b.repartition(2) assert b.name != c.name d = b.repartition(20) assert b.name != c.name assert c.name != d.name c = b.repartition(5) assert b is c @pytest.mark.skipif('not db.core._implement_accumulate') def test_accumulate(): parts = [[1, 2, 3], [4, 5], [], 
[6, 7]] dsk = dict((('test', i), p) for (i, p) in enumerate(parts)) b = db.Bag(dsk, 'test', len(parts)) r = b.accumulate(add) assert r.name == b.accumulate(add).name assert r.name != b.accumulate(add, -1).name assert r.compute() == [1, 3, 6, 10, 15, 21, 28] assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27] assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29] b = db.from_sequence([1, 2, 3], npartitions=1) assert b.accumulate(add).compute() == [1, 3, 6] def test_groupby_tasks(): b = db.from_sequence(range(160), npartitions=4) out = b.groupby(lambda x: x % 10, max_branch=4, shuffle='tasks') partitions = dask.get(out.dask, out.__dask_keys__()) for a in partitions: for b in partitions: if a is not b: assert not set(pluck(0, a)) & set(pluck(0, b)) b = db.from_sequence(range(1000), npartitions=100) out = b.groupby(lambda x: x % 123, shuffle='tasks') assert len(out.dask) < 100**2 partitions = dask.get(out.dask, out.__dask_keys__()) for a in partitions: for b in partitions: if a is not b: assert not set(pluck(0, a)) & set(pluck(0, b)) b = db.from_sequence(range(10000), npartitions=345) out = b.groupby(lambda x: x % 2834, max_branch=24, shuffle='tasks') partitions = dask.get(out.dask, out.__dask_keys__()) for a in partitions: for b in partitions: if a is not b: assert not set(pluck(0, a)) & set(pluck(0, b)) def test_groupby_tasks_names(): b = db.from_sequence(range(160), npartitions=4) func = lambda x: x % 10 func2 = lambda x: x % 20 assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) == set(b.groupby(func, max_branch=4, shuffle='tasks').dask)) assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) != set(b.groupby(func, max_branch=2, shuffle='tasks').dask)) assert (set(b.groupby(func, max_branch=4, shuffle='tasks').dask) != set(b.groupby(func2, max_branch=4, shuffle='tasks').dask)) @pytest.mark.parametrize('size,npartitions,groups', [(1000, 20, 100), (12345, 234, 1042)]) def test_groupby_tasks_2(size, 
npartitions, groups): func = lambda x: x % groups b = db.range(size, npartitions=npartitions).groupby(func, shuffle='tasks') result = b.compute(scheduler='sync') assert dict(result) == groupby(func, range(size)) def test_groupby_tasks_3(): func = lambda x: x % 10 b = db.range(20, npartitions=5).groupby(func, shuffle='tasks', max_branch=2) result = b.compute(scheduler='sync') assert dict(result) == groupby(func, range(20)) # assert b.npartitions == 5 def test_to_textfiles_empty_partitions(): with tmpdir() as d: b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str) b.to_textfiles(os.path.join(d, '*.txt')) assert len(os.listdir(d)) == 5 def test_reduction_empty(): b = db.from_sequence(range(10), npartitions=100) assert_eq(b.filter(lambda x: x % 2 == 0).max(), 8) assert_eq(b.filter(lambda x: x % 2 == 0).min(), 0) @pytest.mark.parametrize('npartitions', [1, 2, 4]) def test_reduction_empty_aggregate(npartitions): b = db.from_sequence([0, 0, 0, 1], npartitions=npartitions).filter(None) assert_eq(b.min(split_every=2), 1) vals = db.compute(b.min(split_every=2), b.max(split_every=2), scheduler='sync') assert vals == (1, 1) with pytest.raises(ValueError): b = db.from_sequence([0, 0, 0, 0], npartitions=npartitions) b.filter(None).min(split_every=2).compute(scheduler='sync') class StrictReal(int): def __eq__(self, other): assert isinstance(other, StrictReal) return self.real == other.real def __ne__(self, other): assert isinstance(other, StrictReal) return self.real != other.real def test_reduction_with_non_comparable_objects(): b = db.from_sequence([StrictReal(x) for x in range(10)], partition_size=2) assert_eq(b.fold(max, max), StrictReal(9)) def test_reduction_with_sparse_matrices(): sp = pytest.importorskip('scipy.sparse') b = db.from_sequence([sp.csr_matrix([0]) for x in range(4)], partition_size=2) def sp_reduce(a, b): return sp.vstack([a, b]) assert b.fold(sp_reduce, sp_reduce).compute(scheduler='sync').shape == (4, 1) def test_empty(): 
list(db.from_sequence([])) == [] def test_bag_picklable(): from pickle import loads, dumps b = db.from_sequence(range(100)) b2 = loads(dumps(b)) assert b.compute() == b2.compute() s = b.sum() s2 = loads(dumps(s)) assert s.compute() == s2.compute() def test_msgpack_unicode(): b = db.from_sequence([{"a": 1}]).groupby("a") result = b.compute(scheduler='sync') assert dict(result) == {1: [{'a': 1}]} def test_bag_with_single_callable(): f = lambda: None b = db.from_sequence([f]) assert_eq(b, [f]) def test_optimize_fuse_keys(): x = db.range(10, npartitions=2) y = x.map(inc) z = y.map(inc) dsk = z.__dask_optimize__(z.dask, z.__dask_keys__()) assert not set(y.dask) & set(dsk) dsk = z.__dask_optimize__(z.dask, z.__dask_keys__(), fuse_keys=y.__dask_keys__()) assert all(k in dsk for k in y.__dask_keys__()) def test_reductions_are_lazy(): current = [None] def part(): for i in range(10): current[0] = i yield i def func(part): assert current[0] == 0 return sum(part) b = Bag({('foo', 0): part()}, 'foo', 1) res = b.reduction(func, sum) assert_eq(res, sum(range(10))) def test_repeated_groupby(): b = db.range(10, npartitions=4) c = b.groupby(lambda x: x % 3) assert valmap(len, dict(c)) == valmap(len, dict(c)) def test_temporary_directory(tmpdir): b = db.range(10, npartitions=4) with dask.config.set(temporary_directory=str(tmpdir)): b2 = b.groupby(lambda x: x % 2) b2.compute() assert any(fn.endswith('.partd') for fn in os.listdir(str(tmpdir))) def test_empty_bag(): b = db.from_sequence([]) assert_eq(b.map(inc).all(), True) assert_eq(b.map(inc).any(), False) assert_eq(b.map(inc).sum(), False) assert_eq(b.map(inc).count(), False) def test_bag_paths(): b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2) assert b.to_textfiles('foo*') == ['foo0', 'foo1'] os.remove('foo0') os.remove('foo1')
gpl-3.0
Haifen/android_kernel_google_msm
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
skoslowski/gnuradio
gr-qtgui/examples/pyqt_time_raster_b.py
3
2198
#!/usr/bin/env python # # Copyright 2012,2013,2015 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # from __future__ import print_function from __future__ import unicode_literals from gnuradio import gr from gnuradio import blocks from gnuradio import blocks import sys try: from gnuradio import qtgui from PyQt5 import QtWidgets, Qt import sip except ImportError: print("Error: Program requires PyQt5 and gr-qtgui.") sys.exit(1) class dialog_box(QtWidgets.QWidget): def __init__(self, display): QtWidgets.QWidget.__init__(self, None) self.setWindowTitle('PyQt Test GUI') self.boxlayout = QtWidgets.QBoxLayout(QtWidgets.QBoxLayout.LeftToRight, self) self.boxlayout.addWidget(display, 1) self.resize(800, 500) class my_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) self.qapp = QtWidgets.QApplication(sys.argv) data0 = 10*[0,] + 40*[1,0] + 10*[0,] data0 += 10*[0,] + 40*[0,1] + 10*[0,] data1 = 20*[0,] + [0,0,0,1,1,1,0,0,0,0] + 70*[0,] # Adjust these to change the layout of the plot. # Can be set to fractions. ncols = 100.25 nrows = 100 fs = 200 src0 = blocks.vector_source_b(data0, True) src1 = blocks.vector_source_b(data1, True) thr = blocks.throttle(gr.sizeof_char, 50000) head = blocks.head(gr.sizeof_char, 10000000) self.snk1 = qtgui.time_raster_sink_b(fs, nrows, ncols, [], [], "Time Raster Example", 2) self.connect(src0, thr, (self.snk1, 0)) self.connect(src1, (self.snk1, 1)) # Get the reference pointer to the SpectrumDisplayForm QWidget pyQt = self.snk1.pyqwidget() # Wrap the pointer as a PyQt SIP object # This can now be manipulated as a PyQt5.QtWidgets.QWidget pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget) self.main_box = dialog_box(pyWin) self.main_box.show() if __name__ == "__main__": tb = my_top_block(); tb.start() tb.qapp.exec_() tb.stop()
gpl-3.0
silly-wacky-3-town-toon/SOURCE-COD
toontown/toon/GroupPanel.py
1
18262
from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from toontown.toonbase import TTLocalizer from direct.gui.DirectGui import * from panda3d.core import * from panda3d.direct import * from direct.showbase import DirectObject from toontown.toon import ToonAvatarPanel from toontown.toontowngui import TTDialog from otp.nametag import NametagGlobals class GroupPanel(DirectObject.DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('GroupPanel') def __init__(self, boardingParty): self.boardingParty = boardingParty self.leaderId = self.boardingParty.getGroupLeader(localAvatar.doId) self.elevatorIdList = self.boardingParty.getElevatorIdList() self.frame = None self.confirmQuitDialog = None self.goButton = None self.destScrollList = None self.destFrame = None self.goingToLabel = None self.destIndexSelected = 0 self.__load() self.ignore('stickerBookEntered') self.accept('stickerBookEntered', self.__forceHide) self.ignore('stickerBookExited') self.accept('stickerBookExited', self.__forceShow) return def cleanup(self): base.setCellsAvailable(base.leftCells, 1) self.quitButton.destroy() self.hideButton.destroy() self.showButton.destroy() self.scrollList.destroy() if self.goButton: self.goButton.destroy() self.goButton = None if self.destScrollList: self.destScrollList.destroy() self.destScrollList = None if self.destFrame: self.destFrame.destroy() self.destFrame = None if self.goingToLabel: self.goingToLabel.destroy() self.goingToLabel = None if self.frame: self.frame.destroy() self.frame = None self.leaveButton = None self.boardingParty = None self.ignoreAll() return def __load(self): self.guiBg = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_groupListBg') self.__defineConstants() if self.boardingParty.maxSize == 4: bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop_half') bgImageZPos = 0.14 frameZPos = -0.121442 quitButtonZPos = -0.019958 else: bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop') 
bgImageZPos = 0 frameZPos = 0.0278943 quitButtonZPos = -0.30366 guiButtons = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_status') self.frame = DirectFrame(relief=None, parent=base.a2dLeftCenter, image=bgImage, image_scale=(0.5, 1, 0.5), image_pos=(0, 0, bgImageZPos), textMayChange=1, pos=(0.32, 0, frameZPos)) self.frameBounds = self.frame.getBounds() leaveButtonGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_brd_leaveBtn') leaveImageList = (leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'), leaveButtonGui.find('**/tt_t_gui_brd_leaveDown'), leaveButtonGui.find('**/tt_t_gui_brd_leaveHover'), leaveButtonGui.find('**/tt_t_gui_brd_leaveUp')) self.leaderButtonImage = guiButtons.find('**/tt_t_gui_brd_statusLeader') self.availableButtonImage = guiButtons.find('**/tt_t_gui_brd_statusOn') self.battleButtonImage = guiButtons.find('**/tt_t_gui_brd_statusBattle') if localAvatar.doId == self.leaderId: quitText = TTLocalizer.QuitBoardingPartyLeader else: quitText = TTLocalizer.QuitBoardingPartyNonLeader self.disabledOrangeColor = Vec4(1, 0.5, 0.25, 0.9) self.quitButton = DirectButton(parent=self.frame, relief=None, image=leaveImageList, image_scale=0.065, command=self.__handleLeaveButton, text=('', quitText, quitText, ''), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.045, 0.0), text_align=TextNode.ALeft, pos=(0.223, 0, quitButtonZPos), image3_color=self.disabledOrangeColor) arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_arrow') hideImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover')) showImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover')) self.hideButton = DirectButton(relief=None, parent=base.a2dLeftCenter, text_pos=(0, 0.15), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), text_shadow=Vec4(1, 1, 1, 1), 
image=hideImageList, image_scale=(-0.35, 1, 0.5), pos=(0.025, 0, 0.03), scale=1.05, command=self.hide) self.showButton = DirectButton(relief=None, parent=base.a2dLeftCenter, text=('', TTLocalizer.BoardingGroupShow, TTLocalizer.BoardingGroupShow), text_pos=(0.03, 0), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), image=showImageList, image_scale=(0.35, 1, 0.5), pos=(0.025, 0, 0.03), scale=1.05, command=self.show) self.showButton.hide() self.frame.show() self.__makeAvatarNameScrolledList() if localAvatar.doId == self.leaderId: self.__makeDestinationScrolledList() else: self.__makeDestinationFrame() self.__makeGoingToLabel() self.accept('updateGroupStatus', self.__checkGroupStatus) self.accept('ToonBattleIdUpdate', self.__possibleGroupUpdate) base.setCellsAvailable([base.leftCells[1], base.leftCells[2]], 0) if self.boardingParty.isGroupLeader(localAvatar.doId): base.setCellsAvailable([base.leftCells[0]], 0) self.__addTestNames(self.boardingParty.maxSize) self.guiBg.removeNode() guiButtons.removeNode() leaveButtonGui.removeNode() arrowGui.removeNode() return def __defineConstants(self): self.forcedHidden = False self.textFgcolor = Vec4(0.0, 0.6, 0.2, 1.0) # Ripped strait from hacky-boss-battles self.textBgRolloverColor = Vec4(1, 1, 0, 1) self.textBgDownColor = Vec4(0.5, 0.9, 1, 1) self.textBgDisabledColor = Vec4(0.4, 0.8, 0.4, 1) def __handleLeaveButton(self): messenger.send('wakeup') if not base.cr.playGame.getPlace().getState() == 'elevator': self.confirmQuitDialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.QuitBoardingPartyConfirm, command=self.__confirmQuitCallback) self.confirmQuitDialog.show() def __confirmQuitCallback(self, value): if self.confirmQuitDialog: self.confirmQuitDialog.destroy() self.confirmQuitDialog = None if value > 0: if self.boardingParty: self.boardingParty.requestLeave() return def __handleGoButton(self): offset = self.destScrollList.getSelectedIndex() elevatorId = 
self.elevatorIdList[offset] self.boardingParty.requestGoToFirstTime(elevatorId) def __handleCancelGoButton(self): self.boardingParty.cancelGoToElvatorDest() def __checkGroupStatus(self): if not self.boardingParty: return self.notify.debug('__checkGroupStatus %s' % self.boardingParty.getGroupMemberList(localAvatar.doId)) myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId) self.scrollList.removeAndDestroyAllItems(refresh=0) if myMemberList: for avId in myMemberList: avatarButton = self.__getAvatarButton(avId) if avatarButton: self.scrollList.addItem(avatarButton, refresh=0) self.scrollList.refresh() def __possibleGroupUpdate(self, avId): self.notify.debug('GroupPanel __possibleGroupUpdate') if not self.boardingParty: return myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId) if avId in myMemberList: self.__checkGroupStatus() def __makeAvatarNameScrolledList(self): friendsListGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui') self.scrollList = DirectScrolledList(parent=self.frame, relief=None, incButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'), friendsListGui.find('**/FndsLst_ScrollDN'), friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'), friendsListGui.find('**/FndsLst_ScrollUp')), incButton_pos=(0.0, 0.0, -0.35), incButton_image1_color=Vec4(1.0, 0.9, 0.4, 0), incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), incButton_scale=(1.0, 1.0, -1.0), incButton_relief=None, decButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'), friendsListGui.find('**/FndsLst_ScrollDN'), friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'), friendsListGui.find('**/FndsLst_ScrollUp')), decButton_pos=(0.0, 0.0, 0.1), decButton_image1_color=Vec4(1.0, 1.0, 0.6, 0), decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), decButton_relief=None, itemFrame_pos=(-0.195, 0.0, 0.185), itemFrame_borderWidth=(0.1, 0.1), numItemsVisible=8, itemFrame_scale=1.0, forceHeight=0.07, items=[], pos=(0, 0, 0.075)) clipper = PlaneNode('clipper') 
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.235, 0, 0))) clipNP = self.scrollList.attachNewNode(clipper) self.scrollList.setClipPlane(clipNP) friendsListGui.removeNode() return def __makeDestinationScrolledList(self): arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoArrow') incrementImageList = (arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'), arrowGui.find('**/tt_t_gui_brd_arrowL_gotoDown'), arrowGui.find('**/tt_t_gui_brd_arrowL_gotoHover'), arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp')) if self.boardingParty.maxSize == 4: zPos = -0.177083 else: zPos = -0.463843 bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_leader') self.destScrollList = DirectScrolledList( parent=self.frame, relief=None, image=bottomImage, image_scale=(0.5, 1, 0.5), incButton_image=incrementImageList, incButton_pos=(0.217302, 0, 0.07), incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5), incButton_scale=(-0.5, 1, 0.5), incButton_relief=None, incButtonCallback=self.__informDestChange, decButton_image=incrementImageList, decButton_pos=(-0.217302, 0, 0.07), decButton_scale=(0.5, 1, 0.5), decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5), decButton_relief=None, decButtonCallback=self.__informDestChange, itemFrame_pos=(0, 0, 0.06), itemFrame_borderWidth=(0.1, 0.1), numItemsVisible=1, itemFrame_scale=TTLocalizer.GPdestScrollList, forceHeight=0.07, items=[], pos=(0, 0, zPos), scrollSpeed=0.1) arrowGui.removeNode() self.__addDestNames() self.__makeGoButton() return def __addDestNames(self): for i in xrange(len(self.elevatorIdList)): destName = self.__getDestName(i) self.destScrollList.addItem(destName, refresh=0) self.destScrollList.refresh() def __getDestName(self, offset): elevatorId = self.elevatorIdList[offset] elevator = base.cr.doId2do.get(elevatorId) if elevator: destName = elevator.getDestName() return destName def __makeDestinationFrame(self): destName = self.__getDestName(self.destIndexSelected) if self.boardingParty.maxSize == 4: zPos = -0.12 else: zPos = -0.404267 
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_nonLeader') self.destFrame = DirectFrame(parent=self.frame, relief=None, image=bottomImage, image_scale=(0.5, 1, 0.5), text=destName, text_align=TextNode.ACenter, text_scale=TTLocalizer.GPdestFrame, pos=(0, 0, zPos)) return def __makeGoButton(self): goGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoBtn') self.goImageList = (goGui.find('**/tt_t_gui_brd_gotoUp'), goGui.find('**/tt_t_gui_brd_gotoDown'), goGui.find('**/tt_t_gui_brd_gotoHover'), goGui.find('**/tt_t_gui_brd_gotoUp')) self.cancelGoImageList = (goGui.find('**/tt_t_gui_brd_cancelGotoUp'), goGui.find('**/tt_t_gui_brd_cancelGotoDown'), goGui.find('**/tt_t_gui_brd_cancelGotoHover'), goGui.find('**/tt_t_gui_brd_cancelGotoUp')) if self.boardingParty.maxSize == 4: zPos = -0.028 zPos = -0.0360483 else: zPos = -0.0353787 self.goButton = DirectButton(parent=self.destScrollList, relief=None, image=self.goImageList, image_scale=(0.48, 1, 0.48), command=self.__handleGoButton, text=('', TTLocalizer.BoardingGo, TTLocalizer.BoardingGo, ''), text_scale=TTLocalizer.GPgoButton, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.12), pos=(-0.003, 0, zPos)) goGui.removeNode() return def __getAvatarButton(self, avId): toon = base.cr.doId2do.get(avId) if not toon: return None toonName = toon.getName() inBattle = 0 buttonImage = self.availableButtonImage if toon.battleId: inBattle = 1 buttonImage = self.battleButtonImage if avId == localAvatar.doId: self.__forceHide() else: if avId == self.leaderId: buttonImage = self.leaderButtonImage if avId == localAvatar.doId: self.__forceShow() return DirectButton(parent=self.frame, relief=None, image=buttonImage, image_scale=(0.06, 1.0, 0.06), text=toonName, text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.04, text_pos=(0.05, -0.015), text_fg=self.textFgcolor, text1_bg=self.textBgDownColor, text2_bg=self.textBgRolloverColor, text3_fg=self.textBgDisabledColor, pos=(0, 0, 0.2), 
command=self.__openToonAvatarPanel, extraArgs=[toon, avId]) def __openToonAvatarPanel(self, avatar, avId): if avId != localAvatar.doId and avatar: messenger.send('clickedNametag', [avatar]) def __addTestNames(self, num): for i in xrange(num): avatarButton = self.__getAvatarButton(localAvatar.doId) self.scrollList.addItem(avatarButton, refresh=0) self.scrollList.refresh() def __isForcedHidden(self): if self.forcedHidden and self.frame.isHidden(): return True else: return False def hide(self): self.frame.hide() self.hideButton.hide() self.showButton.show() def show(self): self.frame.show() self.forcedHidden = False self.showButton.hide() self.hideButton.show() def __forceHide(self): if not self.frame.isHidden(): self.forcedHidden = True self.hide() def __forceShow(self): if self.__isForcedHidden(): self.show() def __informDestChange(self): self.boardingParty.informDestChange(self.destScrollList.getSelectedIndex()) def changeDestination(self, offset): if localAvatar.doId != self.leaderId: self.destIndexSelected = offset if self.destFrame: self.destFrame['text'] = self.__getDestName(self.destIndexSelected) def scrollToDestination(self, offset): if localAvatar.doId == self.leaderId: if self.destScrollList: self.destIndexSelected = offset self.destScrollList.scrollTo(offset) def __makeGoingToLabel(self): if self.boardingParty.maxSize == 4: zPos = -0.0466546 else: zPos = -0.331731 self.goingToLabel = DirectLabel(parent=self.frame, relief=None, text=TTLocalizer.BoardingGoingTo, text_scale=0.045, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), pos=(-0.1966, 0, zPos)) return def disableQuitButton(self): if self.quitButton and not self.quitButton.isEmpty(): self.quitButton['state'] = DGG.DISABLED def enableQuitButton(self): if self.quitButton and not self.quitButton.isEmpty(): self.quitButton['state'] = DGG.NORMAL def disableGoButton(self): if self.goButton and not self.goButton.isEmpty(): self.goButton['state'] = DGG.DISABLED self.goButton['image_color'] = Vec4(1, 1, 1, 
0.4) def enableGoButton(self): if self.goButton and not self.goButton.isEmpty(): self.goButton['state'] = DGG.NORMAL self.goButton['image_color'] = Vec4(1, 1, 1, 1) def disableDestinationScrolledList(self): if self.destScrollList and not self.destScrollList.isEmpty(): self.destScrollList.incButton['state'] = DGG.DISABLED self.destScrollList.decButton['state'] = DGG.DISABLED def enableDestinationScrolledList(self): if self.destScrollList and not self.destScrollList.isEmpty(): self.destScrollList.incButton['state'] = DGG.NORMAL self.destScrollList.decButton['state'] = DGG.NORMAL def changeGoToCancel(self): if self.goButton and not self.goButton.isEmpty(): self.goButton['image'] = self.cancelGoImageList self.goButton['text'] = (TTLocalizer.BoardingCancelGo, TTLocalizer.BoardingCancelGo, TTLocalizer.BoardingCancelGo, '') self.goButton['command'] = self.__handleCancelGoButton def changeCancelToGo(self): if self.goButton and not self.goButton.isEmpty(): self.goButton['image'] = self.goImageList self.goButton['text'] = ('', TTLocalizer.BoardingGo, TTLocalizer.BoardingGo, '') self.goButton['command'] = self.__handleGoButton
apache-2.0
androidarmv6/android_external_chromium_org
third_party/closure_linter/closure_linter/common/filetestcase.py
135
3835
#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test case that runs a checker on a file, matching errors against annotations. Runs the given checker on the given file, accumulating all errors. The list of errors is then matched against those annotated in the file. Based heavily on devtools/javascript/gpylint/full_test.py. """ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') import re import unittest as googletest from closure_linter.common import erroraccumulator class AnnotatedFileTestCase(googletest.TestCase): """Test case to run a linter against a single file.""" # Matches an all caps letters + underscores error identifer _MESSAGE = {'msg': '[A-Z][A-Z_]+'} # Matches a //, followed by an optional line number with a +/-, followed by a # list of message IDs. Used to extract expected messages from testdata files. # TODO(robbyw): Generalize to use different commenting patterns. _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?' r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE) def __init__(self, filename, runner, converter): """Create a single file lint test case. Args: filename: Filename to test. runner: Object implementing the LintRunner interface that lints a file. converter: Function taking an error string and returning an error code. 
""" googletest.TestCase.__init__(self, 'runTest') self._filename = filename self._messages = [] self._runner = runner self._converter = converter def shortDescription(self): """Provides a description for the test.""" return 'Run linter on %s' % self._filename def runTest(self): """Runs the test.""" try: filename = self._filename stream = open(filename) except IOError, ex: raise IOError('Could not find testdata resource for %s: %s' % (self._filename, ex)) expected = self._GetExpectedMessages(stream) got = self._ProcessFileAndGetMessages(filename) self.assertEqual(expected, got) def _GetExpectedMessages(self, stream): """Parse a file and get a sorted list of expected messages.""" messages = [] for i, line in enumerate(stream): match = self._EXPECTED_RE.search(line) if match: line = match.group('line') msg_ids = match.group('msgs') if line is None: line = i + 1 elif line.startswith('+') or line.startswith('-'): line = i + 1 + int(line) else: line = int(line) for msg_id in msg_ids.split(','): # Ignore a spurious message from the license preamble. if msg_id != 'WITHOUT': messages.append((line, self._converter(msg_id.strip()))) stream.seek(0) messages.sort() return messages def _ProcessFileAndGetMessages(self, filename): """Trap gpylint's output parse it to get messages added.""" errors = erroraccumulator.ErrorAccumulator() self._runner.Run([filename], errors) errors = errors.GetErrors() # Convert to expected tuple format. error_msgs = [(error.token.line_number, error.code) for error in errors] error_msgs.sort() return error_msgs
bsd-3-clause
ogenstad/ansible
lib/ansible/modules/network/avi/avi_wafpolicy.py
26
5675
#!/usr/bin/python # # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # # Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_wafpolicy author: Gaurav Rastogi (grastogi@avinetworks.com) short_description: Module for setup of WafPolicy Avi RESTful Object description: - This module is used to configure WafPolicy object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.5" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] created_by: description: - Creator name. - Field introduced in 17.2.4. crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are system created with crs groups. - Field introduced in 17.2.1. description: description: - Field introduced in 17.2.1. mode: description: - Waf policy mode. - This can be detection or enforcement. - Enum options - WAF_MODE_DETECTION_ONLY, WAF_MODE_ENFORCEMENT. - Field introduced in 17.2.1. - Default value when not specified in API or module is interpreted by Avi Controller as WAF_MODE_DETECTION_ONLY. required: true name: description: - Field introduced in 17.2.1. required: true paranoia_level: description: - Waf ruleset paranoia mode. 
- This is used to select rules based on the paranoia-level tag. - Enum options - WAF_PARANOIA_LEVEL_LOW, WAF_PARANOIA_LEVEL_MEDIUM, WAF_PARANOIA_LEVEL_HIGH, WAF_PARANOIA_LEVEL_EXTREME. - Field introduced in 17.2.1. - Default value when not specified in API or module is interpreted by Avi Controller as WAF_PARANOIA_LEVEL_LOW. post_crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are created by the user and will be enforced after the crs groups. - Field introduced in 17.2.1. pre_crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are created by the user and will be enforced before the crs groups. - Field introduced in 17.2.1. tenant_ref: description: - It is a reference to an object of type tenant. - Field introduced in 17.2.1. url: description: - Avi controller URL of the object. uuid: description: - Field introduced in 17.2.1. waf_profile_ref: description: - Waf profile for waf policy. - It is a reference to an object of type wafprofile. - Field introduced in 17.2.1. 
required: true extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create WafPolicy object avi_wafpolicy: controller: 10.10.25.42 username: admin password: something state: present name: sample_wafpolicy """ RETURN = ''' obj: description: WafPolicy (api/wafpolicy) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), created_by=dict(type='str',), crs_groups=dict(type='list',), description=dict(type='str',), mode=dict(type='str', required=True), name=dict(type='str', required=True), paranoia_level=dict(type='str',), post_crs_groups=dict(type='list',), pre_crs_groups=dict(type='list',), tenant_ref=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), waf_profile_ref=dict(type='str', required=True), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'wafpolicy', set([])) if __name__ == '__main__': main()
gpl-3.0
pipermerriam/py-geth
tests/installation/test_geth_installation.py
1
1450
import os import pytest import semantic_version from geth import ( get_geth_version, ) from geth.install import ( INSTALL_FUNCTIONS, get_platform, install_geth, get_executable_path, ) INSTALLATION_TEST_PARAMS = tuple( (platform, version) for platform, platform_install_functions in INSTALL_FUNCTIONS.items() for version in platform_install_functions.keys() ) @pytest.mark.skipif( 'GETH_RUN_INSTALL_TESTS' not in os.environ, reason=( "Installation tests will not run unless `GETH_RUN_INSTALL_TESTS` " "environment variable is set" ), ) @pytest.mark.parametrize( "platform,version", INSTALLATION_TEST_PARAMS, ) def test_geth_installation_as_function_call(monkeypatch, tmpdir, platform, version): if get_platform() != platform: pytest.skip("Wront platform for install script") base_install_path = str(tmpdir.mkdir("temporary-dir")) monkeypatch.setenv('GETH_BASE_INSTALL_PATH', base_install_path) # sanity check that it's not already installed. executable_path = get_executable_path(version) assert not os.path.exists(executable_path) install_geth(identifier=version, platform=platform) assert os.path.exists(executable_path) monkeypatch.setenv('GETH_BINARY', executable_path) actual_version = get_geth_version() expected_version = semantic_version.Spec(version.lstrip('v')) assert actual_version in expected_version
mit
mdavid/cherokee-webserver-svnclone
admin/wizards/rails.py
1
14777
# -*- coding: utf-8 -*- # # Cherokee-admin's Ruby on Rails Wizard # # Authors: # Taher Shihadeh <taher@octality.com> # # Copyright (C) 2010 Alvaro Lopez Ortega # # This program is free software; you can redistribute it and/or # modify it under the terms of version 2 of the GNU General Public # License as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # # # Tested: # 2010/04/14: Rails 2.2.3 / Cherokee 0.99.41 # import os import re import CTK import Wizard import validations from util import * from consts import * NOTE_WELCOME_H1 = N_("Welcome to the Ruby on Rails wizard") NOTE_WELCOME_P1 = N_('<a target="_blank" href="http://rubyonrails.org/">Ruby on Rails</a> is an open-source web framework optimized for programmer happines and sustainable productivity.') NOTE_WELCOME_P2 = N_('It lets you write beautiful code by favoring convention over configuration.') NOTE_LOCAL_H1 = N_("Ruby on Rails Project") NOTE_HOST_H1 = N_("New Virtual Server") NOTE_WEBDIR_H1 = N_("Web Directory") NOTE_ROR_DIR = N_("Local path to the Ruby on Rails based project.") NOTE_NEW_HOST = N_("Name of the new domain that will be created.") NOTE_NEW_DIR = N_("Directory of the web directory where the Ruby on Rails project will live in.") NOTE_ENV = N_("Value of the RAILS_ENV variable.") NOTE_METHOD = N_("The proxy setting is recommended, but FastCGI can also be used if spawn-fcgi is available.") ERROR_DISPATCH = N_("<p>Even though the directory looks like a Ruby on Rails project, the public/dispatch.fcgi file wasn't found.</p>") ERROR_EXAMPLE = 
N_("<p>However a <b>public/dispatch.fcgi.example</b> file is present, so you might want to rename it.</p>") ERROR_RAILS23 = N_("<p>If you are using Rails >= 2.3.0, you will have to execute the following command from the project directory in order to add the missing file:</p><p><pre>rake rails:update:generate_dispatchers</pre></p>") ERROR_NO_ROR = N_("It does not look like a Ruby on Rails based project directory.") ERROR_NO_DROOT = N_("The document root directory does not exist.") PREFIX = 'tmp!wizard!rails' URL_APPLY = r'/wizard/vserver/rails/apply' ROR_CHILD_PROCS = 3 DEFAULT_BINS = ['spawn-fcgi'] RAILS_ENV = [ ('production', N_('Production')), ('test', N_('Test')), ('development', N_('Development')), ('', N_('Empty')) ] RAILS_METHOD = [ ('proxy', N_('HTTP proxy')), ('fcgi', N_('FastCGI')) ] SOURCE = """ source!%(src_num)d!type = interpreter source!%(src_num)d!nick = RoR %(new_host)s, instance %(src_instance)d source!%(src_num)d!host = 127.0.0.1:%(src_port)d source!%(src_num)d!env_inherited = 0 """ SOURCE_FCGI = """ source!%(src_num)d!interpreter = spawn-fcgi -n -d %(ror_dir)s -f %(ror_dir)s/public/dispatch.fcgi -p %(src_port)d """ SOURCE_PROXY = """ source!%(src_num)d!interpreter = %(ror_dir)s/script/server -p %(src_port)d """ SOURCE_ENV = """ source!%(src_num)d!env!RAILS_ENV = %(ror_env)s """ CONFIG_VSRV = """ %(vsrv_pre)s!nick = %(new_host)s %(vsrv_pre)s!document_root = %(ror_dir)s/public %(vsrv_pre)s!directory_index = index.html %(vsrv_pre)s!rule!10!match = exists %(vsrv_pre)s!rule!10!match!match_any = 1 %(vsrv_pre)s!rule!10!match!match_only_files = 1 %(vsrv_pre)s!rule!10!match!match_index_files = 0 %(vsrv_pre)s!rule!10!handler = common %(vsrv_pre)s!rule!10!expiration = time %(vsrv_pre)s!rule!10!expiration!time = 7d %(vsrv_pre)s!rule!1!match = default %(vsrv_pre)s!rule!1!encoder!gzip = 1 """ CONFIG_VSRV_FCGI = """ %(vsrv_pre)s!rule!1!handler = fcgi %(vsrv_pre)s!rule!1!handler!error_handler = 1 %(vsrv_pre)s!rule!1!handler!check_file = 0 
%(vsrv_pre)s!rule!1!handler!balancer = round_robin
"""

CONFIG_VSRV_PROXY = """
%(vsrv_pre)s!rule!1!handler = proxy
%(vsrv_pre)s!rule!1!handler!balancer = round_robin
%(vsrv_pre)s!rule!1!handler!in_allow_keepalive = 1
"""

CONFIG_VSRV_CHILD = """
%(vsrv_pre)s!rule!1!handler!balancer!source!%(src_instance)d = %(src_num)d
"""

CONFIG_RULES = """
%(rule_pre_plus2)s!match = directory
%(rule_pre_plus2)s!match!directory = %(webdir)s
%(rule_pre_plus2)s!match!final = 0
%(rule_pre_plus2)s!document_root = %(ror_dir)s/public
%(rule_pre_plus1)s!match = and
%(rule_pre_plus1)s!match!left = directory
%(rule_pre_plus1)s!match!left!directory = %(webdir)s
%(rule_pre_plus1)s!match!right = exists
%(rule_pre_plus1)s!match!right!match_any = 1
%(rule_pre_plus1)s!match!right!match_only_files = 1
%(rule_pre_plus1)s!match!right!match_index_files = 0
%(rule_pre_plus1)s!handler = common
%(rule_pre_plus1)s!expiration = time
%(rule_pre_plus1)s!expiration!time = 7d
%(rule_pre)s!match = directory
%(rule_pre)s!match!directory = %(webdir)s
%(rule_pre)s!encoder!gzip = 1
"""

CONFIG_RULES_FCGI = """
%(rule_pre)s!handler = fcgi
%(rule_pre)s!handler!error_handler = 1
%(rule_pre)s!handler!check_file = 0
%(rule_pre)s!handler!balancer = round_robin
"""

CONFIG_RULES_PROXY = """
%(rule_pre)s!handler = proxy
%(rule_pre)s!handler!balancer = round_robin
%(rule_pre)s!handler!in_allow_keepalive = 1
"""

CONFIG_RULES_CHILD = """
%(rule_pre)s!handler!balancer!source!%(src_instance)d = %(src_num)d
"""


class Commit:
    # Handles the final POST of the wizard: either creates a whole new
    # virtual server or adds a rule set to an existing one, depending on
    # whether a '!vsrv_num' entry was stashed in the wizard's config prefix.

    def Commit_VServer (self):
        """Create a brand-new virtual server running the Rails project."""
        # Create the new Virtual Server
        vsrv_pre = CTK.cfg.get_next_entry_prefix('vserver')
        CTK.cfg['%s!nick'%(vsrv_pre)] = CTK.cfg.get_val('%s!host'%(PREFIX))
        Wizard.CloneLogsCfg_Apply ('%s!logs_as_vsrv'%(PREFIX), vsrv_pre)

        # Incoming info collected by the earlier wizard pages
        ror_dir    = CTK.cfg.get_val('%s!ror_dir'%(PREFIX))
        new_host   = CTK.cfg.get_val('%s!new_host'%(PREFIX))
        ror_env    = CTK.cfg.get_val('%s!ror_env'%(PREFIX))
        ror_method = CTK.cfg.get_val('%s!ror_method'%(PREFIX))

        # Locals
        src_num, src_pre = cfg_source_get_next ()

        # Deployment method distinction: FastCGI needs public/dispatch.fcgi
        # to exist, the proxy method does not.
        CONFIG = CONFIG_VSRV
        SRC    = SOURCE
        if ror_method == 'fcgi':
            # Check whether dispatch.fcgi is present
            dispatcher = Dispatcher()
            if not dispatcher._fcgi_ok():
                return {'ret':'error'}
            CONFIG += CONFIG_VSRV_FCGI
            SRC    += SOURCE_FCGI
        else:
            CONFIG += CONFIG_VSRV_PROXY
            SRC    += SOURCE_PROXY

        # Add the new main rules.  The templates are filled from locals(),
        # so the variable names above must match the %(...)s placeholders.
        config = CONFIG % (locals())

        # Add one Information Source per child process, on consecutive ports
        free_port = cfg_source_find_free_port()
        for i in range(ROR_CHILD_PROCS):
            src_instance = i + 1
            src_port     = i + free_port
            config += SRC % (locals())
            if ror_env:
                config += SOURCE_ENV % (locals())
            config += CONFIG_VSRV_CHILD % (locals())
            src_num += 1

        # Apply the configuration
        CTK.cfg.apply_chunk (config)

        # Usual Static Files
        Wizard.AddUsualStaticFiles ("%s!rule!500" % (vsrv_pre))

        # Clean up the temporary wizard entries
        CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
        CTK.cfg.normalize ('vserver')
        del (CTK.cfg[PREFIX])
        return CTK.cfg_reply_ajax_ok()

    def Commit_Rule (self):
        """Add the Rails rules to an already existing virtual server."""
        # Incoming info
        ror_dir    = CTK.cfg.get_val('%s!ror_dir'%(PREFIX))
        webdir     = CTK.cfg.get_val('%s!new_webdir'%(PREFIX))
        ror_env    = CTK.cfg.get_val('%s!ror_env'%(PREFIX))
        ror_method = CTK.cfg.get_val('%s!ror_method'%(PREFIX))
        vsrv_num   = CTK.cfg.get_val('%s!vsrv_num'%(PREFIX))

        # Locals: three consecutive rule slots are used (see CONFIG_RULES)
        vsrv_pre = 'vserver!%s'%(vsrv_num)
        rule_num, rule_pre = cfg_vsrv_rule_get_next (vsrv_pre)
        src_num,  src_pre  = cfg_source_get_next ()
        new_host = CTK.cfg.get_val ("%s!nick"%(vsrv_pre))
        rule_pre_plus2 = "%s!rule!%d" % (vsrv_pre, rule_num + 2)
        rule_pre_plus1 = "%s!rule!%d" % (vsrv_pre, rule_num + 1)

        # Deployment method distinction
        CONFIG = CONFIG_RULES
        SRC    = SOURCE
        if ror_method == 'fcgi':
            # Check whether dispatch.fcgi is present
            dispatcher = Dispatcher()
            if not dispatcher._fcgi_ok():
                return {'ret':'error'}
            CONFIG += CONFIG_RULES_FCGI
            SRC    += SOURCE_FCGI
        else:
            CONFIG += CONFIG_RULES_PROXY
            SRC    += SOURCE_PROXY

        # Add the new rules
        config = CONFIG % (locals())

        # Add the Information Sources
        free_port = cfg_source_find_free_port()
        for i in range(ROR_CHILD_PROCS):
            src_instance = i + 1
            src_port     = i + free_port
            config += SRC % (locals())
            if ror_env:
                config += SOURCE_ENV % (locals())
            config += CONFIG_RULES_CHILD % (locals())
            src_num += 1

        # Apply the configuration
        CTK.cfg.apply_chunk (config)

        # Clean up
        CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
        del (CTK.cfg[PREFIX])
        return CTK.cfg_reply_ajax_ok()

    def __call__ (self):
        if CTK.post.pop('final'):
            # Apply POST
            CTK.cfg_apply_post()

            # VServer or Rule?
            if CTK.cfg.get_val ('%s!vsrv_num'%(PREFIX)):
                return self.Commit_Rule()
            return self.Commit_VServer()

        return CTK.cfg_apply_post()


class WebDirectory:
    # Wizard page: ask for the web directory (rule mode only).
    def __call__ (self):
        table = CTK.PropsTable()
        table.Add (_('Web Directory'), CTK.TextCfg ('%s!new_webdir'%(PREFIX), False, {'value': '/project', 'class': 'noauto'}), _(NOTE_NEW_DIR))

        submit = CTK.Submitter (URL_APPLY)
        submit += CTK.Hidden('final', '1')
        submit += table

        cont = CTK.Container()
        cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WEBDIR_H1)))
        cont += submit
        cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
        return cont.Render().toStr()


class Host:
    # Wizard page: ask for the new host name (virtual-server mode only).
    def __call__ (self):
        table = CTK.PropsTable()
        table.Add (_('New Host Name'),    CTK.TextCfg ('%s!new_host'%(PREFIX), False, {'value': 'www.example.com', 'class': 'noauto'}), _(NOTE_NEW_HOST))
        table.Add (_('Use Same Logs as'), Wizard.CloneLogsCfg('%s!logs_as_vsrv'%(PREFIX)), _(Wizard.CloneLogsCfg.NOTE))

        submit = CTK.Submitter (URL_APPLY)
        submit += CTK.Hidden('final', '1')
        submit += table

        cont = CTK.Container()
        cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_HOST_H1)))
        cont += Dispatcher ()
        cont += submit
        cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
        return cont.Render().toStr()


class Dispatcher (CTK.Container):
    # Renders a warning notice when public/dispatch.fcgi is missing from
    # the selected project directory (required by the FastCGI method).
    def __init__ (self):
        CTK.Container.__init__ (self)
        if self._fcgi_ok():
            return
        if self.errors.has_key('dispatch.fcgi'):
            if self.errors.has_key('dispatch.fcgi.example'):
                # A dispatch.fcgi.example exists: tell the user to rename it
                message = _(ERROR_DISPATCH) + _(ERROR_EXAMPLE)
            else:
                message = _(ERROR_DISPATCH) + _(ERROR_RAILS23)
            self += CTK.Notice ('important-information', CTK.RawHTML (message))

    def _fcgi_ok (self):
        """Return True if public/dispatch.fcgi exists; on failure, record
        what was found in self.errors and return False."""
        ror_dir = CTK.cfg.get_val('%s!ror_dir'%(PREFIX))

        # Check whether dispatch.fcgi is present
        if not os.path.exists (os.path.join (ror_dir, "public/dispatch.fcgi")):
            self.errors = {'dispatch.fcgi': 'Not found'}
            if os.path.exists (os.path.join (ror_dir, "public/dispatch.fcgi.example")):
                self.errors['dispatch.fcgi.example'] = True
            return False
        return True


class LocalSource:
    # Wizard page: project directory, RAILS_ENV and deployment method.
    def __call__ (self):
        # Trim deployment options if needed: without the ruby/spawner
        # binaries the FastCGI method cannot work.
        if not path_find_binary (DEFAULT_BINS):
            RAILS_METHOD.remove(('fcgi', 'FastCGI'))

        submit = CTK.Submitter (URL_APPLY)
        table  = CTK.PropsTable()
        submit += table
        table.Add (_('Project Directory'),     CTK.TextCfg ('%s!ror_dir'%(PREFIX), False), _(NOTE_ROR_DIR))
        table.Add (_('RAILS_ENV environment'), CTK.ComboCfg ('%s!ror_env'%(PREFIX), trans (RAILS_ENV), {'class': 'noauto'}), _(NOTE_ENV))
        if len(RAILS_METHOD) > 1:
            table.Add (_('Deployment method'), CTK.ComboCfg ('%s!ror_method'%(PREFIX), trans (RAILS_METHOD), {'class': 'noauto'}), _(NOTE_METHOD))
        else:
            # NOTE(review): this hides '!ror_env' with the single remaining
            # *method* value; it looks like it should be '%s!ror_method'
            # instead -- confirm before changing, Commit reads both keys.
            submit += CTK.Hidden('%s!ror_env'%(PREFIX), RAILS_METHOD[0][0])

        cont = CTK.Container()
        cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_LOCAL_H1)))
        cont += submit
        cont += CTK.DruidButtonsPanel_PrevNext_Auto()
        return cont.Render().toStr()


class Welcome:
    # First wizard page: description plus (in rule mode) the vserver number.
    def __call__ (self):
        cont = CTK.Container()
        cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WELCOME_H1)))
        cont += Wizard.Icon ('rails', {'class': 'wizard-descr'})
        box = CTK.Box ({'class': 'wizard-welcome'})
        box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P1)))
        box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P2)))
        box += Wizard.CookBookBox ('cookbook_ror')
        cont += box

        # Send the VServer num if it's a Rule
        tmp = re.findall (r'^/wizard/vserver/(\d+)/', CTK.request.url)
        if tmp:
            submit = CTK.Submitter (URL_APPLY)
            submit += CTK.Hidden('%s!vsrv_num'%(PREFIX), tmp[0])
            cont += submit

        cont += CTK.DruidButtonsPanel_Next_Auto()
        return cont.Render().toStr()


def is_ror_dir (path):
    """Validation helper: *path* must be a local directory containing either
    script/server (Rails <= 2.x) or script/rails (Rails 3)."""
    path = validations.is_local_dir_exists (path)
    try:
        manage = os.path.join (path, "script/server")
        validations.is_local_file_exists (manage)
    except:
        try:
            manage = os.path.join (path, "script/rails")
            validations.is_local_file_exists (manage)
        except:
            raise ValueError, _(ERROR_NO_ROR)
    return path


# Field validations applied when the wizard form is POSTed
VALS = [
    ('%s!ror_dir' %(PREFIX), validations.is_not_empty),
    ('%s!new_host'%(PREFIX), validations.is_not_empty),

    ("%s!ror_dir" %(PREFIX), is_ror_dir),
    ("%s!new_host"%(PREFIX), validations.is_new_vserver_nick)
]

# VServer
CTK.publish ('^/wizard/vserver/rails$',   Welcome)
CTK.publish ('^/wizard/vserver/rails/2$', LocalSource)
CTK.publish ('^/wizard/vserver/rails/3$', Host)

# Rule
CTK.publish ('^/wizard/vserver/(\d+)/rails$',   Welcome)
CTK.publish ('^/wizard/vserver/(\d+)/rails/2$', LocalSource)
CTK.publish ('^/wizard/vserver/(\d+)/rails/3$', WebDirectory)

# Common
CTK.publish (r'^%s$'%(URL_APPLY), Commit, method="POST", validation=VALS)
gpl-2.0
chen0510566/MissionPlanner
Lib/fpformat.py
66
4844
"""General floating point formatting functions. Functions: fix(x, digits_behind) sci(x, digits_behind) Each takes a number or a string and a number of digits as arguments. Parameters: x: number to be formatted; or a string resembling a number digits_behind: number of digits behind the decimal point """ from warnings import warnpy3k warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2) del warnpy3k import re __all__ = ["fix","sci","NotANumber"] # Compiled regular expression to "decode" a number decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$') # \0 the whole thing # \1 leading sign or empty # \2 digits left of decimal point # \3 fraction (empty or begins with point) # \4 exponent part (empty or begins with 'e' or 'E') try: class NotANumber(ValueError): pass except TypeError: NotANumber = 'fpformat.NotANumber' def extract(s): """Return (sign, intpart, fraction, expo) or raise an exception: sign is '+' or '-' intpart is 0 or more digits beginning with a nonzero fraction is 0 or more digits expo is an integer""" res = decoder.match(s) if res is None: raise NotANumber, s sign, intpart, fraction, exppart = res.group(1,2,3,4) if sign == '+': sign = '' if fraction: fraction = fraction[1:] if exppart: expo = int(exppart[1:]) else: expo = 0 return sign, intpart, fraction, expo def unexpo(intpart, fraction, expo): """Remove the exponent by changing intpart and fraction.""" if expo > 0: # Move the point left f = len(fraction) intpart, fraction = intpart + fraction[:expo], fraction[expo:] if expo > f: intpart = intpart + '0'*(expo-f) elif expo < 0: # Move the point right i = len(intpart) intpart, fraction = intpart[:expo], intpart[expo:] + fraction if expo < -i: fraction = '0'*(-expo-i) + fraction return intpart, fraction def roundfrac(intpart, fraction, digs): """Round or extend the fraction to size digs.""" f = len(fraction) if f <= digs: return intpart, fraction + '0'*(digs-f) i = len(intpart) if i+digs < 0: return '0'*-digs, '' 
total = intpart + fraction nextdigit = total[i+digs] if nextdigit >= '5': # Hard case: increment last digit, may have carry! n = i + digs - 1 while n >= 0: if total[n] != '9': break n = n-1 else: total = '0' + total i = i+1 n = 0 total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1) intpart, fraction = total[:i], total[i:] if digs >= 0: return intpart, fraction[:digs] else: return intpart[:digs] + '0'*-digs, '' def fix(x, digs): """Format x as [-]ddd.ddd with 'digs' digits after the point and at least one digit before. If digs <= 0, the point is suppressed.""" if type(x) != type(''): x = repr(x) try: sign, intpart, fraction, expo = extract(x) except NotANumber: return x intpart, fraction = unexpo(intpart, fraction, expo) intpart, fraction = roundfrac(intpart, fraction, digs) while intpart and intpart[0] == '0': intpart = intpart[1:] if intpart == '': intpart = '0' if digs > 0: return sign + intpart + '.' + fraction else: return sign + intpart def sci(x, digs): """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point and exactly one digit before. If digs is <= 0, one digit is kept and the point is suppressed.""" if type(x) != type(''): x = repr(x) sign, intpart, fraction, expo = extract(x) if not intpart: while fraction and fraction[0] == '0': fraction = fraction[1:] expo = expo - 1 if fraction: intpart, fraction = fraction[0], fraction[1:] expo = expo - 1 else: intpart = '0' else: expo = expo + len(intpart) - 1 intpart, fraction = intpart[0], intpart[1:] + fraction digs = max(0, digs) intpart, fraction = roundfrac(intpart, fraction, digs) if len(intpart) > 1: intpart, fraction, expo = \ intpart[0], intpart[1:] + fraction[:-1], \ expo + len(intpart) - 1 s = sign + intpart if digs > 0: s = s + '.' 
+ fraction e = repr(abs(expo)) e = '0'*(3-len(e)) + e if expo < 0: e = '-' + e else: e = '+' + e return s + 'e' + e def test(): """Interactive test run.""" try: while 1: x, digs = input('Enter (x, digs): ') print x, fix(x, digs), sci(x, digs) except (EOFError, KeyboardInterrupt): pass
gpl-3.0
liamgh/liamgreenhughes-sl4a-tf101
python/src/Demo/rpc/mountclient.py
42
6632
# Mount RPC client -- RFC 1094 (NFS), Appendix A

# This module demonstrates how to write your own RPC client in Python.
# When this example was written, there was no RPC compiler for
# Python. Without such a compiler, you must first create classes
# derived from Packer and Unpacker to handle the data types for the
# server you want to interface to.  You then write the client class.
# If you want to support both the TCP and the UDP version of a
# protocol, use multiple inheritance as shown below.

import rpc
from rpc import Packer, Unpacker, TCPClient, UDPClient

# Program number and version for the mount protocol
MOUNTPROG = 100005
MOUNTVERS = 1

# Size of the 'fhandle' opaque structure
FHSIZE = 32


# Packer derived class for Mount protocol clients.
# The only thing we need to pack beyond basic types is an 'fhandle'

class MountPacker(Packer):

    def pack_fhandle(self, fhandle):
        # An fhandle is a fixed-size (FHSIZE) opaque blob.
        self.pack_fopaque(FHSIZE, fhandle)


# Unpacker derived class for Mount protocol clients.
# The important types we need to unpack are fhandle, fhstatus,
# mountlist and exportlist; mountstruct, exportstruct and groups are
# used to unpack components of mountlist and exportlist and the
# corresponding functions are passed as function argument to the
# generic unpack_list function.

class MountUnpacker(Unpacker):

    def unpack_fhandle(self):
        return self.unpack_fopaque(FHSIZE)

    def unpack_fhstatus(self):
        """Return (status, fhandle) -- fhandle is None unless status == 0."""
        status = self.unpack_uint()
        if status == 0:
            fh = self.unpack_fhandle()
        else:
            fh = None
        return status, fh

    def unpack_mountlist(self):
        return self.unpack_list(self.unpack_mountstruct)

    def unpack_mountstruct(self):
        hostname = self.unpack_string()
        directory = self.unpack_string()
        return (hostname, directory)

    def unpack_exportlist(self):
        return self.unpack_list(self.unpack_exportstruct)

    def unpack_exportstruct(self):
        filesys = self.unpack_string()
        groups = self.unpack_groups()
        return (filesys, groups)

    def unpack_groups(self):
        return self.unpack_list(self.unpack_string)


# These are the procedures specific to the Mount client class.
# Think of this as a derived class of either TCPClient or UDPClient.

class PartialMountClient:

    # This method is called by Client.__init__ to initialize
    # self.packer and self.unpacker
    def addpackers(self):
        self.packer = MountPacker()
        self.unpacker = MountUnpacker('')

    # This method is called by Client.__init__ to bind the socket
    # to a particular network interface and port.  We use the
    # default network interface, but if we're running as root,
    # we want to bind to a reserved port
    def bindsocket(self):
        import os
        try:
            uid = os.getuid()
        except AttributeError:
            # Platform without getuid (e.g. Windows): treat as non-root.
            uid = 1
        if uid == 0:
            port = rpc.bindresvport(self.sock, '')
            # 'port' is not used
        else:
            self.sock.bind(('', 0))

    # This function is called to cough up a suitable
    # authentication object for a call to procedure 'proc'.
    def mkcred(self):
        if self.cred is None:
            self.cred = rpc.AUTH_UNIX, rpc.make_auth_unix_default()
        return self.cred

    # The methods Mnt, Dump etc. each implement one Remote
    # Procedure Call.  This is done by calling self.make_call()
    # with as arguments:
    #
    # - the procedure number
    # - the arguments (or None)
    # - the "packer" function for the arguments (or None)
    # - the "unpacker" function for the return value (or None)
    #
    # The packer and unpacker function, if not None, *must* be
    # methods of self.packer and self.unpacker, respectively.
    # A value of None means that there are no arguments or is no
    # return value, respectively.
    #
    # The return value from make_call() is the return value from
    # the remote procedure call, as unpacked by the "unpacker"
    # function, or None if the unpacker function is None.
    #
    # (Even if you expect a result of None, you should still
    # return the return value from make_call(), since this may be
    # needed by a broadcasting version of the class.)
    #
    # If the call fails, make_call() raises an exception
    # (this includes time-outs and invalid results).
    #
    # Note that (at least with the UDP protocol) there is no
    # guarantee that a call is executed at most once.  When you do
    # get a reply, you know it has been executed at least once;
    # when you don't get a reply, you know nothing.

    def Mnt(self, directory):
        return self.make_call(1, directory, \
                self.packer.pack_string, \
                self.unpacker.unpack_fhstatus)

    def Dump(self):
        return self.make_call(2, None, \
                None, self.unpacker.unpack_mountlist)

    def Umnt(self, directory):
        return self.make_call(3, directory, \
                self.packer.pack_string, None)

    def Umntall(self):
        return self.make_call(4, None, None, None)

    def Export(self):
        return self.make_call(5, None, \
                None, self.unpacker.unpack_exportlist)


# We turn the partial Mount client into a full one for either protocol
# by use of multiple inheritance.  (In general, when class C has base
# classes B1...Bn, if x is an instance of class C, methods of x are
# searched first in C, then in B1, then in B2, ..., finally in Bn.)

class TCPMountClient(PartialMountClient, TCPClient):

    def __init__(self, host):
        TCPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)


class UDPMountClient(PartialMountClient, UDPClient):

    def __init__(self, host):
        UDPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)


# A little test program for the Mount client.  This takes a host as
# command line argument (default the local machine), prints its export
# list, and attempts to mount and unmount each exported files system.
# An optional first argument of -t or -u specifies the protocol to use
# (TCP or UDP), default is UDP.

def test():
    import sys
    if sys.argv[1:] and sys.argv[1] == '-t':
        C = TCPMountClient
        del sys.argv[1]
    elif sys.argv[1:] and sys.argv[1] == '-u':
        C = UDPMountClient
        del sys.argv[1]
    else:
        C = UDPMountClient
    if sys.argv[1:]:
        host = sys.argv[1]
    else:
        host = ''
    mcl = C(host)
    list = mcl.Export()
    for item in list:
        print item
        try:
            mcl.Mnt(item[0])
        except:
            # Mount may legitimately be refused (permissions etc.)
            print 'Sorry'
            continue
        mcl.Umnt(item[0])
apache-2.0
maxrosan/NS-3-support-for-OBS
waf-tools/shellcmd.py
137
12146
# Copyright (C) 2008 Gustavo J. A. M. Carneiro  <gjcarneiro@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""Parse and run simple shell-like pipelines ('|', '&&', '||', basic
redirections, FOO=bar assignments and 'cd') without invoking a real shell.

Output is written with sys.std*.write so the module imports and runs on
both Python 2 and Python 3 (the previous 'print' statements were
Python-2-only syntax).
"""

import shlex
import subprocess
import sys
import re
import os

# FOO=bar style environment-variable assignment at the start of a command
env_var_rx = re.compile(r"^([a-zA-Z0-9_]+)=(\S+)$")


def debug(message):
    """Write a debug trace line to stderr, apart from command output."""
    sys.stderr.write("%s\n" % (message,))


# Shared sink used for '> /dev/null' style redirections; opened only once.
if sys.platform == 'win32':
    dev_null = open("NUL:", "w")
else:
    dev_null = open("/dev/null", "w")

# Mark the shared handle close-on-exec where fcntl is available, so that
# spawned children do not inherit it.
fcntl = fd = fl = None
try:
    import fcntl
except ImportError:
    pass
else:
    fd = dev_null.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, fl | fcntl.FD_CLOEXEC)
del fcntl, fd, fl


def _open_out_file(filename):
    """Open *filename* for writing, reusing the shared null-device handle."""
    if filename in ['NUL:', '/dev/null']:
        return dev_null
    else:
        return open(filename, 'wb')


class Node(object):
    """Base class for all pipeline AST nodes."""


class Op(Node):
    """Base class for the operator nodes ('|', '&&', '||')."""


class Pipe(Op):
    """The '|' operator."""


class And(Op):
    """The '&&' operator."""


class Or(Op):
    """The '||' operator."""


class Command(Node):
    """One external command: argv, redirections and environment overrides."""

    class PIPE(object):
        pass  # sentinel: stream is connected to the adjacent command

    class STDOUT(object):
        pass  # sentinel: stderr is merged into stdout (2>&1)

    def __init__(self, name):
        super(Command, self).__init__()
        self.name = name        # command name
        self.argv = [name]      # command argv
        self.stdin = None       # file name, or None to inherit
        self.stdout = None      # file name, Command.PIPE, or None
        self.stderr = None      # file name, Command.PIPE, Command.STDOUT, or None
        self.env_vars = None    # dict of extra environment variables, or None

    def __repr__(self):
        return "Command(%r, argv=%r, stdin=%r, stdout=%r, stderr=%r)" \
            % (self.name, self.argv, self.stdin, self.stdout, self.stderr)


class Chdir(Node):
    """A 'cd <dir>' element; changes the cwd of the following commands."""

    def __init__(self):
        super(Chdir, self).__init__()
        self.dir = None

    def __repr__(self):
        return "Chdir(%r)" \
            % (self.dir)


class Pipeline(object):
    """Parses a shell-like command line into nodes and executes it."""

    def __init__(self):
        self.current_command = None
        self.pipeline = []

    def _commit_command(self):
        # Move the command currently being built onto the finished pipeline.
        assert self.current_command is not None
        self.pipeline.append(self.current_command)
        self.current_command = None

    def get_abbreviated_command(self):
        """Return a short human-readable rendering of the parsed pipeline."""
        l = []
        for node in self.pipeline:
            if isinstance(node, Command):
                l.append(node.name)
            if isinstance(node, Chdir):
                l.append('cd %s' % node.dir)
            elif isinstance(node, Pipe):
                l.append('|')
            elif isinstance(node, And):
                l.append('&&')
            elif isinstance(node, Or):
                # BUG FIX: this branch previously re-tested isinstance(node,
                # And), so Or nodes were silently dropped from the rendering.
                l.append('||')
        return ' '.join(l)

    def parse(self, command):
        """Parse *command* (a string, or an already-split token list) into a
        list of Command/Chdir/operator nodes; also stored in self.pipeline."""
        self.current_command = None
        self.pipeline = []

        if isinstance(command, list):
            tokens = list(command)
        else:
            tokens = shlex.split(command)
        debug("command: shlex: %r" % (tokens,))

        # Small state machine: BEGIN expects env assignments or a command
        # name; COMMAND consumes arguments/redirections/operators; the
        # STD* states each consume exactly one file-name token.
        BEGIN, COMMAND, CHDIR, STDERR, STDOUT, STDIN = range(6)
        state = BEGIN
        self.current_command = None
        env_vars = dict()
        while tokens:
            token = tokens.pop(0)
            if state == BEGIN:
                env_var_match = env_var_rx.match(token)
                if env_var_match is not None:
                    env_vars[env_var_match.group(1)] = env_var_match.group(2)
                else:
                    assert self.current_command is None
                    if token == 'cd':
                        self.current_command = Chdir()
                        assert not env_vars  # env overrides make no sense for cd
                        state = CHDIR
                    else:
                        self.current_command = Command(token)
                        if env_vars:
                            self.current_command.env_vars = env_vars
                            env_vars = dict()
                        state = COMMAND
            elif state == COMMAND:
                if token == '>':
                    state = STDOUT
                elif token == '2>':
                    state = STDERR
                elif token == '2>&1':
                    assert self.current_command.stderr is None
                    self.current_command.stderr = Command.STDOUT
                elif token == '<':
                    state = STDIN
                elif token == '|':
                    assert self.current_command.stdout is None
                    self.current_command.stdout = Command.PIPE
                    self._commit_command()
                    self.pipeline.append(Pipe())
                    state = BEGIN
                elif token == '&&':
                    self._commit_command()
                    self.pipeline.append(And())
                    state = BEGIN
                elif token == '||':
                    self._commit_command()
                    self.pipeline.append(Or())
                    state = BEGIN
                else:
                    self.current_command.argv.append(token)
            elif state == CHDIR:
                if token == '&&':
                    self._commit_command()
                    self.pipeline.append(And())
                    state = BEGIN
                else:
                    assert self.current_command.dir is None
                    self.current_command.dir = token
            elif state == STDOUT:
                assert self.current_command.stdout is None
                self.current_command.stdout = token
                state = COMMAND
            elif state == STDERR:
                assert self.current_command.stderr is None
                self.current_command.stderr = token
                state = COMMAND
            elif state == STDIN:
                assert self.current_command.stdin is None
                self.current_command.stdin = token
                state = COMMAND
        self._commit_command()
        return self.pipeline

    def _exec_piped_commands(self, commands):
        # Wait for every process; if any of them failed, report the last
        # command's exit status (mirroring a shell pipeline's status).
        retvals = []
        for cmd in commands:
            retvals.append(cmd.wait())
        retval = 0
        for r in retvals:
            if r:
                retval = retvals[-1]
                break
        return retval

    def run(self, verbose=False):
        """Execute the parsed pipeline; return its exit code."""
        pipeline = list(self.pipeline)
        files_to_close = []
        piped_commands = []
        piped_commands_display = []
        BEGIN, PIPE = range(2)
        state = BEGIN
        cwd = '.'
        while pipeline:
            node = pipeline.pop(0)
            if isinstance(node, Chdir):
                # 'cd dir &&' only adjusts the cwd for subsequent commands.
                next_op = pipeline.pop(0)
                assert isinstance(next_op, And)
                cwd = os.path.join(cwd, node.dir)
                if verbose:
                    piped_commands_display.append("cd %s &&" % node.dir)
                continue
            assert isinstance(node, (Command, Chdir))
            cmd = node
            if verbose:
                if cmd.env_vars:
                    env_vars_str = ' '.join(['%s=%s' % (key, val)
                                             for key, val in cmd.env_vars.items()])
                    piped_commands_display.append("%s %s" % (env_vars_str, ' '.join(cmd.argv)))
                else:
                    piped_commands_display.append(' '.join(cmd.argv))

            if state == PIPE:
                stdin = piped_commands[-1].stdout
            elif cmd.stdin is not None:
                stdin = open(cmd.stdin, "r")
                if verbose:
                    piped_commands_display.append('< %s' % cmd.stdin)
                files_to_close.append(stdin)
            else:
                stdin = None

            if cmd.stdout is None:
                stdout = None
            elif cmd.stdout is Command.PIPE:
                stdout = subprocess.PIPE
            else:
                stdout = _open_out_file(cmd.stdout)
                files_to_close.append(stdout)
                if verbose:
                    piped_commands_display.append('> %s' % cmd.stdout)

            if cmd.stderr is None:
                stderr = None
            elif cmd.stderr is Command.PIPE:
                stderr = subprocess.PIPE
            elif cmd.stderr is Command.STDOUT:
                stderr = subprocess.STDOUT
                if verbose:
                    piped_commands_display.append('2>&1')
            else:
                stderr = _open_out_file(cmd.stderr)
                files_to_close.append(stderr)
                if verbose:
                    piped_commands_display.append('2> %s' % cmd.stderr)

            if cmd.env_vars:
                env = dict(os.environ)
                env.update(cmd.env_vars)
            else:
                env = None

            if cwd == '.':
                proc_cwd = None
            else:
                proc_cwd = cwd

            debug("command: subprocess.Popen(argv=%r, stdin=%r, stdout=%r,"
                  " stderr=%r, env_vars=%r, cwd=%r)"
                  % (cmd.argv, stdin, stdout, stderr, cmd.env_vars, proc_cwd))
            proc = subprocess.Popen(cmd.argv, stdin=stdin, stdout=stdout,
                                    stderr=stderr, env=env, cwd=proc_cwd)
            del stdin, stdout, stderr
            piped_commands.append(proc)

            try:
                next_node = pipeline.pop(0)
            except IndexError:
                # End of pipeline: collect the final status and clean up.
                try:
                    retval = self._exec_piped_commands(piped_commands)
                    if verbose:
                        sys.stdout.write("%s: exit code %i\n"
                                         % (' '.join(piped_commands_display), retval))
                finally:
                    for f in files_to_close:
                        if f is not dev_null:
                            f.close()
                    files_to_close = []
                return retval
            else:
                if isinstance(next_node, Pipe):
                    state = PIPE
                    piped_commands_display.append('|')

                elif isinstance(next_node, Or):
                    try:
                        this_retval = self._exec_piped_commands(piped_commands)
                    finally:
                        for f in files_to_close:
                            if f is not dev_null:
                                f.close()
                        files_to_close = []
                    # BUG FIX: these verbose messages previously referenced an
                    # undefined name 'retval' (NameError); use this_retval.
                    if this_retval == 0:
                        if verbose:
                            sys.stdout.write("%s: exit code %i (|| is short-circuited)\n"
                                             % (' '.join(piped_commands_display), this_retval))
                        return this_retval
                    if verbose:
                        sys.stdout.write("%s: exit code %i (|| proceeds)\n"
                                         % (' '.join(piped_commands_display), this_retval))
                    state = BEGIN
                    piped_commands = []
                    piped_commands_display = []

                elif isinstance(next_node, And):
                    try:
                        this_retval = self._exec_piped_commands(piped_commands)
                    finally:
                        for f in files_to_close:
                            if f is not dev_null:
                                f.close()
                        files_to_close = []
                    # BUG FIX: same undefined-'retval' problem as above.
                    if this_retval != 0:
                        if verbose:
                            sys.stdout.write("%s: exit code %i (&& is short-circuited)\n"
                                             % (' '.join(piped_commands_display), this_retval))
                        return this_retval
                    if verbose:
                        sys.stdout.write("%s: exit code %i (&& proceeds)\n"
                                         % (' '.join(piped_commands_display), this_retval))
                    state = BEGIN
                    piped_commands = []
                    piped_commands_display = []


def _main():
    pipeline = Pipeline()
    pipeline.parse('./foo.py 2>&1 < xxx | cat && ls')
    sys.stdout.write("%s\n" % (pipeline.run(),))


if __name__ == '__main__':
    _main()
gpl-2.0
XiaodunServerGroup/ddyedx
lms/djangoapps/debug/management/commands/dump_xml_courses.py
19
2377
""" Export all xml courses in a diffable format. This command loads all of the xml courses in the configured DATA_DIR. For each of the courses, it loops through all of the modules, and dumps each as a separate output file containing the json representation of each of its fields (including those fields that are set as default values). """ from __future__ import print_function import json from path import path from django.core.management.base import BaseCommand, CommandError from django.conf import settings from xmodule.modulestore.xml import XMLModuleStore class Command(BaseCommand): """ Django management command to export diffable representations of all xml courses """ help = '''Dump the in-memory representation of all xml courses in a diff-able format''' args = '<export path>' def handle(self, *args, **options): if len(args) != 1: raise CommandError('Must called with arguments: {}'.format(self.args)) xml_module_store = XMLModuleStore( data_dir=settings.DATA_DIR, default_class='xmodule.hidden_module.HiddenDescriptor', load_error_modules=True, xblock_mixins=settings.XBLOCK_MIXINS, xblock_select=settings.XBLOCK_SELECT_FUNCTION, ) export_dir = path(args[0]) for course_id, course_modules in xml_module_store.modules.iteritems(): course_path = course_id.replace('/', '_') for location, descriptor in course_modules.iteritems(): location_path = location.url().replace('/', '_') data = {} for field_name, field in descriptor.fields.iteritems(): try: data[field_name] = field.read_json(descriptor) except Exception as exc: # pylint: disable=broad-except data[field_name] = { '$type': str(type(exc)), '$value': descriptor._field_data.get(descriptor, field_name) # pylint: disable=protected-access } outdir = export_dir / course_path outdir.makedirs_p() with open(outdir / location_path + '.json', 'w') as outfile: json.dump(data, outfile, sort_keys=True, indent=4) print('', file=outfile)
agpl-3.0
sometallgit/AutoUploader
Python27/Lib/site-packages/pip/_vendor/webencodings/labels.py
512
8979
""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { 'unicode-1-1-utf-8': 'utf-8', 'utf-8': 'utf-8', 'utf8': 'utf-8', '866': 'ibm866', 'cp866': 'ibm866', 'csibm866': 'ibm866', 'ibm866': 'ibm866', 'csisolatin2': 'iso-8859-2', 'iso-8859-2': 'iso-8859-2', 'iso-ir-101': 'iso-8859-2', 'iso8859-2': 'iso-8859-2', 'iso88592': 'iso-8859-2', 'iso_8859-2': 'iso-8859-2', 'iso_8859-2:1987': 'iso-8859-2', 'l2': 'iso-8859-2', 'latin2': 'iso-8859-2', 'csisolatin3': 'iso-8859-3', 'iso-8859-3': 'iso-8859-3', 'iso-ir-109': 'iso-8859-3', 'iso8859-3': 'iso-8859-3', 'iso88593': 'iso-8859-3', 'iso_8859-3': 'iso-8859-3', 'iso_8859-3:1988': 'iso-8859-3', 'l3': 'iso-8859-3', 'latin3': 'iso-8859-3', 'csisolatin4': 'iso-8859-4', 'iso-8859-4': 'iso-8859-4', 'iso-ir-110': 'iso-8859-4', 'iso8859-4': 'iso-8859-4', 'iso88594': 'iso-8859-4', 'iso_8859-4': 'iso-8859-4', 'iso_8859-4:1988': 'iso-8859-4', 'l4': 'iso-8859-4', 'latin4': 'iso-8859-4', 'csisolatincyrillic': 'iso-8859-5', 'cyrillic': 'iso-8859-5', 'iso-8859-5': 'iso-8859-5', 'iso-ir-144': 'iso-8859-5', 'iso8859-5': 'iso-8859-5', 'iso88595': 'iso-8859-5', 'iso_8859-5': 'iso-8859-5', 'iso_8859-5:1988': 'iso-8859-5', 'arabic': 'iso-8859-6', 'asmo-708': 'iso-8859-6', 'csiso88596e': 'iso-8859-6', 'csiso88596i': 'iso-8859-6', 'csisolatinarabic': 'iso-8859-6', 'ecma-114': 'iso-8859-6', 'iso-8859-6': 'iso-8859-6', 'iso-8859-6-e': 'iso-8859-6', 'iso-8859-6-i': 'iso-8859-6', 'iso-ir-127': 'iso-8859-6', 'iso8859-6': 'iso-8859-6', 'iso88596': 'iso-8859-6', 'iso_8859-6': 'iso-8859-6', 'iso_8859-6:1987': 'iso-8859-6', 'csisolatingreek': 'iso-8859-7', 'ecma-118': 'iso-8859-7', 'elot_928': 'iso-8859-7', 'greek': 'iso-8859-7', 'greek8': 'iso-8859-7', 'iso-8859-7': 'iso-8859-7', 'iso-ir-126': 'iso-8859-7', 'iso8859-7': 'iso-8859-7', 'iso88597': 'iso-8859-7', 
'iso_8859-7': 'iso-8859-7', 'iso_8859-7:1987': 'iso-8859-7', 'sun_eu_greek': 'iso-8859-7', 'csiso88598e': 'iso-8859-8', 'csisolatinhebrew': 'iso-8859-8', 'hebrew': 'iso-8859-8', 'iso-8859-8': 'iso-8859-8', 'iso-8859-8-e': 'iso-8859-8', 'iso-ir-138': 'iso-8859-8', 'iso8859-8': 'iso-8859-8', 'iso88598': 'iso-8859-8', 'iso_8859-8': 'iso-8859-8', 'iso_8859-8:1988': 'iso-8859-8', 'visual': 'iso-8859-8', 'csiso88598i': 'iso-8859-8-i', 'iso-8859-8-i': 'iso-8859-8-i', 'logical': 'iso-8859-8-i', 'csisolatin6': 'iso-8859-10', 'iso-8859-10': 'iso-8859-10', 'iso-ir-157': 'iso-8859-10', 'iso8859-10': 'iso-8859-10', 'iso885910': 'iso-8859-10', 'l6': 'iso-8859-10', 'latin6': 'iso-8859-10', 'iso-8859-13': 'iso-8859-13', 'iso8859-13': 'iso-8859-13', 'iso885913': 'iso-8859-13', 'iso-8859-14': 'iso-8859-14', 'iso8859-14': 'iso-8859-14', 'iso885914': 'iso-8859-14', 'csisolatin9': 'iso-8859-15', 'iso-8859-15': 'iso-8859-15', 'iso8859-15': 'iso-8859-15', 'iso885915': 'iso-8859-15', 'iso_8859-15': 'iso-8859-15', 'l9': 'iso-8859-15', 'iso-8859-16': 'iso-8859-16', 'cskoi8r': 'koi8-r', 'koi': 'koi8-r', 'koi8': 'koi8-r', 'koi8-r': 'koi8-r', 'koi8_r': 'koi8-r', 'koi8-u': 'koi8-u', 'csmacintosh': 'macintosh', 'mac': 'macintosh', 'macintosh': 'macintosh', 'x-mac-roman': 'macintosh', 'dos-874': 'windows-874', 'iso-8859-11': 'windows-874', 'iso8859-11': 'windows-874', 'iso885911': 'windows-874', 'tis-620': 'windows-874', 'windows-874': 'windows-874', 'cp1250': 'windows-1250', 'windows-1250': 'windows-1250', 'x-cp1250': 'windows-1250', 'cp1251': 'windows-1251', 'windows-1251': 'windows-1251', 'x-cp1251': 'windows-1251', 'ansi_x3.4-1968': 'windows-1252', 'ascii': 'windows-1252', 'cp1252': 'windows-1252', 'cp819': 'windows-1252', 'csisolatin1': 'windows-1252', 'ibm819': 'windows-1252', 'iso-8859-1': 'windows-1252', 'iso-ir-100': 'windows-1252', 'iso8859-1': 'windows-1252', 'iso88591': 'windows-1252', 'iso_8859-1': 'windows-1252', 'iso_8859-1:1987': 'windows-1252', 'l1': 'windows-1252', 'latin1': 
'windows-1252', 'us-ascii': 'windows-1252', 'windows-1252': 'windows-1252', 'x-cp1252': 'windows-1252', 'cp1253': 'windows-1253', 'windows-1253': 'windows-1253', 'x-cp1253': 'windows-1253', 'cp1254': 'windows-1254', 'csisolatin5': 'windows-1254', 'iso-8859-9': 'windows-1254', 'iso-ir-148': 'windows-1254', 'iso8859-9': 'windows-1254', 'iso88599': 'windows-1254', 'iso_8859-9': 'windows-1254', 'iso_8859-9:1989': 'windows-1254', 'l5': 'windows-1254', 'latin5': 'windows-1254', 'windows-1254': 'windows-1254', 'x-cp1254': 'windows-1254', 'cp1255': 'windows-1255', 'windows-1255': 'windows-1255', 'x-cp1255': 'windows-1255', 'cp1256': 'windows-1256', 'windows-1256': 'windows-1256', 'x-cp1256': 'windows-1256', 'cp1257': 'windows-1257', 'windows-1257': 'windows-1257', 'x-cp1257': 'windows-1257', 'cp1258': 'windows-1258', 'windows-1258': 'windows-1258', 'x-cp1258': 'windows-1258', 'x-mac-cyrillic': 'x-mac-cyrillic', 'x-mac-ukrainian': 'x-mac-cyrillic', 'chinese': 'gbk', 'csgb2312': 'gbk', 'csiso58gb231280': 'gbk', 'gb2312': 'gbk', 'gb_2312': 'gbk', 'gb_2312-80': 'gbk', 'gbk': 'gbk', 'iso-ir-58': 'gbk', 'x-gbk': 'gbk', 'gb18030': 'gb18030', 'hz-gb-2312': 'hz-gb-2312', 'big5': 'big5', 'big5-hkscs': 'big5', 'cn-big5': 'big5', 'csbig5': 'big5', 'x-x-big5': 'big5', 'cseucpkdfmtjapanese': 'euc-jp', 'euc-jp': 'euc-jp', 'x-euc-jp': 'euc-jp', 'csiso2022jp': 'iso-2022-jp', 'iso-2022-jp': 'iso-2022-jp', 'csshiftjis': 'shift_jis', 'ms_kanji': 'shift_jis', 'shift-jis': 'shift_jis', 'shift_jis': 'shift_jis', 'sjis': 'shift_jis', 'windows-31j': 'shift_jis', 'x-sjis': 'shift_jis', 'cseuckr': 'euc-kr', 'csksc56011987': 'euc-kr', 'euc-kr': 'euc-kr', 'iso-ir-149': 'euc-kr', 'korean': 'euc-kr', 'ks_c_5601-1987': 'euc-kr', 'ks_c_5601-1989': 'euc-kr', 'ksc5601': 'euc-kr', 'ksc_5601': 'euc-kr', 'windows-949': 'euc-kr', 'csiso2022kr': 'iso-2022-kr', 'iso-2022-kr': 'iso-2022-kr', 'utf-16be': 'utf-16be', 'utf-16': 'utf-16le', 'utf-16le': 'utf-16le', 'x-user-defined': 'x-user-defined', }
mit
alawnchen/djangoproject.com
docs/search.py
8
7056
# -*- coding: utf-8 -*-
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from elasticsearch.helpers import streaming_bulk
from elasticsearch_dsl import DocType, Long, Nested, Object, String, analysis
from elasticsearch_dsl.connections import connections

from .models import Document, document_url


class SearchPaginator(Paginator):
    """
    A better paginator for search results

    The normal Paginator does a .count() query and then a slice. Since ES
    results contain the total number of results, we can take an optimistic
    slice and then adjust the count.
    """

    def validate_number(self, number):
        """
        Validates the given 1-based page number.

        This class overrides the default behavior and ignores the upper bound.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number

    def page(self, number):
        """
        Returns a page object.

        This class overrides the default behavior and ignores "orphans" and
        assigns the count from the ES result to the Paginator.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page

        # Force the search to evaluate and then attach the count. We want to
        # avoid an extra useless query even if there are no results, so we
        # directly fetch the count from hits.
        result = self.object_list[bottom:top].execute()
        page = Page(result.hits, number, self)
        # Update the `_count`.
        self._count = page.object_list.total

        # Also store the aggregations, if any.
        if hasattr(result, 'aggregations'):
            page.aggregations = result.aggregations

        # Now that we have the count validate that the page number isn't higher
        # than the possible number of pages and adjust accordingly.
        if number > self.num_pages:
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page


class ImprovedDocType(DocType):
    # Shared helpers for indexing Django ORM objects into Elasticsearch.
    # Subclasses set a `model` attribute and implement from_django().

    @classmethod
    def index_all(cls, using=None, delete=False, **kwargs):
        # Bulk-index every object from index_queryset(); with delete=True
        # the whole index is dropped and re-created first.  Yields the
        # (ok, item) pairs from streaming_bulk for progress reporting.
        def actions_generator():
            for obj in cls.index_queryset().iterator():
                doc_dict = cls.from_django(obj).to_dict()
                doc_dict['_id'] = obj.id
                yield doc_dict

        client = connections.get_connection(using or cls._doc_type.using)
        if delete:
            # ignore 400/404: the index may not exist yet
            client.indices.delete(index=cls._doc_type.index, ignore=[400, 404])
        cls._doc_type.init()
        for ok, item in streaming_bulk(client, actions_generator(),
                                       index=cls._doc_type.index,
                                       doc_type=cls._doc_type.name,
                                       raise_on_error=True,
                                       refresh=True, **kwargs):
            yield ok, item

    @classmethod
    def index_queryset(cls):
        # Default: index everything; subclasses narrow this down.
        return cls.model._default_manager.all()

    @classmethod
    def index_object(cls, obj):
        return cls.from_django(obj).save()

    @classmethod
    def unindex_object(cls, obj):
        return cls.get(id=obj.pk).delete()

    @classmethod
    def from_django(cls, obj):
        raise NotImplementedError('You must define a from_django classmethod '
                                  'to map ORM object fields to ES fields')


# NOTE(review): monkeypatches elasticsearch_dsl's set of recognized built-in
# tokenizer names so 'path_hierarchy' and 'whitespace' are accepted --
# presumably working around a gap in the installed elasticsearch_dsl
# version; confirm whether this is still needed before upgrading.
analysis.Tokenizer._builtins = analysis.TOKENIZERS = frozenset((
    'keyword', 'standard', 'path_hierarchy', 'whitespace'
))


class PathHierarchyTokenizer(analysis.Tokenizer):
    name = 'path_hierarchy'


class WhitespaceTokenizer(analysis.Tokenizer):
    name = 'whitespace'


path_analyzer = analysis.CustomAnalyzer('path',
                                        tokenizer='path_hierarchy',
                                        filter=['lowercase'])

lower_whitespace_analyzer = analysis.analyzer('lower_whitespace',
                                              tokenizer='whitespace',
                                              filter=['lowercase', 'stop'],
                                              char_filter=['html_strip'])


class DocumentDocType(ImprovedDocType):
    """
    The main documentation doc type to be used for searching.

    It stores a bit of meta data so we don't have to hit the db when
    rendering search results.

    The search view will be using the 'lang' and 'version' fields of the
    document's release to filter the search results, depending which was
    found in the URL.

    The breadcrumbs are shown under the search result title.
    """
    model = Document

    id = Long()
    title = String(analyzer=lower_whitespace_analyzer, boost=1.2)
    path = String(index='no', analyzer=path_analyzer)
    content = String(analyzer=lower_whitespace_analyzer)
    content_raw = String(index_options='offsets')
    release = Object(properties={
        'id': Long(),
        'version': String(index='not_analyzed'),
        'lang': String(index='not_analyzed'),
    })
    breadcrumbs = Nested(properties={
        'title': String(index='not_analyzed'),
        'path': String(index='not_analyzed'),
    })

    class Meta:
        index = 'docs'
        doc_type = 'document'

    @classmethod
    def index_queryset(cls):
        qs = super(DocumentDocType, cls).index_queryset()
        return (
            # don't index the module pages since source code is hard to
            # combine with full text search
            qs.exclude(path__startswith='_modules')
            # not the crazy big flattened index of the CBVs
              .exclude(path__startswith='ref/class-based-views/flattened-index')
              .select_related('release'))

    @classmethod
    def from_django(cls, obj):
        # turns HTML entities into unicode characters again and removes
        # all HTML tags, aka "plain text" version of the document
        raw_body = strip_tags(unescape_entities(obj.body).replace(u'¶', ''))
        doc = cls(path=obj.path, title=obj.title,
                  content=obj.body, content_raw=raw_body,
                  meta={'id': obj.id})
        doc.release = {
            'id': obj.release.id,
            'lang': obj.release.lang,
            'version': obj.release.version,
        }
        breadcrumbs = []
        for breadcrumb in cls.model.objects.breadcrumbs(obj):
            breadcrumbs.append({
                'title': breadcrumb.title,
                'path': breadcrumb.path,
            })
        doc.breadcrumbs = breadcrumbs
        return doc

    def get_absolute_url(self):
        return document_url(self)
bsd-3-clause
joeythesaint/yocto-autobuilder
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/conch/test/test_helper.py
12
18162
# -*- test-case-name: twisted.conch.test.test_helper -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. from twisted.conch.insults import helper from twisted.conch.insults.insults import G0, G1, G2, G3 from twisted.conch.insults.insults import modes, privateModes from twisted.conch.insults.insults import NORMAL, BOLD, UNDERLINE, BLINK, REVERSE_VIDEO from twisted.trial import unittest WIDTH = 80 HEIGHT = 24 class BufferTestCase(unittest.TestCase): def setUp(self): self.term = helper.TerminalBuffer() self.term.connectionMade() def testInitialState(self): self.assertEqual(self.term.width, WIDTH) self.assertEqual(self.term.height, HEIGHT) self.assertEqual(str(self.term), '\n' * (HEIGHT - 1)) self.assertEqual(self.term.reportCursorPosition(), (0, 0)) def test_initialPrivateModes(self): """ Verify that only DEC Auto Wrap Mode (DECAWM) and DEC Text Cursor Enable Mode (DECTCEM) are initially in the Set Mode (SM) state. """ self.assertEqual( {privateModes.AUTO_WRAP: True, privateModes.CURSOR_MODE: True}, self.term.privateModes) def test_carriageReturn(self): """ C{"\r"} moves the cursor to the first column in the current row. """ self.term.cursorForward(5) self.term.cursorDown(3) self.assertEqual(self.term.reportCursorPosition(), (5, 3)) self.term.insertAtCursor("\r") self.assertEqual(self.term.reportCursorPosition(), (0, 3)) def test_linefeed(self): """ C{"\n"} moves the cursor to the next row without changing the column. """ self.term.cursorForward(5) self.assertEqual(self.term.reportCursorPosition(), (5, 0)) self.term.insertAtCursor("\n") self.assertEqual(self.term.reportCursorPosition(), (5, 1)) def test_newline(self): """ C{write} transforms C{"\n"} into C{"\r\n"}. 
""" self.term.cursorForward(5) self.term.cursorDown(3) self.assertEqual(self.term.reportCursorPosition(), (5, 3)) self.term.write("\n") self.assertEqual(self.term.reportCursorPosition(), (0, 4)) def test_setPrivateModes(self): """ Verify that L{helper.TerminalBuffer.setPrivateModes} changes the Set Mode (SM) state to "set" for the private modes it is passed. """ expected = self.term.privateModes.copy() self.term.setPrivateModes([privateModes.SCROLL, privateModes.SCREEN]) expected[privateModes.SCROLL] = True expected[privateModes.SCREEN] = True self.assertEqual(expected, self.term.privateModes) def test_resetPrivateModes(self): """ Verify that L{helper.TerminalBuffer.resetPrivateModes} changes the Set Mode (SM) state to "reset" for the private modes it is passed. """ expected = self.term.privateModes.copy() self.term.resetPrivateModes([privateModes.AUTO_WRAP, privateModes.CURSOR_MODE]) del expected[privateModes.AUTO_WRAP] del expected[privateModes.CURSOR_MODE] self.assertEqual(expected, self.term.privateModes) def testCursorDown(self): self.term.cursorDown(3) self.assertEqual(self.term.reportCursorPosition(), (0, 3)) self.term.cursorDown() self.assertEqual(self.term.reportCursorPosition(), (0, 4)) self.term.cursorDown(HEIGHT) self.assertEqual(self.term.reportCursorPosition(), (0, HEIGHT - 1)) def testCursorUp(self): self.term.cursorUp(5) self.assertEqual(self.term.reportCursorPosition(), (0, 0)) self.term.cursorDown(20) self.term.cursorUp(1) self.assertEqual(self.term.reportCursorPosition(), (0, 19)) self.term.cursorUp(19) self.assertEqual(self.term.reportCursorPosition(), (0, 0)) def testCursorForward(self): self.term.cursorForward(2) self.assertEqual(self.term.reportCursorPosition(), (2, 0)) self.term.cursorForward(2) self.assertEqual(self.term.reportCursorPosition(), (4, 0)) self.term.cursorForward(WIDTH) self.assertEqual(self.term.reportCursorPosition(), (WIDTH, 0)) def testCursorBackward(self): self.term.cursorForward(10) self.term.cursorBackward(2) 
self.assertEqual(self.term.reportCursorPosition(), (8, 0)) self.term.cursorBackward(7) self.assertEqual(self.term.reportCursorPosition(), (1, 0)) self.term.cursorBackward(1) self.assertEqual(self.term.reportCursorPosition(), (0, 0)) self.term.cursorBackward(1) self.assertEqual(self.term.reportCursorPosition(), (0, 0)) def testCursorPositioning(self): self.term.cursorPosition(3, 9) self.assertEqual(self.term.reportCursorPosition(), (3, 9)) def testSimpleWriting(self): s = "Hello, world." self.term.write(s) self.assertEqual( str(self.term), s + '\n' + '\n' * (HEIGHT - 2)) def testOvertype(self): s = "hello, world." self.term.write(s) self.term.cursorBackward(len(s)) self.term.resetModes([modes.IRM]) self.term.write("H") self.assertEqual( str(self.term), ("H" + s[1:]) + '\n' + '\n' * (HEIGHT - 2)) def testInsert(self): s = "ello, world." self.term.write(s) self.term.cursorBackward(len(s)) self.term.setModes([modes.IRM]) self.term.write("H") self.assertEqual( str(self.term), ("H" + s) + '\n' + '\n' * (HEIGHT - 2)) def testWritingInTheMiddle(self): s = "Hello, world." self.term.cursorDown(5) self.term.cursorForward(5) self.term.write(s) self.assertEqual( str(self.term), '\n' * 5 + (self.term.fill * 5) + s + '\n' + '\n' * (HEIGHT - 7)) def testWritingWrappedAtEndOfLine(self): s = "Hello, world." 
self.term.cursorForward(WIDTH - 5) self.term.write(s) self.assertEqual( str(self.term), s[:5].rjust(WIDTH) + '\n' + s[5:] + '\n' + '\n' * (HEIGHT - 3)) def testIndex(self): self.term.index() self.assertEqual(self.term.reportCursorPosition(), (0, 1)) self.term.cursorDown(HEIGHT) self.assertEqual(self.term.reportCursorPosition(), (0, HEIGHT - 1)) self.term.index() self.assertEqual(self.term.reportCursorPosition(), (0, HEIGHT - 1)) def testReverseIndex(self): self.term.reverseIndex() self.assertEqual(self.term.reportCursorPosition(), (0, 0)) self.term.cursorDown(2) self.assertEqual(self.term.reportCursorPosition(), (0, 2)) self.term.reverseIndex() self.assertEqual(self.term.reportCursorPosition(), (0, 1)) def test_nextLine(self): """ C{nextLine} positions the cursor at the beginning of the row below the current row. """ self.term.nextLine() self.assertEqual(self.term.reportCursorPosition(), (0, 1)) self.term.cursorForward(5) self.assertEqual(self.term.reportCursorPosition(), (5, 1)) self.term.nextLine() self.assertEqual(self.term.reportCursorPosition(), (0, 2)) def testSaveCursor(self): self.term.cursorDown(5) self.term.cursorForward(7) self.assertEqual(self.term.reportCursorPosition(), (7, 5)) self.term.saveCursor() self.term.cursorDown(7) self.term.cursorBackward(3) self.assertEqual(self.term.reportCursorPosition(), (4, 12)) self.term.restoreCursor() self.assertEqual(self.term.reportCursorPosition(), (7, 5)) def testSingleShifts(self): self.term.singleShift2() self.term.write('Hi') ch = self.term.getCharacter(0, 0) self.assertEqual(ch[0], 'H') self.assertEqual(ch[1].charset, G2) ch = self.term.getCharacter(1, 0) self.assertEqual(ch[0], 'i') self.assertEqual(ch[1].charset, G0) self.term.singleShift3() self.term.write('!!') ch = self.term.getCharacter(2, 0) self.assertEqual(ch[0], '!') self.assertEqual(ch[1].charset, G3) ch = self.term.getCharacter(3, 0) self.assertEqual(ch[0], '!') self.assertEqual(ch[1].charset, G0) def testShifting(self): s1 = "Hello" s2 = "World" 
s3 = "Bye!" self.term.write("Hello\n") self.term.shiftOut() self.term.write("World\n") self.term.shiftIn() self.term.write("Bye!\n") g = G0 h = 0 for s in (s1, s2, s3): for i in range(len(s)): ch = self.term.getCharacter(i, h) self.assertEqual(ch[0], s[i]) self.assertEqual(ch[1].charset, g) g = g == G0 and G1 or G0 h += 1 def testGraphicRendition(self): self.term.selectGraphicRendition(BOLD, UNDERLINE, BLINK, REVERSE_VIDEO) self.term.write('W') self.term.selectGraphicRendition(NORMAL) self.term.write('X') self.term.selectGraphicRendition(BLINK) self.term.write('Y') self.term.selectGraphicRendition(BOLD) self.term.write('Z') ch = self.term.getCharacter(0, 0) self.assertEqual(ch[0], 'W') self.failUnless(ch[1].bold) self.failUnless(ch[1].underline) self.failUnless(ch[1].blink) self.failUnless(ch[1].reverseVideo) ch = self.term.getCharacter(1, 0) self.assertEqual(ch[0], 'X') self.failIf(ch[1].bold) self.failIf(ch[1].underline) self.failIf(ch[1].blink) self.failIf(ch[1].reverseVideo) ch = self.term.getCharacter(2, 0) self.assertEqual(ch[0], 'Y') self.failUnless(ch[1].blink) self.failIf(ch[1].bold) self.failIf(ch[1].underline) self.failIf(ch[1].reverseVideo) ch = self.term.getCharacter(3, 0) self.assertEqual(ch[0], 'Z') self.failUnless(ch[1].blink) self.failUnless(ch[1].bold) self.failIf(ch[1].underline) self.failIf(ch[1].reverseVideo) def testColorAttributes(self): s1 = "Merry xmas" s2 = "Just kidding" self.term.selectGraphicRendition(helper.FOREGROUND + helper.RED, helper.BACKGROUND + helper.GREEN) self.term.write(s1 + "\n") self.term.selectGraphicRendition(NORMAL) self.term.write(s2 + "\n") for i in range(len(s1)): ch = self.term.getCharacter(i, 0) self.assertEqual(ch[0], s1[i]) self.assertEqual(ch[1].charset, G0) self.assertEqual(ch[1].bold, False) self.assertEqual(ch[1].underline, False) self.assertEqual(ch[1].blink, False) self.assertEqual(ch[1].reverseVideo, False) self.assertEqual(ch[1].foreground, helper.RED) self.assertEqual(ch[1].background, helper.GREEN) for 
i in range(len(s2)): ch = self.term.getCharacter(i, 1) self.assertEqual(ch[0], s2[i]) self.assertEqual(ch[1].charset, G0) self.assertEqual(ch[1].bold, False) self.assertEqual(ch[1].underline, False) self.assertEqual(ch[1].blink, False) self.assertEqual(ch[1].reverseVideo, False) self.assertEqual(ch[1].foreground, helper.WHITE) self.assertEqual(ch[1].background, helper.BLACK) def testEraseLine(self): s1 = 'line 1' s2 = 'line 2' s3 = 'line 3' self.term.write('\n'.join((s1, s2, s3)) + '\n') self.term.cursorPosition(1, 1) self.term.eraseLine() self.assertEqual( str(self.term), s1 + '\n' + '\n' + s3 + '\n' + '\n' * (HEIGHT - 4)) def testEraseToLineEnd(self): s = 'Hello, world.' self.term.write(s) self.term.cursorBackward(5) self.term.eraseToLineEnd() self.assertEqual( str(self.term), s[:-5] + '\n' + '\n' * (HEIGHT - 2)) def testEraseToLineBeginning(self): s = 'Hello, world.' self.term.write(s) self.term.cursorBackward(5) self.term.eraseToLineBeginning() self.assertEqual( str(self.term), s[-4:].rjust(len(s)) + '\n' + '\n' * (HEIGHT - 2)) def testEraseDisplay(self): self.term.write('Hello world\n') self.term.write('Goodbye world\n') self.term.eraseDisplay() self.assertEqual( str(self.term), '\n' * (HEIGHT - 1)) def testEraseToDisplayEnd(self): s1 = "Hello world" s2 = "Goodbye world" self.term.write('\n'.join((s1, s2, ''))) self.term.cursorPosition(5, 1) self.term.eraseToDisplayEnd() self.assertEqual( str(self.term), s1 + '\n' + s2[:5] + '\n' + '\n' * (HEIGHT - 3)) def testEraseToDisplayBeginning(self): s1 = "Hello world" s2 = "Goodbye world" self.term.write('\n'.join((s1, s2))) self.term.cursorPosition(5, 1) self.term.eraseToDisplayBeginning() self.assertEqual( str(self.term), '\n' + s2[6:].rjust(len(s2)) + '\n' + '\n' * (HEIGHT - 3)) def testLineInsertion(self): s1 = "Hello world" s2 = "Goodbye world" self.term.write('\n'.join((s1, s2))) self.term.cursorPosition(7, 1) self.term.insertLine() self.assertEqual( str(self.term), s1 + '\n' + '\n' + s2 + '\n' + '\n' * (HEIGHT - 
4)) def testLineDeletion(self): s1 = "Hello world" s2 = "Middle words" s3 = "Goodbye world" self.term.write('\n'.join((s1, s2, s3))) self.term.cursorPosition(9, 1) self.term.deleteLine() self.assertEqual( str(self.term), s1 + '\n' + s3 + '\n' + '\n' * (HEIGHT - 3)) class FakeDelayedCall: called = False cancelled = False def __init__(self, fs, timeout, f, a, kw): self.fs = fs self.timeout = timeout self.f = f self.a = a self.kw = kw def active(self): return not (self.cancelled or self.called) def cancel(self): self.cancelled = True # self.fs.calls.remove(self) def call(self): self.called = True self.f(*self.a, **self.kw) class FakeScheduler: def __init__(self): self.calls = [] def callLater(self, timeout, f, *a, **kw): self.calls.append(FakeDelayedCall(self, timeout, f, a, kw)) return self.calls[-1] class ExpectTestCase(unittest.TestCase): def setUp(self): self.term = helper.ExpectableBuffer() self.term.connectionMade() self.fs = FakeScheduler() def testSimpleString(self): result = [] d = self.term.expect("hello world", timeout=1, scheduler=self.fs) d.addCallback(result.append) self.term.write("greeting puny earthlings\n") self.failIf(result) self.term.write("hello world\n") self.failUnless(result) self.assertEqual(result[0].group(), "hello world") self.assertEqual(len(self.fs.calls), 1) self.failIf(self.fs.calls[0].active()) def testBrokenUpString(self): result = [] d = self.term.expect("hello world") d.addCallback(result.append) self.failIf(result) self.term.write("hello ") self.failIf(result) self.term.write("worl") self.failIf(result) self.term.write("d") self.failUnless(result) self.assertEqual(result[0].group(), "hello world") def testMultiple(self): result = [] d1 = self.term.expect("hello ") d1.addCallback(result.append) d2 = self.term.expect("world") d2.addCallback(result.append) self.failIf(result) self.term.write("hello") self.failIf(result) self.term.write(" ") self.assertEqual(len(result), 1) self.term.write("world") self.assertEqual(len(result), 2) 
self.assertEqual(result[0].group(), "hello ") self.assertEqual(result[1].group(), "world") def testSynchronous(self): self.term.write("hello world") result = [] d = self.term.expect("hello world") d.addCallback(result.append) self.failUnless(result) self.assertEqual(result[0].group(), "hello world") def testMultipleSynchronous(self): self.term.write("goodbye world") result = [] d1 = self.term.expect("bye") d1.addCallback(result.append) d2 = self.term.expect("world") d2.addCallback(result.append) self.assertEqual(len(result), 2) self.assertEqual(result[0].group(), "bye") self.assertEqual(result[1].group(), "world") def _cbTestTimeoutFailure(self, res): self.assert_(hasattr(res, 'type')) self.assertEqual(res.type, helper.ExpectationTimeout) def testTimeoutFailure(self): d = self.term.expect("hello world", timeout=1, scheduler=self.fs) d.addBoth(self._cbTestTimeoutFailure) self.fs.calls[0].call() def testOverlappingTimeout(self): self.term.write("not zoomtastic") result = [] d1 = self.term.expect("hello world", timeout=1, scheduler=self.fs) d1.addBoth(self._cbTestTimeoutFailure) d2 = self.term.expect("zoom") d2.addCallback(result.append) self.fs.calls[0].call() self.assertEqual(len(result), 1) self.assertEqual(result[0].group(), "zoom")
gpl-2.0
genkilife/qemu-linaro
scripts/qapi-visit.py
18
14554
# # QAPI visitor generator # # Copyright IBM, Corp. 2011 # # Authors: # Anthony Liguori <aliguori@us.ibm.com> # Michael Roth <mdroth@linux.vnet.ibm.com> # # This work is licensed under the terms of the GNU GPL, version 2. # See the COPYING file in the top-level directory. from ordereddict import OrderedDict from qapi import * import sys import os import getopt import errno def generate_visit_struct_fields(name, field_prefix, fn_prefix, members, base = None): substructs = [] ret = '' if not fn_prefix: full_name = name else: full_name = "%s_%s" % (name, fn_prefix) for argname, argentry, optional, structured in parse_args(members): if structured: if not fn_prefix: nested_fn_prefix = argname else: nested_fn_prefix = "%s_%s" % (fn_prefix, argname) nested_field_prefix = "%s%s." % (field_prefix, argname) ret += generate_visit_struct_fields(name, nested_field_prefix, nested_fn_prefix, argentry) ret += mcgen(''' static void visit_type_%(full_name)s_fields(Visitor *m, %(name)s ** obj, Error **errp) { Error *err = NULL; ''', name=name, full_name=full_name) push_indent() if base: ret += mcgen(''' visit_start_implicit_struct(m, (void**) &(*obj)->%(c_name)s, sizeof(%(type)s), &err); if (!err) { visit_type_%(type)s_fields(m, &(*obj)->%(c_prefix)s%(c_name)s, &err); error_propagate(errp, err); err = NULL; visit_end_implicit_struct(m, &err); } ''', c_prefix=c_var(field_prefix), type=type_name(base), c_name=c_var('base')) for argname, argentry, optional, structured in parse_args(members): if optional: ret += mcgen(''' visit_start_optional(m, &(*obj)->%(c_prefix)shas_%(c_name)s, "%(name)s", &err); if ((*obj)->%(prefix)shas_%(c_name)s) { ''', c_prefix=c_var(field_prefix), prefix=field_prefix, c_name=c_var(argname), name=argname) push_indent() if structured: ret += generate_visit_struct_body(full_name, argname, argentry) else: ret += mcgen(''' visit_type_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, "%(name)s", &err); ''', c_prefix=c_var(field_prefix), prefix=field_prefix, 
type=type_name(argentry), c_name=c_var(argname), name=argname) if optional: pop_indent() ret += mcgen(''' } visit_end_optional(m, &err); ''') pop_indent() ret += mcgen(''' error_propagate(errp, err); } ''') return ret def generate_visit_struct_body(field_prefix, name, members): ret = mcgen(''' if (!error_is_set(errp)) { ''') push_indent() if not field_prefix: full_name = name else: full_name = "%s_%s" % (field_prefix, name) if len(field_prefix): ret += mcgen(''' Error **errp = &err; /* from outer scope */ Error *err = NULL; visit_start_struct(m, NULL, "", "%(name)s", 0, &err); ''', name=name) else: ret += mcgen(''' Error *err = NULL; visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); ''', name=name) ret += mcgen(''' if (!err) { if (*obj) { visit_type_%(name)s_fields(m, obj, &err); error_propagate(errp, err); err = NULL; } ''', name=full_name) pop_indent() ret += mcgen(''' /* Always call end_struct if start_struct succeeded. */ visit_end_struct(m, &err); } error_propagate(errp, err); } ''') return ret def generate_visit_struct(expr): name = expr['type'] members = expr['data'] base = expr.get('base') ret = generate_visit_struct_fields(name, "", "", members, base) ret += mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp) { ''', name=name) push_indent() ret += generate_visit_struct_body("", name, members) pop_indent() ret += mcgen(''' } ''') return ret def generate_visit_list(name, members): return mcgen(''' void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp) { GenericList *i, **prev = (GenericList **)obj; Error *err = NULL; if (!error_is_set(errp)) { visit_start_list(m, name, &err); if (!err) { for (; (i = visit_next_list(m, prev, &err)) != NULL; prev = &i) { %(name)sList *native_i = (%(name)sList *)i; visit_type_%(name)s(m, &native_i->value, NULL, &err); } error_propagate(errp, err); err = NULL; /* Always call end_list if start_list succeeded. 
*/ visit_end_list(m, &err); } error_propagate(errp, err); } } ''', name=name) def generate_visit_enum(name, members): return mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp) { visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp); } ''', name=name) def generate_visit_anon_union(name, members): ret = mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp) { Error *err = NULL; if (!error_is_set(errp)) { visit_start_implicit_struct(m, (void**) obj, sizeof(%(name)s), &err); visit_get_next_type(m, (int*) &(*obj)->kind, %(name)s_qtypes, name, &err); switch ((*obj)->kind) { ''', name=name) # For anon union, always use the default enum type automatically generated # as "'%sKind' % (name)" disc_type = '%sKind' % (name) for key in members: assert (members[key] in builtin_types or find_struct(members[key]) or find_union(members[key])), "Invalid anonymous union member" enum_full_value = generate_enum_full_value(disc_type, key) ret += mcgen(''' case %(enum_full_value)s: visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, name, &err); break; ''', enum_full_value = enum_full_value, c_type = type_name(members[key]), c_name = c_fun(key)) ret += mcgen(''' default: abort(); } error_propagate(errp, err); err = NULL; visit_end_implicit_struct(m, &err); } } ''') return ret def generate_visit_union(expr): name = expr['union'] members = expr['data'] base = expr.get('base') discriminator = expr.get('discriminator') if discriminator == {}: assert not base return generate_visit_anon_union(name, members) enum_define = discriminator_find_enum_define(expr) if enum_define: # Use the enum type as discriminator ret = "" disc_type = enum_define['enum_name'] else: # There will always be a discriminator in the C switch code, by default it # is an enum type generated silently as "'%sKind' % (name)" ret = generate_visit_enum('%sKind' % name, members.keys()) disc_type = '%sKind' % (name) if base: 
base_fields = find_struct(base)['data'] if discriminator: base_fields = base_fields.copy() del base_fields[discriminator] ret += generate_visit_struct_fields(name, "", "", base_fields) ret += mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp) { Error *err = NULL; if (!error_is_set(errp)) { visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); if (!err) { if (*obj) { ''', name=name) push_indent() push_indent() push_indent() if base: ret += mcgen(''' visit_type_%(name)s_fields(m, obj, &err); ''', name=name) pop_indent() if not discriminator: disc_key = "type" else: disc_key = discriminator ret += mcgen(''' visit_type_%(disc_type)s(m, &(*obj)->kind, "%(disc_key)s", &err); if (!err) { switch ((*obj)->kind) { ''', disc_type = disc_type, disc_key = disc_key) for key in members: if not discriminator: fmt = 'visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);' else: fmt = '''visit_start_implicit_struct(m, (void**) &(*obj)->%(c_name)s, sizeof(%(c_type)s), &err); if (!err) { visit_type_%(c_type)s_fields(m, &(*obj)->%(c_name)s, &err); error_propagate(errp, err); err = NULL; visit_end_implicit_struct(m, &err); }''' enum_full_value = generate_enum_full_value(disc_type, key) ret += mcgen(''' case %(enum_full_value)s: ''' + fmt + ''' break; ''', enum_full_value = enum_full_value, c_type=type_name(members[key]), c_name=c_fun(key)) ret += mcgen(''' default: abort(); } } error_propagate(errp, err); err = NULL; } ''') pop_indent() ret += mcgen(''' /* Always call end_struct if start_struct succeeded. 
*/ visit_end_struct(m, &err); } error_propagate(errp, err); } ''') pop_indent(); ret += mcgen(''' } ''') return ret def generate_declaration(name, members, genlist=True, builtin_type=False): ret = "" if not builtin_type: ret += mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp); ''', name=name) if genlist: ret += mcgen(''' void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp); ''', name=name) return ret def generate_enum_declaration(name, members, genlist=True): ret = "" if genlist: ret += mcgen(''' void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp); ''', name=name) return ret def generate_decl_enum(name, members, genlist=True): return mcgen(''' void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp); ''', name=name) try: opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:", ["source", "header", "builtins", "prefix=", "output-dir="]) except getopt.GetoptError, err: print str(err) sys.exit(1) output_dir = "" prefix = "" c_file = 'qapi-visit.c' h_file = 'qapi-visit.h' do_c = False do_h = False do_builtins = False for o, a in opts: if o in ("-p", "--prefix"): prefix = a elif o in ("-o", "--output-dir"): output_dir = a + "/" elif o in ("-c", "--source"): do_c = True elif o in ("-h", "--header"): do_h = True elif o in ("-b", "--builtins"): do_builtins = True if not do_c and not do_h: do_c = True do_h = True c_file = output_dir + prefix + c_file h_file = output_dir + prefix + h_file try: os.makedirs(output_dir) except os.error, e: if e.errno != errno.EEXIST: raise def maybe_open(really, name, opt): if really: return open(name, opt) else: import StringIO return StringIO.StringIO() fdef = maybe_open(do_c, c_file, 'w') fdecl = maybe_open(do_h, h_file, 'w') fdef.write(mcgen(''' /* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI visitor functions * * Copyright IBM, Corp. 
2011 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu-common.h" #include "%(header)s" ''', header=basename(h_file))) fdecl.write(mcgen(''' /* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI visitor function * * Copyright IBM, Corp. 2011 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #ifndef %(guard)s #define %(guard)s #include "qapi/visitor.h" #include "%(prefix)sqapi-types.h" ''', prefix=prefix, guard=guardname(h_file))) exprs = parse_schema(sys.stdin) # to avoid header dependency hell, we always generate declarations # for built-in types in our header files and simply guard them fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL")) for typename in builtin_types: fdecl.write(generate_declaration(typename, None, genlist=True, builtin_type=True)) fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL")) # ...this doesn't work for cases where we link in multiple objects that # have the functions defined, so we use -b option to provide control # over these cases if do_builtins: for typename in builtin_types: fdef.write(generate_visit_list(typename, None)) for expr in exprs: if expr.has_key('type'): ret = generate_visit_struct(expr) ret += generate_visit_list(expr['type'], expr['data']) fdef.write(ret) ret = generate_declaration(expr['type'], expr['data']) fdecl.write(ret) elif expr.has_key('union'): ret = generate_visit_union(expr) ret += generate_visit_list(expr['union'], expr['data']) fdef.write(ret) enum_define = discriminator_find_enum_define(expr) ret = "" if not enum_define: ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys()) ret += generate_declaration(expr['union'], expr['data']) fdecl.write(ret) elif 
expr.has_key('enum'): ret = generate_visit_list(expr['enum'], expr['data']) ret += generate_visit_enum(expr['enum'], expr['data']) fdef.write(ret) ret = generate_decl_enum(expr['enum'], expr['data']) ret += generate_enum_declaration(expr['enum'], expr['data']) fdecl.write(ret) fdecl.write(''' #endif ''') fdecl.flush() fdecl.close() fdef.flush() fdef.close()
gpl-2.0
amohanta/thug
src/DOM/W3C/HTML/HTMLIFrameElement.py
8
1114
#!/usr/bin/env python import logging from .HTMLElement import HTMLElement from .attr_property import attr_property log = logging.getLogger("Thug") class HTMLIFrameElement(HTMLElement): def __init__(self, doc, tag): HTMLElement.__init__(self, doc, tag) align = attr_property("align") frameBorder = attr_property("frameborder") height = attr_property("height") longDesc = attr_property("longdesc") marginHeight = attr_property("marginheight") marginWidth = attr_property("marginwidth") name = attr_property("name") scrolling = attr_property("scrolling") src = attr_property("src") width = attr_property("width") # Introduced in DOM Level 2 @property def contentDocument(self): return self.doc if self.doc else None @property def contentWindow(self): if self.id in log.ThugLogging.windows: return log.ThugLogging.windows[self.id] if self.doc is None: return None return getattr(self.doc, 'window', None)
gpl-2.0
p0cisk/Quantum-GIS
python/plugins/processing/gui/RenderingStyles.py
4
2746
# -*- coding: utf-8 -*- """ *************************************************************************** RenderingStyles.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import object __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from processing.tools.system import userFolder class RenderingStyles(object): styles = {} @staticmethod def addAlgStylesAndSave(algname, styles): RenderingStyles.styles[algname] = styles RenderingStyles.saveSettings() @staticmethod def configFile(): return os.path.join(userFolder(), 'processing_qgis_styles.conf') @staticmethod def loadStyles(): if not os.path.isfile(RenderingStyles.configFile()): return lines = open(RenderingStyles.configFile()) line = lines.readline().strip('\n') while line != '': tokens = line.split('|') if tokens[0] in list(RenderingStyles.styles.keys()): RenderingStyles.styles[tokens[0]][tokens[1]] = tokens[2] else: alg = {} alg[tokens[1]] = tokens[2] RenderingStyles.styles[tokens[0]] = alg line = lines.readline().strip('\n') lines.close() @staticmethod def saveSettings(): fout = open(RenderingStyles.configFile(), 'w') for alg in list(RenderingStyles.styles.keys()): for out in list(RenderingStyles.styles[alg].keys()): fout.write(alg + '|' + out + '|' + RenderingStyles.styles[alg][out] + '\n') fout.close() @staticmethod def getStyle(algname, outputname): if algname in 
RenderingStyles.styles: if outputname in RenderingStyles.styles[algname]: return RenderingStyles.styles[algname][outputname] return None
gpl-2.0
rmetzger/flink
flink-python/pyflink/dataset/__init__.py
20
1234
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ """ Important classes of Flink Batch API: - :class:`ExecutionEnvironment`: The ExecutionEnvironment is the context in which a batch program is executed. """ from pyflink.dataset.execution_environment import ExecutionEnvironment __all__ = ['ExecutionEnvironment']
apache-2.0
pymanopt/pymanopt
tests/test_multi_tools.py
2
2999
import numpy as np from numpy import linalg as la, random as rnd, testing as np_testing from scipy.linalg import expm, logm from pymanopt.tools.multi import (multiexp, multieye, multilog, multiprod, multisym, multitransp) from ._test import TestCase class TestMulti(TestCase): def setUp(self): self.m = 40 self.n = 50 self.p = 40 self.k = 10 def test_multiprod_singlemat(self): # Two random matrices A (m x n) and B (n x p) A = rnd.randn(self.m, self.n) B = rnd.randn(self.n, self.p) # Compare the products. np_testing.assert_allclose(A.dot(B), multiprod(A, B)) def test_multiprod(self): # Two random arrays of matrices A (k x m x n) and B (k x n x p) A = rnd.randn(self.k, self.m, self.n) B = rnd.randn(self.k, self.n, self.p) C = np.zeros((self.k, self.m, self.p)) for i in range(self.k): C[i] = A[i].dot(B[i]) np_testing.assert_allclose(C, multiprod(A, B)) def test_multitransp_singlemat(self): A = rnd.randn(self.m, self.n) np_testing.assert_array_equal(A.T, multitransp(A)) def test_multitransp(self): A = rnd.randn(self.k, self.m, self.n) C = np.zeros((self.k, self.n, self.m)) for i in range(self.k): C[i] = A[i].T np_testing.assert_array_equal(C, multitransp(A)) def test_multisym(self): A = rnd.randn(self.k, self.m, self.m) C = np.zeros((self.k, self.m, self.m)) for i in range(self.k): C[i] = .5 * (A[i] + A[i].T) np.testing.assert_allclose(C, multisym(A)) def test_multieye(self): A = np.zeros((self.k, self.n, self.n)) for i in range(self.k): A[i] = np.eye(self.n) np_testing.assert_allclose(A, multieye(self.k, self.n)) def test_multilog_singlemat(self): a = np.diag(rnd.rand(self.m)) q, r = la.qr(rnd.randn(self.m, self.m)) # A is a positive definite matrix A = q.dot(a.dot(q.T)) np_testing.assert_allclose(multilog(A, pos_def=True), logm(A)) def test_multilog(self): A = np.zeros((self.k, self.m, self.m)) L = np.zeros((self.k, self.m, self.m)) for i in range(self.k): a = np.diag(rnd.rand(self.m)) q, r = la.qr(rnd.randn(self.m, self.m)) A[i] = q.dot(a.dot(q.T)) L[i] = logm(A[i]) 
np_testing.assert_allclose(multilog(A, pos_def=True), L) def test_multiexp_singlemat(self): # A is a positive definite matrix A = rnd.randn(self.m, self.m) A = A + A.T np_testing.assert_allclose(multiexp(A, sym=True), expm(A)) def test_multiexp(self): A = multisym(rnd.randn(self.k, self.m, self.m)) e = np.zeros((self.k, self.m, self.m)) for i in range(self.k): e[i] = expm(A[i]) np_testing.assert_allclose(multiexp(A, sym=True), e)
bsd-3-clause
danieljaouen/ansible
lib/ansible/modules/windows/win_path.py
52
3082
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # This is a windows documentation stub. Actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: win_path version_added: "2.3" short_description: Manage Windows path environment variables description: - Allows element-based ordering, addition, and removal of Windows path environment variables. options: name: description: - Target path environment variable name. default: PATH elements: description: - A single path element, or a list of path elements (ie, directories) to add or remove. - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order in the resultant path value. - Variable expansions (eg, C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element. - Any existing path elements not mentioned in C(elements) are always preserved in their current order. - New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering. - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing backslashes in YAML require quotes. required: yes state: description: - Whether the path elements specified in C(elements) should be present or absent. choices: [ absent, present ] scope: description: - The level at which the environment variable specified by C(name) should be managed (either for the current user or global machine scope). choices: [ machine, user ] default: machine author: - Matt Davis (@nitzmahone) notes: - This module is for modifying indidvidual elements of path-like environment variables. 
For general-purpose management of other environment vars, use the M(win_environment) module. - This module does not broadcast change events. This means that the minority of windows applications which can have their environment changed without restarting will not be notified and therefore will need restarting to pick up new environment settings. User level environment variables will require an interactive user to log out and in again before they become available. ''' EXAMPLES = r''' - name: Ensure that system32 and Powershell are present on the global system path, and in the specified order win_path: elements: - '%SystemRoot%\system32' - '%SystemRoot%\system32\WindowsPowerShell\v1.0' - name: Ensure that C:\Program Files\MyJavaThing is not on the current user's CLASSPATH win_path: name: CLASSPATH elements: C:\Program Files\MyJavaThing scope: user state: absent '''
gpl-3.0
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/jupyter_client/localinterfaces.py
12
7872
"""Utilities for identifying local IP addresses.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os import re import socket import subprocess from subprocess import Popen, PIPE from warnings import warn LOCAL_IPS = [] PUBLIC_IPS = [] LOCALHOST = '' def _uniq_stable(elems): """uniq_stable(elems) -> list Return from an iterable, a list of all the unique elements in the input, maintaining the order in which they first appear. From ipython_genutils.data """ seen = set() return [x for x in elems if x not in seen and not seen.add(x)] def _get_output(cmd): """Get output of a command, raising IOError if it fails""" startupinfo = None if os.name == 'nt': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW p = Popen(cmd, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo) stdout, stderr = p.communicate() if p.returncode: raise IOError("Failed to run %s: %s" % (cmd, stderr.decode('utf8', 'replace'))) return stdout.decode('utf8', 'replace') def _only_once(f): """decorator to only run a function once""" f.called = False def wrapped(**kwargs): if f.called: return ret = f(**kwargs) f.called = True return ret return wrapped def _requires_ips(f): """decorator to ensure load_ips has been run before f""" def ips_loaded(*args, **kwargs): _load_ips() return f(*args, **kwargs) return ips_loaded # subprocess-parsing ip finders class NoIPAddresses(Exception): pass def _populate_from_list(addrs): """populate local and public IPs from flat list of all IPs""" if not addrs: raise NoIPAddresses global LOCALHOST public_ips = [] local_ips = [] for ip in addrs: local_ips.append(ip) if not ip.startswith('127.'): public_ips.append(ip) elif not LOCALHOST: LOCALHOST = ip if not LOCALHOST: LOCALHOST = '127.0.0.1' local_ips.insert(0, LOCALHOST) local_ips.extend(['0.0.0.0', '']) LOCAL_IPS[:] = _uniq_stable(local_ips) PUBLIC_IPS[:] = _uniq_stable(public_ips) _ifconfig_ipv4_pat = 
re.compile(r'inet\b.*?(\d+\.\d+\.\d+\.\d+)', re.IGNORECASE) def _load_ips_ifconfig(): """load ip addresses from `ifconfig` output (posix)""" try: out = _get_output('ifconfig') except (IOError, OSError): # no ifconfig, it's usually in /sbin and /sbin is not on everyone's PATH out = _get_output('/sbin/ifconfig') lines = out.splitlines() addrs = [] for line in lines: m = _ifconfig_ipv4_pat.match(line.strip()) if m: addrs.append(m.group(1)) _populate_from_list(addrs) def _load_ips_ip(): """load ip addresses from `ip addr` output (Linux)""" out = _get_output(['ip', '-f', 'inet', 'addr']) lines = out.splitlines() addrs = [] for line in lines: blocks = line.lower().split() if (len(blocks) >= 2) and (blocks[0] == 'inet'): addrs.append(blocks[1].split('/')[0]) _populate_from_list(addrs) _ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE) def _load_ips_ipconfig(): """load ip addresses from `ipconfig` output (Windows)""" out = _get_output('ipconfig') lines = out.splitlines() addrs = [] for line in lines: m = _ipconfig_ipv4_pat.match(line.strip()) if m: addrs.append(m.group(1)) _populate_from_list(addrs) def _load_ips_netifaces(): """load ip addresses with netifaces""" import netifaces global LOCALHOST local_ips = [] public_ips = [] # list of iface names, 'lo0', 'eth0', etc. 
for iface in netifaces.interfaces(): # list of ipv4 addrinfo dicts ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, []) for entry in ipv4s: addr = entry.get('addr') if not addr: continue if not (iface.startswith('lo') or addr.startswith('127.')): public_ips.append(addr) elif not LOCALHOST: LOCALHOST = addr local_ips.append(addr) if not LOCALHOST: # we never found a loopback interface (can this ever happen?), assume common default LOCALHOST = '127.0.0.1' local_ips.insert(0, LOCALHOST) local_ips.extend(['0.0.0.0', '']) LOCAL_IPS[:] = _uniq_stable(local_ips) PUBLIC_IPS[:] = _uniq_stable(public_ips) def _load_ips_gethostbyname(): """load ip addresses with socket.gethostbyname_ex This can be slow. """ global LOCALHOST try: LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2] except socket.error: # assume common default LOCAL_IPS[:] = ['127.0.0.1'] try: hostname = socket.gethostname() PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2] # try hostname.local, in case hostname has been short-circuited to loopback if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS): PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2] except socket.error: pass finally: PUBLIC_IPS[:] = _uniq_stable(PUBLIC_IPS) LOCAL_IPS.extend(PUBLIC_IPS) # include all-interface aliases: 0.0.0.0 and '' LOCAL_IPS.extend(['0.0.0.0', '']) LOCAL_IPS[:] = _uniq_stable(LOCAL_IPS) LOCALHOST = LOCAL_IPS[0] def _load_ips_dumb(): """Fallback in case of unexpected failure""" global LOCALHOST LOCALHOST = '127.0.0.1' LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', ''] PUBLIC_IPS[:] = [] @_only_once def _load_ips(suppress_exceptions=True): """load the IPs that point to this machine This function will only ever be called once. It will use netifaces to do it quickly if available. Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate. Finally, it will fallback on socket.gethostbyname_ex, which can be slow. 
""" try: # first priority, use netifaces try: return _load_ips_netifaces() except ImportError: pass # second priority, parse subprocess output (how reliable is this?) if os.name == 'nt': try: return _load_ips_ipconfig() except (IOError, NoIPAddresses): pass else: try: return _load_ips_ip() except (IOError, OSError, NoIPAddresses): pass try: return _load_ips_ifconfig() except (IOError, OSError, NoIPAddresses): pass # lowest priority, use gethostbyname return _load_ips_gethostbyname() except Exception as e: if not suppress_exceptions: raise # unexpected error shouldn't crash, load dumb default values instead. warn("Unexpected error discovering local network interfaces: %s" % e) _load_ips_dumb() @_requires_ips def local_ips(): """return the IP addresses that point to this machine""" return LOCAL_IPS @_requires_ips def public_ips(): """return the IP addresses for this machine that are visible to other machines""" return PUBLIC_IPS @_requires_ips def localhost(): """return ip for localhost (almost always 127.0.0.1)""" return LOCALHOST @_requires_ips def is_local_ip(ip): """does `ip` point to this machine?""" return ip in LOCAL_IPS @_requires_ips def is_public_ip(ip): """is `ip` a publicly visible address?""" return ip in PUBLIC_IPS
gpl-3.0
certik/sympy-oldcore
sympy/plotting/pyglet/window/xlib/xinerama.py
5
4026
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2007 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of the pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''Wrapper for Xinerama Generated with: tools/genwrappers.py Do not modify this file. 
''' __docformat__ = 'restructuredtext' __version__ = '$Id: xinerama.py 1033 2007-07-13 03:38:16Z Alex.Holkner $' import ctypes from ctypes import * import pyglet.lib _lib = pyglet.lib.load_library('Xinerama') _int_types = (c_int16, c_int32) if hasattr(ctypes, 'c_int64'): # Some builds of ctypes apparently do not have c_int64 # defined; it's a pretty good bet that these builds do not # have 64-bit pointers. _int_types += (ctypes.c_int64,) for t in _int_types: if sizeof(t) == sizeof(c_size_t): c_ptrdiff_t = t class c_void(Structure): # c_void_p is a buggy return type, converting to int, so # POINTER(None) == c_void_p is actually written as # POINTER(c_void), so it can be treated as a real pointer. _fields_ = [('dummy', c_int)] import pyglet.gl.glx import pyglet.window.xlib.xlib class struct_anon_181(Structure): __slots__ = [ 'screen_number', 'x_org', 'y_org', 'width', 'height', ] struct_anon_181._fields_ = [ ('screen_number', c_int), ('x_org', c_short), ('y_org', c_short), ('width', c_short), ('height', c_short), ] XineramaScreenInfo = struct_anon_181 # /usr/include/X11/extensions/Xinerama.h:40 Display = pyglet.gl.glx.Display # /usr/include/X11/extensions/Xinerama.h:44 XineramaQueryExtension = _lib.XineramaQueryExtension XineramaQueryExtension.restype = c_int XineramaQueryExtension.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)] # /usr/include/X11/extensions/Xinerama.h:50 XineramaQueryVersion = _lib.XineramaQueryVersion XineramaQueryVersion.restype = c_int XineramaQueryVersion.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)] # /usr/include/X11/extensions/Xinerama.h:56 XineramaIsActive = _lib.XineramaIsActive XineramaIsActive.restype = c_int XineramaIsActive.argtypes = [POINTER(Display)] # /usr/include/X11/extensions/Xinerama.h:67 XineramaQueryScreens = _lib.XineramaQueryScreens XineramaQueryScreens.restype = POINTER(XineramaScreenInfo) XineramaQueryScreens.argtypes = [POINTER(Display), POINTER(c_int)] __all__ = ['XineramaScreenInfo', 
'XineramaQueryExtension', 'XineramaQueryVersion', 'XineramaIsActive', 'XineramaQueryScreens']
bsd-3-clause
Geoion/urllib3
dummyserver/server.py
5
8100
#!/usr/bin/env python """ Dummy server used for unit testing. """ from __future__ import print_function import errno import logging import os import random import string import sys import threading import socket import warnings from urllib3.exceptions import HTTPWarning from tornado.platform.auto import set_close_exec import tornado.httpserver import tornado.ioloop import tornado.web log = logging.getLogger(__name__) CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs') DEFAULT_CERTS = { 'certfile': os.path.join(CERTS_PATH, 'server.crt'), 'keyfile': os.path.join(CERTS_PATH, 'server.key'), } NO_SAN_CERTS = { 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'), 'keyfile': DEFAULT_CERTS['keyfile'] } DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem') DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem') NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem') DEFAULT_CA_DIR = os.path.join(CERTS_PATH, 'ca_path_test') def _has_ipv6(host): """ Returns True if the system can bind an IPv6 address. """ sock = None has_ipv6 = False if socket.has_ipv6: # has_ipv6 returns true if cPython was compiled with IPv6 support. # It does not tell us if the system has IPv6 support enabled. To # determine that we must bind to an IPv6 address. # https://github.com/shazow/urllib3/pull/611 # https://bugs.python.org/issue658327 try: sock = socket.socket(socket.AF_INET6) sock.bind((host, 0)) has_ipv6 = True except: pass if sock: sock.close() return has_ipv6 # Some systems may have IPv6 support but DNS may not be configured # properly. We can not count that localhost will resolve to ::1 on all # systems. 
See https://github.com/shazow/urllib3/pull/611 and # https://bugs.python.org/issue18792 HAS_IPV6_AND_DNS = _has_ipv6('localhost') HAS_IPV6 = _has_ipv6('::1') # Different types of servers we have: class NoIPv6Warning(HTTPWarning): "IPv6 is not available" pass class SocketServerThread(threading.Thread): """ :param socket_handler: Callable which receives a socket argument for one request. :param ready_event: Event which gets set when the socket handler is ready to receive requests. """ def __init__(self, socket_handler, host='localhost', port=8081, ready_event=None): threading.Thread.__init__(self) self.daemon = True self.socket_handler = socket_handler self.host = host self.ready_event = ready_event def _start_server(self): if HAS_IPV6_AND_DNS: sock = socket.socket(socket.AF_INET6) else: warnings.warn("No IPv6 support. Falling back to IPv4.", NoIPv6Warning) sock = socket.socket(socket.AF_INET) if sys.platform != 'win32': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((self.host, 0)) self.port = sock.getsockname()[1] # Once listen() returns, the server socket is ready sock.listen(0) if self.ready_event: self.ready_event.set() self.socket_handler(sock) sock.close() def run(self): self.server = self._start_server() # FIXME: there is a pull request patching bind_sockets in Tornado directly. # If it gets merged and released we can drop this and use # `tornado.netutil.bind_sockets` again. # https://github.com/facebook/tornado/pull/977 def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None): """Creates listening sockets bound to the given port and address. Returns a list of socket objects (multiple sockets are returned if the given address maps to multiple IP addresses, which is most common for mixed IPv4 and IPv6 use). Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. 
Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen() <socket.socket.listen>`. ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. """ sockets = [] if address == "": address = None if not HAS_IPV6 and family == socket.AF_UNSPEC: # Python can be compiled with --disable-ipv6, which causes # operations on AF_INET6 sockets to fail, but does not # automatically exclude those results from getaddrinfo # results. # http://bugs.python.org/issue16208 family = socket.AF_INET if flags is None: flags = socket.AI_PASSIVE binded_port = None for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags)): af, socktype, proto, canonname, sockaddr = res try: sock = socket.socket(af, socktype, proto) except socket.error as e: if e.args[0] == errno.EAFNOSUPPORT: continue raise set_close_exec(sock.fileno()) if os.name != 'nt': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if af == socket.AF_INET6: # On linux, ipv6 sockets accept ipv4 too by default, # but this makes it impossible to bind to both # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, # separate sockets *must* be used to listen for both ipv4 # and ipv6. For consistency, always disable ipv4 on our # ipv6 sockets and use a separate ipv4 socket when needed. # # Python 2.x on windows doesn't have IPPROTO_IPV6. 
if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) # automatic port allocation with port=None # should bind on the same port on IPv4 and IPv6 host, requested_port = sockaddr[:2] if requested_port == 0 and binded_port is not None: sockaddr = tuple([host, binded_port] + list(sockaddr[2:])) sock.setblocking(0) sock.bind(sockaddr) binded_port = sock.getsockname()[1] sock.listen(backlog) sockets.append(sock) return sockets def run_tornado_app(app, io_loop, certs, scheme, host): if scheme == 'https': http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs, io_loop=io_loop) else: http_server = tornado.httpserver.HTTPServer(app, io_loop=io_loop) sockets = bind_sockets(None, address=host) port = sockets[0].getsockname()[1] http_server.add_sockets(sockets) return http_server, port def run_loop_in_thread(io_loop): t = threading.Thread(target=io_loop.start) t.start() return t def get_unreachable_address(): while True: host = ''.join(random.choice(string.ascii_lowercase) for _ in range(60)) sockaddr = (host, 54321) # check if we are really "lucky" and hit an actual server try: s = socket.create_connection(sockaddr) except socket.error: return sockaddr else: s.close() if __name__ == '__main__': # For debugging dummyserver itself - python -m dummyserver.server from .testcase import TestingApp host = '127.0.0.1' io_loop = tornado.ioloop.IOLoop() app = tornado.web.Application([(r".*", TestingApp)]) server, port = run_tornado_app(app, io_loop, None, 'http', host) server_thread = run_loop_in_thread(io_loop) print("Listening on http://{host}:{port}".format(host=host, port=port))
mit
robofit/ar-table-itable
art_projected_gui/src/art_projected_gui/items/icon_item.py
6
1099
#!/usr/bin/env python from PyQt4 import QtGui, QtCore, QtSvg from item import Item import rospy class IconItem(Item): def __init__( self, scene, x, y, w, h, fn, fixed=False): self.img = None self.w = 0 self.h = 0 super(IconItem, self).__init__(scene, x, y) self.icon = QtSvg.QGraphicsSvgItem(fn, self) self.setCacheMode(QtGui.QGraphicsItem.ItemCoordinateCache) self.setZValue(100) self.fixed = fixed self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True) self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True) self.w = self.m2pix(w) self.h = self.m2pix(h) self.icon.setScale(min(self.boundingRect().height() / self.icon.boundingRect().height(), self.boundingRect().width() / self.icon.boundingRect().width())) def boundingRect(self): return QtCore.QRectF(0, 0, self.w, self.h) def paint(self, painter, option, widget): pass
lgpl-2.1
wshallum/ansible
lib/ansible/modules/network/cumulus/cl_interface.py
1
15111
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com> # # This file is part of Ansible # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: cl_interface version_added: "2.1" author: "Cumulus Networks (@CumulusNetworks)" short_description: Configures a front panel port, loopback or management port on Cumulus Linux. description: - Configures a front panel, sub-interface, SVI, management or loopback port on a Cumulus Linux switch. For bridge ports use the cl_bridge module. For bond ports use the cl_bond module. When configuring bridge related features like the "vid" option, please follow the guidelines for configuring "vlan aware" bridging. For more details review the Layer2 Interface Guide at U(http://docs.cumulusnetworks.com) options: name: description: - Name of the interface. required: true alias_name: description: - Description of the port. ipv4: description: - List of IPv4 addresses to configure on the interface. In the form I(X.X.X.X/YY). ipv6: description: - List of IPv6 addresses to configure on the interface. In the form I(X:X:X::X/YYY). addr_method: description: - Address method. choices: - loopback - dhcp speed: description: - Set speed of the swp(front panel) or management(eth0) interface. speed is in MB. mtu: description: - Set MTU. 
Configure Jumbo Frame by setting MTU to I(9000). virtual_ip: description: - Define IPv4 virtual IP used by the Cumulus Linux VRR feature. virtual_mac: description: - Define Ethernet mac associated with Cumulus Linux VRR feature. vids: description: - In vlan-aware mode, lists VLANs defined under the interface. mstpctl_bpduguard: description: - Enables BPDU Guard on a port in vlan-aware mode. mstpctl_portnetwork: description: - Enables bridge assurance in vlan-aware mode. mstpctl_portadminedge: description: - Enables admin edge port. clagd_enable: description: - Enables the clagd daemon. This command should only be applied to the clag peerlink interface. clagd_priority: description: - Integer that changes the role the switch has in the clag domain. The lower priority switch will assume the primary role. The number can be between 0 and 65535. clagd_peer_ip: description: - IP address of the directly connected peer switch interface. clagd_sys_mac: description: - Clagd system mac address. Recommended to use the range starting with 44:38:39:ff. Needs to be the same between 2 Clag switches. pvid: description: - In vlan-aware mode, defines vlan that is the untagged vlan. location: description: - Interface directory location default: - '/etc/network/interfaces.d' requirements: [ Alternate Debian network interface manager - \ ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ] notes: - As this module writes the interface directory location, ensure that ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or whatever path is mentioned in the ``location`` attribute. - For the config to be activated, i.e installed in the kernel, "service networking reload" needs be be executed. See EXAMPLES section. 
''' EXAMPLES = ''' # Options ['virtual_mac', 'virtual_ip'] are required together - name: Configure a front panel port with an IP cl_interface: name: swp1 ipv4: 10.1.1.1/24 notify: reload networking - name: Configure front panel to use DHCP cl_interface: name: swp2 addr_family: dhcp notify: reload networking - name: Configure a SVI for vlan 100 interface with an IP cl_interface: name: bridge.100 ipv4: 10.1.1.1/24 notify: reload networking - name: Configure subinterface with an IP cl_interface: name: bond0.100 alias_name: my bond ipv4: 10.1.1.1/24 notify: reload networking # define cl_interfaces once in tasks # then write interfaces in variables file # with just the options you want. - name: Create interfaces cl_interface: name: '{{ item.key }}' ipv4: '{{ item.value.ipv4 | default(omit) }}' ipv6: '{{ item.value.ipv6 | default(omit) }}' alias_name: '{{ item.value.alias_name | default(omit) }}' addr_method: '{{ item.value.addr_method | default(omit) }}' speed: '{{ item.value.link_speed | default(omit) }}' mtu: '{{ item.value.mtu | default(omit) }}' clagd_enable: '{{ item.value.clagd_enable | default(omit) }}' clagd_peer_ip: '{{ item.value.clagd_peer_ip | default(omit) }}' clagd_sys_mac: '{{ item.value.clagd_sys_mac | default(omit) }}' clagd_priority: '{{ item.value.clagd_priority | default(omit) }}' vids: '{{ item.value.vids | default(omit) }}' virtual_ip: '{{ item.value.virtual_ip | default(omit) }}' virtual_mac: '{{ item.value.virtual_mac | default(omit) }}' mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork | default('no') }}" mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge | default('no') }}" mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard | default('no') }}" with_dict: '{{ cl_interfaces }}' notify: reload networking # In vars file # ============ cl_interfaces: swp1: alias_name: uplink to isp ipv4: 10.1.1.1/24 swp2: alias_name: l2 trunk connection vids: - 1 - 50 swp3: speed: 1000 alias_name: connects to 1G link ########## # br0 interface is 
configured by cl_bridge ########## br0.100: alias_name: SVI for vlan 100 ipv4: 10.2.2.2/24 ipv6: '10:2:2::2/127' virtual_ip: 10.2.2.254 virtual_mac: 00:00:5E:00:10:10 ''' RETURN = ''' changed: description: whether the interface was changed returned: changed type: bool sample: True msg: description: human-readable report of success or failure returned: always type: string sample: "interface bond0 config updated" ''' # handy helper for calling system calls. # calls AnsibleModule.run_command and prints a more appropriate message # exec_path - path to file to execute, with all its arguments. # E.g "/sbin/ip -o link show" # failure_msg - what message to print on failure def run_cmd(module, exec_path): (_rc, out, _err) = module.run_command(exec_path) if _rc > 0: if re.search('cannot find interface', _err): return '[{}]' failure_msg = "Failed; %s Error: %s" % (exec_path, _err) module.fail_json(msg=failure_msg) else: return out def current_iface_config(module): # due to a bug in ifquery, have to check for presence of interface file # and not rely solely on ifquery. 
when bug is fixed, this check can be # removed _ifacename = module.params.get('name') _int_dir = module.params.get('location') module.custom_current_config = {} if os.path.exists(_int_dir + '/' + _ifacename): _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name')) module.custom_current_config = module.from_json( run_cmd(module, _cmd))[0] def build_address(module): # if addr_method == 'dhcp', dont add IP address if module.params.get('addr_method') == 'dhcp': return _ipv4 = module.params.get('ipv4') _ipv6 = module.params.get('ipv6') _addresslist = [] if _ipv4 and len(_ipv4) > 0: _addresslist += _ipv4 if _ipv6 and len(_ipv6) > 0: _addresslist += _ipv6 if len(_addresslist) > 0: module.custom_desired_config['config']['address'] = ' '.join( _addresslist) def build_vids(module): _vids = module.params.get('vids') if _vids and len(_vids) > 0: module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids) def build_pvid(module): _pvid = module.params.get('pvid') if _pvid: module.custom_desired_config['config']['bridge-pvid'] = str(_pvid) def build_speed(module): _speed = module.params.get('speed') if _speed: module.custom_desired_config['config']['link-speed'] = str(_speed) module.custom_desired_config['config']['link-duplex'] = 'full' def conv_bool_to_str(_value): if isinstance(_value, bool): if _value is True: return 'yes' else: return 'no' return _value def build_generic_attr(module, _attr): _value = module.params.get(_attr) _value = conv_bool_to_str(_value) if _value: module.custom_desired_config['config'][ re.sub('_', '-', _attr)] = str(_value) def build_alias_name(module): alias_name = module.params.get('alias_name') if alias_name: module.custom_desired_config['config']['alias'] = alias_name def build_addr_method(module): _addr_method = module.params.get('addr_method') if _addr_method: module.custom_desired_config['addr_family'] = 'inet' module.custom_desired_config['addr_method'] = _addr_method def build_vrr(module): _virtual_ip = 
module.params.get('virtual_ip') _virtual_mac = module.params.get('virtual_mac') vrr_config = [] if _virtual_ip: vrr_config.append(_virtual_mac) vrr_config.append(_virtual_ip) module.custom_desired_config.get('config')['address-virtual'] = \ ' '.join(vrr_config) def build_desired_iface_config(module): """ take parameters defined and build ifupdown2 compatible hash """ module.custom_desired_config = { 'addr_family': None, 'auto': True, 'config': {}, 'name': module.params.get('name') } build_addr_method(module) build_address(module) build_vids(module) build_pvid(module) build_speed(module) build_alias_name(module) build_vrr(module) for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge', 'mstpctl_bpduguard', 'clagd_enable', 'clagd_priority', 'clagd_peer_ip', 'clagd_sys_mac', 'clagd_args']: build_generic_attr(module, _attr) def config_dict_changed(module): """ return true if 'config' dict in hash is different between desired and current config """ current_config = module.custom_current_config.get('config') desired_config = module.custom_desired_config.get('config') return current_config != desired_config def config_changed(module): """ returns true if config has changed """ if config_dict_changed(module): return True # check if addr_method is changed return module.custom_desired_config.get('addr_method') != \ module.custom_current_config.get('addr_method') def replace_config(module): temp = tempfile.NamedTemporaryFile() desired_config = module.custom_desired_config # by default it will be something like /etc/network/interfaces.d/swp1 final_location = module.params.get('location') + '/' + \ module.params.get('name') final_text = '' _fh = open(final_location, 'w') # make sure to put hash in array or else ifquery will fail # write to temp file try: temp.write(module.jsonify([desired_config])) # need to seek to 0 so that data is written to tempfile. 
temp.seek(0) _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name) final_text = run_cmd(module, _cmd) finally: temp.close() try: _fh.write(final_text) finally: _fh.close() def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, type='str'), ipv4=dict(type='list'), ipv6=dict(type='list'), alias_name=dict(type='str'), addr_method=dict(type='str', choices=['', 'loopback', 'dhcp']), speed=dict(type='str'), mtu=dict(type='str'), virtual_ip=dict(type='str'), virtual_mac=dict(type='str'), vids=dict(type='list'), pvid=dict(type='str'), mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS), mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS), mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS), clagd_enable=dict(type='bool', choices=BOOLEANS), clagd_priority=dict(type='str'), clagd_peer_ip=dict(type='str'), clagd_sys_mac=dict(type='str'), clagd_args=dict(type='str'), location=dict(type='str', default='/etc/network/interfaces.d') ), required_together=[ ['virtual_ip', 'virtual_mac'], ['clagd_enable', 'clagd_priority', 'clagd_peer_ip', 'clagd_sys_mac'] ] ) # if using the jinja default filter, this resolves to # create an list with an empty string ['']. The following # checks all lists and removes it, so that functions expecting # an empty list, get this result. May upstream this fix into # the AnsibleModule code to have it check for this. for k, _param in module.params.iteritems(): if isinstance(_param, list): module.params[k] = [x for x in _param if x] _location = module.params.get('location') if not os.path.exists(_location): _msg = "%s does not exist." 
% (_location) module.fail_json(msg=_msg) return # for testing purposes only ifacename = module.params.get('name') _changed = False _msg = "interface %s config not changed" % (ifacename) current_iface_config(module) build_desired_iface_config(module) if config_changed(module): replace_config(module) _msg = "interface %s config updated" % (ifacename) _changed = True module.exit_json(changed=_changed, msg=_msg) # import module snippets from ansible.module_utils.basic import * import tempfile import os if __name__ == '__main__': main()
gpl-3.0
vrenaville/ngo-addons-backport
addons/l10n_ma/__openerp__.py
170
2154
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2010 kazacube (http://kazacube.com). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'Maroc - Accounting', 'version' : '1.0', 'author' : 'kazacube', 'category' : 'Localization/Account Charts', 'description': """ This is the base module to manage the accounting chart for Maroc. ================================================================= Ce Module charge le modèle du plan de comptes standard Marocain et permet de générer les états comptables aux normes marocaines (Bilan, CPC (comptes de produits et charges), balance générale à 6 colonnes, Grand livre cumulatif...). L'intégration comptable a été validé avec l'aide du Cabinet d'expertise comptable Seddik au cours du troisième trimestre 2010.""", 'website': 'http://www.kazacube.com', 'depends' : ['base', 'account'], 'data' : [ 'security/ir.model.access.csv', 'account_type.xml', 'account_pcg_morocco.xml', 'l10n_ma_wizard.xml', 'l10n_ma_tax.xml', 'l10n_ma_journal.xml', ], 'demo' : [], 'auto_install': False, 'installable': True, 'images': ['images/config_chart_l10n_ma.jpeg','images/l10n_ma_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mikehcox/gtest
test/gtest_env_var_test.py
2408
3487
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly parses environment variables.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') environ = os.environ.copy() def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def GetFlag(flag): """Runs gtest_env_var_test_ and returns its output.""" args = [COMMAND] if flag is not None: args += [flag] return gtest_test_utils.Subprocess(args, env=environ).output def TestFlag(flag, test_val, default_val): """Verifies that the given flag is affected by the corresponding env var.""" env_var = 'GTEST_' + flag.upper() SetEnvVar(env_var, test_val) AssertEq(test_val, GetFlag(flag)) SetEnvVar(env_var, None) AssertEq(default_val, GetFlag(flag)) class GTestEnvVarTest(gtest_test_utils.TestCase): def testEnvVarAffectsFlag(self): """Tests that environment variable should affect the corresponding flag.""" TestFlag('break_on_failure', '1', '0') TestFlag('color', 'yes', 'auto') TestFlag('filter', 'FooTest.Bar', '*') TestFlag('output', 'xml:tmp/foo.xml', '') TestFlag('print_time', '0', '1') TestFlag('repeat', '999', '1') TestFlag('throw_on_failure', '1', '0') TestFlag('death_test_style', 'threadsafe', 'fast') TestFlag('catch_exceptions', '0', '1') if IS_LINUX: TestFlag('death_test_use_fork', '1', '0') TestFlag('stack_trace_depth', '0', '100') if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
ghorn/rawesome
studies/makeOctaveCarousel.py
2
2408
# Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome.  If not, see <http://www.gnu.org/licenses/>.

from casadi import pi
import rawe
from carouselSteadyState import getSteadyState

# Generates three Octave source files for the carousel ODE model:
#   carouselOde_modelAndJacob.m  - computes f, MM in 0 == f + MM*[xdot;z]
#   carouselOde_xDotAndZ.m       - solves the above for xdot and z
#   carouselOde_steadyState.m    - returns a precomputed steady state
if __name__ == '__main__':
    from conf import conf
    # FIX: parenthesized print works under both Python 2 and Python 3;
    # the original bare print statement was Python-2-only syntax.
    print("creating model...")
    dae = rawe.models.carousel(conf)
    dae.convertToOde()
    name = 'carouselOde'

    # write the file that computes f,MM in 0 == f(x,u,p) + MM(x,u,p)*[xdot;z]
    blah = dae.octaveSimGen(name)
    # use context managers so the files are closed even if a write fails
    with open(name + '_modelAndJacob.m', 'w') as f:
        f.write(blah)

    # write the file that computes xdot,z from f,MM
    with open(name + '_xDotAndZ.m', 'w') as f:
        f.write('''\
function [xDot,z] = %(name)s_xDotAndZ(x,u,p)
[f,MM] = %(name)s_modelAndJacob(x,u,p);
xDotAndZ = -(MM\\f);
xDot = xDotAndZ(1:%(nx)d);
z = xDotAndZ(%(nx)d+1:end);
end
''' % {'name': name, 'nx': len(dae.xNames())})

    # compute the steady state at 2*pi rad/s rotation and write it out as
    # an Octave function returning (x, z, u, p)
    steadyState = getSteadyState(dae, conf, 2*pi, 1.2)
    lines = []
    lines.append('function [x,z,u,p] = carouselOde_steadyState()')
    lines.append('x = zeros(' + str(len(dae.xNames())) + ',1);')
    lines.append('u = zeros(' + str(len(dae.uNames())) + ',1);')
    lines.append('p = zeros(' + str(len(dae.pNames())) + ',1);')
    # Octave indices are 1-based, hence k+1
    for k, name_ in enumerate(dae.xNames()):
        lines.append('x(' + str(k + 1) + ') = ' + str(steadyState[name_]) + '; % ' + name_)
    for k, name_ in enumerate(dae.zNames()):
        lines.append('z(' + str(k + 1) + ') = ' + str(steadyState[name_]) + '; % ' + name_)
    for k, name_ in enumerate(dae.uNames()):
        lines.append('u(' + str(k + 1) + ') = ' + str(steadyState[name_]) + '; % ' + name_)
    for k, name_ in enumerate(dae.pNames()):
        lines.append('p(' + str(k + 1) + ') = ' + str(steadyState[name_]) + '; % ' + name_)
    lines.append('end')

    with open('carouselOde_steadyState.m', 'w') as f:
        f.write('\n'.join(lines))
lgpl-3.0
kidswong999/Arduino
arduino-core/src/processing/app/i18n/python/requests/packages/charade/euctwfreq.py
3133
34872
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # EUCTW frequency table # Converted from big5 work # by Taiwan's Mandarin Promotion Council # <http:#www.edu.tw:81/mandr/> # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 # Char to FreqOrder table , EUCTW_TABLE_SIZE = 8102 EUCTWCharToFreqOrder = ( 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 63,7312,7313, 317,1614, 75, 222, 
159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 
439, 38,7339,1063,7340, 794, # 3158 3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 
369,1274,2194,2175,1837,4338, # 3878 1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 7511,2026,4386,3534,7512, 501,7513,4123, 
594,3431,2165,1821,3535,3432,3536,3192, # 4598 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 2600, 161,1178,4156,1982, 987,4423,1101,4157, 
631,3943,1157,3198,2420,1343,1241, # 5318 1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 7662,3349,3041,3451, 
511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 
52,7767,3047,1796,7768,7769, # 6406 7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 2253, 574,3822,1603, 295,1535, 
705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 1173, 288,2311, 
454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 #Everything below is of no interest for detection purpose 2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118 2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134 8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150 8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166 8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182 
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198 8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214 8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230 8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246 8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262 8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278 8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294 8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310 8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326 8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342 8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358 8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374 8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390 8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406 8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422 8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438 8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454 8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470 8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486 8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502 8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518 8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534 
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550 8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566 8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582 8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598 8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614 8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630 8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646 8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662 8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678 8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694 8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710 8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726 8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742 # flake8: noqa
lgpl-2.1
p0psicles/SickRage
lib/sqlalchemy/dialects/drizzle/base.py
79
14993
# drizzle/base.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # Copyright (C) 2010-2011 Monty Taylor <mordred@inaugust.com> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: drizzle :name: Drizzle Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine is InnoDB (transactions, foreign-keys) rather than MyISAM. For more `Notable Differences <http://docs.drizzle.org/mysql_differences.html>`_, visit the `Drizzle Documentation <http://docs.drizzle.org/index.html>`_. The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of the :doc:`SQLAlchemy MySQL <mysql>` documentation is also relevant. """ from sqlalchemy import exc from sqlalchemy import log from sqlalchemy import types as sqltypes from sqlalchemy.engine import reflection from sqlalchemy.dialects.mysql import base as mysql_dialect from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ BLOB, BINARY, VARBINARY class _NumericType(object): """Base for Drizzle numeric types.""" def __init__(self, **kw): super(_NumericType, self).__init__(**kw) class _FloatType(_NumericType, sqltypes.Float): def __init__(self, precision=None, scale=None, asdecimal=True, **kw): if isinstance(self, (REAL, DOUBLE)) and \ ( (precision is None and scale is not None) or (precision is not None and scale is None) ): raise exc.ArgumentError( "You must specify both precision and scale or omit " "both altogether.") super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw) self.scale = scale class _StringType(mysql_dialect._StringType): """Base for Drizzle string types.""" def __init__(self, collation=None, binary=False, **kw): kw['national'] = False super(_StringType, self).__init__(collation=collation, binary=binary, **kw) class NUMERIC(_NumericType, sqltypes.NUMERIC): """Drizzle NUMERIC type.""" __visit_name__ = 'NUMERIC' def 
__init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a NUMERIC. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DECIMAL(_NumericType, sqltypes.DECIMAL): """Drizzle DECIMAL type.""" __visit_name__ = 'DECIMAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DECIMAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DECIMAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DOUBLE(_FloatType): """Drizzle DOUBLE type.""" __visit_name__ = 'DOUBLE' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DOUBLE. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DOUBLE, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class REAL(_FloatType, sqltypes.REAL): """Drizzle REAL type.""" __visit_name__ = 'REAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a REAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(REAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class FLOAT(_FloatType, sqltypes.FLOAT): """Drizzle FLOAT type.""" __visit_name__ = 'FLOAT' def __init__(self, precision=None, scale=None, asdecimal=False, **kw): """Construct a FLOAT. 
:param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(FLOAT, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) def bind_processor(self, dialect): return None class INTEGER(sqltypes.INTEGER): """Drizzle INTEGER type.""" __visit_name__ = 'INTEGER' def __init__(self, **kw): """Construct an INTEGER.""" super(INTEGER, self).__init__(**kw) class BIGINT(sqltypes.BIGINT): """Drizzle BIGINTEGER type.""" __visit_name__ = 'BIGINT' def __init__(self, **kw): """Construct a BIGINTEGER.""" super(BIGINT, self).__init__(**kw) class TIME(mysql_dialect.TIME): """Drizzle TIME type.""" class TIMESTAMP(sqltypes.TIMESTAMP): """Drizzle TIMESTAMP type.""" __visit_name__ = 'TIMESTAMP' class TEXT(_StringType, sqltypes.TEXT): """Drizzle TEXT type, for text up to 2^16 characters.""" __visit_name__ = 'TEXT' def __init__(self, length=None, **kw): """Construct a TEXT. :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store ``length`` characters. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(TEXT, self).__init__(length=length, **kw) class VARCHAR(_StringType, sqltypes.VARCHAR): """Drizzle VARCHAR type, for variable-length character data.""" __visit_name__ = 'VARCHAR' def __init__(self, length=None, **kwargs): """Construct a VARCHAR. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. 
:param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(VARCHAR, self).__init__(length=length, **kwargs) class CHAR(_StringType, sqltypes.CHAR): """Drizzle CHAR type, for fixed-length character data.""" __visit_name__ = 'CHAR' def __init__(self, length=None, **kwargs): """Construct a CHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. """ super(CHAR, self).__init__(length=length, **kwargs) class ENUM(mysql_dialect.ENUM): """Drizzle ENUM type.""" def __init__(self, *enums, **kw): """Construct an ENUM. Example: Column('myenum', ENUM("foo", "bar", "baz")) :param enums: The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below). :param strict: Defaults to False: ensure that a given value is in this ENUM's range of permissible values when inserting or updating rows. Note that Drizzle will not raise a fatal error if you attempt to store an out of range value- an alternate value will be stored instead. (See Drizzle ENUM documentation.) :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. :param quoting: Defaults to 'auto': automatically determine enum value quoting. 
If all enum values are surrounded by the same quoting character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 'quoted': values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated. 'unquoted': values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema. Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option. """ super(ENUM, self).__init__(*enums, **kw) class _DrizzleBoolean(sqltypes.Boolean): def get_dbapi_type(self, dbapi): return dbapi.NUMERIC colspecs = { sqltypes.Numeric: NUMERIC, sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, sqltypes.Boolean: _DrizzleBoolean, } # All the types we have in Drizzle ischema_names = { 'BIGINT': BIGINT, 'BINARY': BINARY, 'BLOB': BLOB, 'BOOLEAN': BOOLEAN, 'CHAR': CHAR, 'DATE': DATE, 'DATETIME': DATETIME, 'DECIMAL': DECIMAL, 'DOUBLE': DOUBLE, 'ENUM': ENUM, 'FLOAT': FLOAT, 'INT': INTEGER, 'INTEGER': INTEGER, 'NUMERIC': NUMERIC, 'TEXT': TEXT, 'TIME': TIME, 'TIMESTAMP': TIMESTAMP, 'VARBINARY': VARBINARY, 'VARCHAR': VARCHAR, } class DrizzleCompiler(mysql_dialect.MySQLCompiler): def visit_typeclause(self, typeclause): type_ = typeclause.type.dialect_impl(self.dialect) if isinstance(type_, sqltypes.Integer): return 'INTEGER' else: return super(DrizzleCompiler, self).visit_typeclause(typeclause) def visit_cast(self, cast, **kwargs): type_ = self.process(cast.typeclause) if type_ is None: return self.process(cast.clause) return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler): pass class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler): def _extend_numeric(self, type_, spec): return spec def _extend_string(self, type_, defaults, spec): """Extend a string-type declaration with standard SQL COLLATE annotations and Drizzle 
specific extensions. """ def attr(name): return getattr(type_, name, defaults.get(name)) if attr('collation'): collation = 'COLLATE %s' % type_.collation elif attr('binary'): collation = 'BINARY' else: collation = None return ' '.join([c for c in (spec, collation) if c is not None]) def visit_NCHAR(self, type): raise NotImplementedError("Drizzle does not support NCHAR") def visit_NVARCHAR(self, type): raise NotImplementedError("Drizzle does not support NVARCHAR") def visit_FLOAT(self, type_): if type_.scale is not None and type_.precision is not None: return "FLOAT(%s, %s)" % (type_.precision, type_.scale) else: return "FLOAT" def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_BLOB(self, type_): return "BLOB" class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext): pass class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer): pass @log.class_logger class DrizzleDialect(mysql_dialect.MySQLDialect): """Details of the Drizzle dialect. Not used directly in application code. 
""" name = 'drizzle' _supports_cast = True supports_sequences = False supports_native_boolean = True supports_views = False default_paramstyle = 'format' colspecs = colspecs statement_compiler = DrizzleCompiler ddl_compiler = DrizzleDDLCompiler type_compiler = DrizzleTypeCompiler ischema_names = ischema_names preparer = DrizzleIdentifierPreparer def on_connect(self): """Force autocommit - Drizzle Bug#707842 doesn't set this properly""" def connect(conn): conn.autocommit(False) return connect @reflection.cache def get_table_names(self, connection, schema=None, **kw): """Return a Unicode SHOW TABLES from a given schema.""" if schema is not None: current_schema = schema else: current_schema = self.default_schema_name charset = 'utf8' rp = connection.execute("SHOW TABLES FROM %s" % self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)] @reflection.cache def get_view_names(self, connection, schema=None, **kw): raise NotImplementedError def _detect_casing(self, connection): """Sniff out identifier case sensitivity. Cached per-connection. This value can not change without a server restart. """ return 0 def _detect_collations(self, connection): """Pull the active COLLATIONS list from the server. Cached per-connection. """ collations = {} charset = self._connection_charset rs = connection.execute( 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM' ' data_dictionary.COLLATIONS') for row in self._compat_fetchall(rs, charset): collations[row[0]] = row[1] return collations def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" self._server_ansiquotes = False self._backslash_escapes = False
gpl-3.0
fsschneider/DeepOBS
deepobs/scripts/deepobs_plot_results.py
1
2904
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import argparse import deepobs def parse_args(): parser = argparse.ArgumentParser(description="Plotting tool for DeepOBS.") parser.add_argument("path", help="Path to the results folder") parser.add_argument( "--get_best_run", action="store_const", const=True, default=False, help="Return best hyperparameter setting per optimizer and testproblem." ) parser.add_argument( "--plot_lr_sensitivity", action="store_const", const=True, default=False, help="Plot 'sensitivity' plot for the learning rates.") parser.add_argument( "--plot_performance", action="store_const", const=True, default=False, help="Plot performance plot compared to the baselines.") parser.add_argument( "--plot_table", action="store_const", const=True, default=False, help= "Plot overall performance table including speed and hyperparameters.") parser.add_argument( "--full", action="store_const", const=True, default=False, help="Run a full analysis and plot all figures.") parser.add_argument( "--baseline_path", type=str, default="baselines_deepobs", help="Path to baseline folder.") return parser def read_args(): parser = parse_args() args = parser.parse_args() return args def main(path, get_best_run, plot_lr_sensitivity, plot_performance, plot_table, full, baseline_path): # Put all input arguments back into an args variable, so I can use it as # before (without the main function) args = argparse.Namespace(**locals()) # Parse whole baseline folder if args.baseline_path: print("Parsing baseline folder") deepobs.tensorflow.config.set_baseline_dir(args.baseline_path) baseline_parser = deepobs.analyzer.analyze_utils.Analyzer( deepobs.tensorflow.config.get_baseline_dir()) else: baseline_parser = None # Parse path folder print("Parsing results folder") folder_parser = deepobs.analyzer.analyze_utils.Analyzer(args.path) if args.get_best_run or args.full: deepobs.analyzer.analyze.get_best_run(folder_parser) if args.plot_lr_sensitivity 
or args.full: deepobs.analyzer.analyze.plot_lr_sensitivity(folder_parser, baseline_parser) if args.plot_performance or args.full: deepobs.analyzer.analyze.plot_performance(folder_parser, baseline_parser) if args.plot_table or args.full: deepobs.analyzer.analyze.plot_table(folder_parser, baseline_parser) if __name__ == '__main__': main(**vars(read_args()))
mit
rfdsp/usedbook
usedbook/views.py
1
5739
# -*- coding: utf-8 -*- from usedbook.models import WantedBook from django.shortcuts import get_object_or_404, render_to_response from django.template import RequestContext from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse import threading class offer: isbn='' name='' price='' realPrice='' save='' link='' id='' def __init__(self): pass class theOffersGetter(threading.Thread): def __init__(self,abook,result): threading.Thread.__init__(self) self.abook=abook self.result=result def run(self): from urllib2 import urlopen import re isbn=self.abook.isbn url=urlopen('http://used.aladin.co.kr/shop/UsedShop/wuseditemall.aspx?ISBN={POSISBN}'.format(POSISBN=isbn)) raw=url.read() decoded=raw.decode('cp949') title_re=re.compile(r"<a href='http://www.aladin.co.kr/shop/wproduct.aspx\?ISBN=.*?' class='p_topt01'>(.*?)</a>".decode('utf-8')) title=title_re.findall(decoded)[0] newbookretailPrice_re=re.compile(r'정가</td>\r\n <td valign="top" class="p_goodstd02">([\d,]*?)원'.decode('utf-8')) newbookPrice_re=re.compile(r'판매가</td>\r\n <td valign="top" class="p_goodstd02">\r\n <span class="p_new_price_phs">([\d,]*?)원'.decode('utf-8')) newbookMillage_re=re.compile(r'마일리지</td>\r\n <td valign="top" class="p_goodstd02">([\d,]*?)점'.decode('utf-8')) retailPrice=int(newbookretailPrice_re.findall(decoded)[0].replace(',','')) price=int(newbookPrice_re.findall(decoded)[0].replace(',','')) millage=int(newbookMillage_re.findall(decoded)[0].replace(',','')) realPrice=price-millage price_re=re.compile('(<a href="http://used.aladin.co.kr/shop/wproduct.aspx\?ItemId=.*?" 
class="bo".*?></a>)[\s\S]*?<span class="ss_p2"><b>([\d,]*)[\s\S]*?(<a.*?</a>)') pricelist=price_re.findall(decoded) theoffers=list() for link,price,seller in pricelist: price=int(price.replace(',','')) aoffer=offer() aoffer.seller=seller.replace('href="','href="http://www.aladin.co.kr') aoffer.name=title aoffer.price=price aoffer.isbn=isbn aoffer.link=link aoffer.realPrice=realPrice aoffer.id=self.abook.id aoffer.save=(realPrice-price)*100.0/realPrice theoffers.append(aoffer) self.result.append(theoffers) def index(request): wanted_book_list = WantedBook.objects.all().order_by('isbn') return render_to_response('usedbook/index.html', {'wanted_book_list': wanted_book_list}, context_instance=RequestContext(request)) def add(request): try: isbn=request.POST['isbn'] except : error_msg='ISBN값을 입력해 주십시요' else: if(len(isbn)!=10): error_msg='{0}은 ISBN-10이 아닙니다. ISBN-10을 입력해 주십시요.'.format(isbn) else: from urllib2 import urlopen import re url=urlopen('http://used.aladin.co.kr/shop/UsedShop/wuseditemall.aspx?ISBN={POSISBN}'.format(POSISBN=isbn)) raw=url.read() decoded=raw.decode('cp949') title_re=re.compile(r"<a href='http://www.aladin.co.kr/shop/wproduct.aspx\?ISBN=.*?' 
class='p_topt01'>(.*?)</a>".decode('utf-8')) title=title_re.findall(decoded,) if(not title): return HttpResponse('----ISBN-10(10자리) {POSISBN} 에 해당하는 유효한 상품정보가 없습니다.----'.format(POSISBN=isbn)) newbookretailPrice_re=re.compile(r'정가</td>\r\n <td valign="top" class="p_goodstd02">([\d,]*?)원'.decode('utf-8')) newbookPrice_re=re.compile(r'판매가</td>\r\n <td valign="top" class="p_goodstd02">\r\n <span class="p_new_price_phs">([\d,]*?)원'.decode('utf-8')) newbookMillage_re=re.compile(r'마일리지</td>\r\n <td valign="top" class="p_goodstd02">([\d,]*?)점'.decode('utf-8')) new_book=WantedBook(isbn=isbn,name=title[0]) new_book.retailPrice=int(newbookretailPrice_re.findall(decoded)[0].replace(',','')) new_book.price=int(newbookPrice_re.findall(decoded)[0].replace(',','')) new_book.millage=int(newbookMillage_re.findall(decoded)[0].replace(',','')) new_book.realPrice=new_book.price-new_book.millage try: new_book.save() except: return HttpResponse('글을 써넣다가 오류가 발생했습니다.') return HttpResponseRedirect(reverse('usedbook.views.index',args=())) def result(request): offers=dict() wanted_book_list = WantedBook.objects.all().order_by('isbn') offerlist=[] threads=map(lambda x:theOffersGetter(x,offerlist),wanted_book_list) map(lambda x: x.start(),threads) map(lambda x: x.join(),threads) for theoffers in offerlist: for offer in theoffers: offers[offer.seller]=offers.get(offer.seller,[]) offers[offer.seller].append(offer) offers=list(offers.items()) offers.sort(key=lambda x:len(x[1]),reverse=True) return render_to_response('usedbook/result.html', {'offers': offers}, context_instance=RequestContext(request)) def delete(request,wantedbook_id): book = get_object_or_404(WantedBook, pk=wantedbook_id) isbn=book.isbn error_msg='{0}제거합니다.'.format(isbn) book.delete() return HttpResponseRedirect(reverse('usedbook.views.index',args=()))
mit
dbaxa/django
tests/admin_inlines/admin.py
293
5354
from django import forms
from django.contrib import admin

from .models import (
    Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
    Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
    Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
    Inner4Tabular, NonAutoPKBook, Novel, ParentModelWithCustomPk, Poll,
    Profile, ProfileCollection, Question, ReadOnlyInline, ShoppingWeakness,
    Sighting, SomeChildModel, SomeParentModel, SottoCapo, Title,
    TitleCollection,
)

# A dedicated admin site so these registrations don't leak into the default one.
site = admin.AdminSite(name="admin")


class BookInline(admin.TabularInline):
    # Inline over the M2M through table rather than the related model itself.
    model = Author.books.through


class NonAutoPKBookTabularInline(admin.TabularInline):
    model = NonAutoPKBook


class NonAutoPKBookStackedInline(admin.StackedInline):
    model = NonAutoPKBook


class EditablePKBookTabularInline(admin.TabularInline):
    model = EditablePKBook


class EditablePKBookStackedInline(admin.StackedInline):
    model = EditablePKBook


class AuthorAdmin(admin.ModelAdmin):
    inlines = [
        BookInline, NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
        EditablePKBookTabularInline, EditablePKBookStackedInline,
    ]


class InnerInline(admin.StackedInline):
    model = Inner
    can_delete = False
    readonly_fields = ('readonly',)  # For bug #13174 tests.


class HolderAdmin(admin.ModelAdmin):

    class Media:
        js = ('my_awesome_admin_scripts.js',)


class ReadOnlyInlineInline(admin.TabularInline):
    model = ReadOnlyInline
    readonly_fields = ['name']


class InnerInline2(admin.StackedInline):
    model = Inner2

    class Media:
        js = ('my_awesome_inline_scripts.js',)


class InnerInline3(admin.StackedInline):
    model = Inner3

    class Media:
        js = ('my_awesome_inline_scripts.js',)


class TitleForm(forms.ModelForm):

    def clean(self):
        # Cross-field validation: both title fields must match.
        cleaned_data = self.cleaned_data
        title1 = cleaned_data.get("title1")
        title2 = cleaned_data.get("title2")
        if title1 != title2:
            raise forms.ValidationError("The two titles must be the same")
        return cleaned_data


class TitleInline(admin.TabularInline):
    model = Title
    form = TitleForm
    extra = 1


class Inner4StackedInline(admin.StackedInline):
    model = Inner4Stacked
    show_change_link = True


class Inner4TabularInline(admin.TabularInline):
    model = Inner4Tabular
    show_change_link = True


class Holder4Admin(admin.ModelAdmin):
    inlines = [Inner4StackedInline, Inner4TabularInline]


class InlineWeakness(admin.TabularInline):
    model = ShoppingWeakness
    extra = 1


class QuestionInline(admin.TabularInline):
    model = Question
    readonly_fields = ['call_me']

    def call_me(self, obj):
        return 'Callable in QuestionInline'


class PollAdmin(admin.ModelAdmin):
    inlines = [QuestionInline]

    def call_me(self, obj):
        return 'Callable in PollAdmin'


class ChapterInline(admin.TabularInline):
    model = Chapter
    readonly_fields = ['call_me']

    def call_me(self, obj):
        return 'Callable in ChapterInline'


class NovelAdmin(admin.ModelAdmin):
    inlines = [ChapterInline]


class ConsigliereInline(admin.TabularInline):
    model = Consigliere


class SottoCapoInline(admin.TabularInline):
    model = SottoCapo


class ProfileInline(admin.TabularInline):
    model = Profile
    extra = 1


# admin for #18433
class ChildModel1Inline(admin.TabularInline):
    model = ChildModel1


class ChildModel2Inline(admin.StackedInline):
    model = ChildModel2


# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
    model = BinaryTree

    def get_extra(self, request, obj=None, **kwargs):
        # Show fewer extra forms as existing children accumulate.
        extra = 2
        if obj:
            return extra - obj.binarytree_set.count()
        return extra

    def get_max_num(self, request, obj=None, **kwargs):
        # Cap the total number of forms relative to existing children.
        max_num = 3
        if obj:
            return max_num - obj.binarytree_set.count()
        return max_num


# admin for #19524
class SightingInline(admin.TabularInline):
    model = Sighting


# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):

    class Meta:
        fields = '__all__'
        model = SomeChildModel
        widgets = {
            'position': forms.HiddenInput,
        }


class SomeChildModelInline(admin.TabularInline):
    model = SomeChildModel
    form = SomeChildModelForm


site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
site.register([Question, Inner4Stacked, Inner4Tabular])
bsd-3-clause
allenlavoie/tensorflow
tensorflow/contrib/kfac/examples/mnist.py
21
2576
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for loading MNIST into TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf __all__ = [ 'load_mnist', ] def load_mnist(data_dir, num_epochs, batch_size, flatten_images=True, use_fake_data=False): """Loads MNIST dataset into memory. Args: data_dir: string. Directory to read MNIST examples from. num_epochs: int. Number of passes to make over the dataset. batch_size: int. Number of examples per minibatch. flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into [784]-shaped vectors. use_fake_data: bool. If True, generate a synthetic dataset rather than reading MNIST in. Returns: examples: Tensor of shape [batch_size, 784] if 'flatten_images' is True, else [batch_size, 28, 28, 1]. Each row is one example. Values in [0, 1]. labels: Tensor of shape [batch_size]. Indices of integer corresponding to each example. Values in {0...9}. 
""" if use_fake_data: rng = np.random.RandomState(42) num_examples = batch_size * 4 images = rng.rand(num_examples, 28 * 28) if not flatten_images: images = np.reshape(images, [num_examples, 28, 28, 1]) labels = rng.randint(10, size=num_examples) else: mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets( data_dir, reshape=flatten_images) num_examples = len(mnist_data.train.labels) images = mnist_data.train.images labels = mnist_data.train.labels dataset = tf.data.Dataset.from_tensor_slices((np.asarray( images, dtype=np.float32), np.asarray(labels, dtype=np.int64))) return (dataset.repeat(num_epochs).shuffle(num_examples).batch(batch_size) .make_one_shot_iterator().get_next())
apache-2.0
ArcherSys/ArcherSys
Lib/site-packages/jupyter_client/win_interrupt.py
13
1401
"""Use a Windows event to interrupt a child process like SIGINT. The child needs to explicitly listen for this - see ipykernel.parentpoller.ParentPollerWindows for a Python implementation. """ import ctypes def create_interrupt_event(): """Create an interrupt event handle. The parent process should call this to create the interrupt event that is passed to the child process. It should store this handle and use it with ``send_interrupt`` to interrupt the child process. """ # Create a security attributes struct that permits inheritance of the # handle by new processes. # FIXME: We can clean up this mess by requiring pywin32 for IPython. class SECURITY_ATTRIBUTES(ctypes.Structure): _fields_ = [ ("nLength", ctypes.c_int), ("lpSecurityDescriptor", ctypes.c_void_p), ("bInheritHandle", ctypes.c_int) ] sa = SECURITY_ATTRIBUTES() sa_p = ctypes.pointer(sa) sa.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES) sa.lpSecurityDescriptor = 0 sa.bInheritHandle = 1 return ctypes.windll.kernel32.CreateEventA( sa_p, # lpEventAttributes False, # bManualReset False, # bInitialState '') # lpName def send_interrupt(interrupt_handle): """ Sends an interrupt event using the specified handle. """ ctypes.windll.kernel32.SetEvent(interrupt_handle)
mit
amyvmiwei/kbengine
kbe/src/lib/python/Lib/lib2to3/patcomp.py
93
7075
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Pattern compiler.

The grammer is taken from PatternGrammar.txt.

The compiler compiles a pattern to a pytree.*Pattern instance.
"""

__author__ = "Guido van Rossum <guido@python.org>"

# Python imports
import io
import os

# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar

# Really local imports
from . import pytree
from . import pygram

# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")


class PatternSyntaxError(Exception):
    pass


def tokenize_wrapper(input):
    """Tokenizes a string suppressing significant whitespace."""
    # Layout tokens carry no meaning in the pattern language.
    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
    tokens = tokenize.generate_tokens(io.StringIO(input).readline)
    for quintuple in tokens:
        type, value, start, end, line_text = quintuple
        if type not in skip:
            yield quintuple


class PatternCompiler(object):

    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
        """Initializer.

        Takes an optional alternative filename for the pattern grammar.
        """
        self.grammar = driver.load_grammar(grammar_file)
        self.syms = pygram.Symbols(self.grammar)
        self.pygrammar = pygram.python_grammar
        self.pysyms = pygram.python_symbols
        self.driver = driver.Driver(self.grammar, convert=pattern_convert)

    def compile_pattern(self, input, debug=False, with_tree=False):
        """Compiles a pattern string to a nested pytree.*Pattern object."""
        tokens = tokenize_wrapper(input)
        try:
            root = self.driver.parse_tokens(tokens, debug=debug)
        except parse.ParseError as e:
            # Re-raise as the pattern-specific error type callers expect.
            raise PatternSyntaxError(str(e))
        if with_tree:
            return self.compile_node(root), root
        else:
            return self.compile_node(root)

    def compile_node(self, node):
        """Compiles a node, recursively.

        This is one big switch on the node type.
        """
        # XXX Optimize certain Wildcard-containing-Wildcard patterns
        # that can be merged
        if node.type == self.syms.Matcher:
            node = node.children[0]  # Avoid unneeded recursion

        if node.type == self.syms.Alternatives:
            # Skip the odd children since they are just '|' tokens
            alts = [self.compile_node(ch) for ch in node.children[::2]]
            if len(alts) == 1:
                return alts[0]
            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
            return p.optimize()

        if node.type == self.syms.Alternative:
            units = [self.compile_node(ch) for ch in node.children]
            if len(units) == 1:
                return units[0]
            p = pytree.WildcardPattern([units], min=1, max=1)
            return p.optimize()

        if node.type == self.syms.NegatedUnit:
            pattern = self.compile_basic(node.children[1:])
            p = pytree.NegatedPattern(pattern)
            return p.optimize()

        assert node.type == self.syms.Unit

        # Peel off an optional "name=" prefix ...
        name = None
        nodes = node.children
        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
            name = nodes[0].value
            nodes = nodes[2:]
        # ... and an optional trailing repeater (*, +, {m[,n]}).
        repeat = None
        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
            repeat = nodes[-1]
            nodes = nodes[:-1]

        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
        pattern = self.compile_basic(nodes, repeat)

        if repeat is not None:
            assert repeat.type == self.syms.Repeater
            children = repeat.children
            child = children[0]
            if child.type == token.STAR:
                min = 0
                max = pytree.HUGE
            elif child.type == token.PLUS:
                min = 1
                max = pytree.HUGE
            elif child.type == token.LBRACE:
                assert children[-1].type == token.RBRACE
                assert len(children) in (3, 5)
                min = max = self.get_int(children[1])
                if len(children) == 5:
                    max = self.get_int(children[3])
            else:
                assert False
            if min != 1 or max != 1:
                # Wrap in a repetition wildcard; {1,1} is a no-op.
                pattern = pattern.optimize()
                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)

        if name is not None:
            pattern.name = name
        return pattern.optimize()

    def compile_basic(self, nodes, repeat=None):
        # Compile STRING | NAME [Details] | (...) | [...]
        assert len(nodes) >= 1
        node = nodes[0]
        if node.type == token.STRING:
            value = str(literals.evalString(node.value))
            return pytree.LeafPattern(_type_of_literal(value), value)
        elif node.type == token.NAME:
            value = node.value
            if value.isupper():
                # Uppercase names denote token types (NAME, STRING, ...).
                if value not in TOKEN_MAP:
                    raise PatternSyntaxError("Invalid token: %r" % value)
                if nodes[1:]:
                    raise PatternSyntaxError("Can't have details for token")
                return pytree.LeafPattern(TOKEN_MAP[value])
            else:
                # Lowercase names denote grammar symbols; "any" matches all.
                if value == "any":
                    type = None
                elif not value.startswith("_"):
                    type = getattr(self.pysyms, value, None)
                    if type is None:
                        raise PatternSyntaxError("Invalid symbol: %r" % value)
                if nodes[1:]: # Details present
                    content = [self.compile_node(nodes[1].children[1])]
                else:
                    content = None
                return pytree.NodePattern(type, content)
        elif node.value == "(":
            return self.compile_node(nodes[1])
        elif node.value == "[":
            # [...] is shorthand for an optional (min=0, max=1) subpattern.
            assert repeat is None
            subpattern = self.compile_node(nodes[1])
            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
        assert False, node

    def get_int(self, node):
        assert node.type == token.NUMBER
        return int(node.value)


# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
             "STRING": token.STRING,
             "NUMBER": token.NUMBER,
             "TOKEN": None}


def _type_of_literal(value):
    if value[0].isalpha():
        return token.NAME
    elif value in grammar.opmap:
        return grammar.opmap[value]
    else:
        return None


def pattern_convert(grammar, raw_node_info):
    """Converts raw node information to a Node or Leaf instance."""
    type, value, context, children = raw_node_info
    if children or type in grammar.number2symbol:
        return pytree.Node(type, children, context=context)
    else:
        return pytree.Leaf(type, value, context=context)


def compile_pattern(pattern):
    return PatternCompiler().compile_pattern(pattern)
lgpl-3.0