repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
Archcady/mbed-os
tools/targets/REALTEK_RTL8195AM.py
4
9690
""" Realtek Semiconductor Corp. RTL8195A elf2bin script """ import sys, array, struct, os, re, subprocess import hashlib import shutil import time import binascii from tools.paths import TOOLS_BOOTLOADERS from tools.toolchains import TOOLCHAIN_PATHS # Constant Variables TAG = 0x81950001 VER = 0x81950001 CAMPAIGN = binascii.hexlify('FFFFFFFFFFFFFFFF') RAM2_HEADER = { 'tag': 0, 'ver': 0, 'timestamp': 0, 'size': 72, 'hash': 'FF', 'campaign': 'FF', 'crc32': 0xFFFFFFFF, } def format_number(number, width): # convert to string line = format(number, '0%dx' % (width)) if len(line) > width: print "[ERROR] 0x%s cannot fit in width %d" % (line, width) sys.exit(-1) # cut string to list & reverse line = [line[i:i+2] for i in range(0, len(line), 2)] line.reverse() return binascii.a2b_hex("".join(line)) def format_string(string): return binascii.a2b_hex(string) def write_number(value, width, output): output.write(format_number(value, width)) def write_string(value, width, output): output.write(format_string(value)) def append_image_file(image, output): input = open(image, "rb") output.write(input.read()) input.close() def write_padding_bytes(output_name, size): current_size = os.stat(output_name).st_size padcount = size - current_size if padcount < 0: print "[ERROR] image is larger than expected size" sys.exit(-1) output = open(output_name, "ab") output.write('\377' * padcount) output.close() def crc32_checksum(string): return binascii.crc32(string) & 0xFFFFFFFF def sha256_checksum(filename, block_size=65536): sha256 = hashlib.sha256() with open(filename, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): sha256.update(block) return sha256.hexdigest() def epoch_timestamp(): epoch = int(time.time()) return epoch def find_symbol(toolchain, mapfile, symbol): ret = None HEX = '0x0{,8}(?P<addr>[0-9A-Fa-f]{8})' if toolchain == "GCC_ARM": SYM = re.compile(r'^\s+' + HEX + r'\s+' + symbol + '\r?$') elif toolchain in ["ARM_STD", "ARM", "ARM_MICRO"]: SYM = re.compile(r'^\s+' + 
HEX + r'\s+0x[0-9A-Fa-f]{8}\s+Code.*\s+i\.' + symbol + r'\s+.*$') elif toolchain == "IAR": SYM = re.compile(r'^' + symbol + r'\s+' + HEX + '\s+.*$') with open(mapfile, 'r') as infile: for line in infile: match = re.match(SYM, line) if match: ret = match.group("addr") if not ret: print "[ERROR] cannot find the address of symbol " + symbol return 0 return int(ret,16) | 1 def parse_load_segment_gcc(image_elf): # Program Headers: # Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align # LOAD 0x000034 0x10006000 0x10006000 0x026bc 0x026bc RW 0x8 # LOAD 0x0026f0 0x30000000 0x30000000 0x06338 0x06338 RWE 0x4 segment_list = [] cmd = os.path.join(TOOLCHAIN_PATHS['GCC_ARM'], 'arm-none-eabi-readelf') cmd = '"' + cmd + '"' + ' -l ' + image_elf for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"): if not line.startswith(" LOAD"): continue segment = line.split() if len(segment) != 8: continue offset = int(segment[1][2:], 16) addr = int(segment[2][2:], 16) size = int(segment[4][2:], 16) if addr != 0 and size != 0: segment_list.append((offset, addr, size)) return segment_list def parse_load_segment_armcc(image_elf): # ==================================== # # ** Program header #2 # # Type : PT_LOAD (1) # File Offset : 52 (0x34) # Virtual Addr : 0x30000000 # Physical Addr : 0x30000000 # Size in file : 27260 bytes (0x6a7c) # Size in memory: 42168 bytes (0xa4b8) # Flags : PF_X + PF_W + PF_R + PF_ARM_ENTRY (0x80000007) # Alignment : 8 # (offset, addr, size) = (0, 0, 0) segment_list = [] in_segment = False cmd = os.path.join(TOOLCHAIN_PATHS['ARM'], 'bin', 'fromelf') cmd = '"' + cmd + '"' + ' --text -v --only=none ' + image_elf for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"): if line == "": pass elif line.startswith("** Program header"): in_segment = True elif in_segment == False: pass elif line.startswith("============"): if addr != 0 and size != 0: segment_list.append((offset, addr, size)) in_segment = 
False (offset, addr, size) = (0, 0, 0) elif line.startswith(" Type"): if not re.match(r'\s+Type\s+:\s+PT_LOAD\s.*$', line): in_segment = False elif line.startswith(" File Offset"): match = re.match(r'^\s+File Offset\s+:\s+(?P<offset>\d+).*$', line) if match: offset = int(match.group("offset")) elif line.startswith(" Virtual Addr"): match = re.match(r'^\s+Virtual Addr\s+:\s+0x(?P<addr>[0-9a-f]+).*$', line) if match: addr = int(match.group("addr"), 16) elif line.startswith(" Size in file"): match = re.match(r'^\s+Size in file\s+:.*\(0x(?P<size>[0-9a-f]+)\).*$', line) if match: size = int(match.group("size"), 16) return segment_list def parse_load_segment_iar(image_elf): # SEGMENTS: # # Type Offset Virtual Physical File Sz Mem Sz Flags Align # ---- ------ ------- -------- ------- ------ ----- ----- # 0: load 0x34 0x10006000 0x10006000 0x26bc 0x26bc 0x6 WR 0x8 # 1: load 0x26f0 0x30000000 0x30000000 0x6338 0x6338 0x7 XWR 0x4 # # SECTIONS: # # Name Type Addr Offset Size Aln Lnk Inf ESz Flags # ---- ---- ---- ------ ---- --- --- --- --- ----- # 1: .shstrtab strtab 0xfc4d8 0x60 0x4 # 2: .strtab strtab 0xfc538 0xbb3f 0x4 segment_list = [] in_segment = False cmd = os.path.join(TOOLCHAIN_PATHS['IAR'], 'bin', 'ielfdumparm') cmd = '"' + cmd + '"' + ' ' + image_elf for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"): if line.startswith(" SEGMENTS:"): in_segment = True elif in_segment == False: pass elif line.startswith(" SECTIONS:"): break elif re.match(r'^\s+\w+:\s+load\s+.*$', line): segment = line.split() offset = int(segment[2][2:], 16) addr = int(segment[3][2:], 16) size = int(segment[5][2:], 16) if addr != 0 and size != 0: segment_list.append((offset, addr, size)) return segment_list def parse_load_segment(toolchain, image_elf): if toolchain == "GCC_ARM": return parse_load_segment_gcc(image_elf) elif toolchain in ["ARM_STD", "ARM", "ARM_MICRO"]: return parse_load_segment_armcc(image_elf) elif toolchain == "IAR": return 
parse_load_segment_iar(image_elf) else: return [] def create_payload(image_elf, ram2_bin, entry, segment): file_elf = open(image_elf, "rb") file_bin = open(ram2_bin, "wb") write_number(int(entry), 8, file_bin) write_number(int(len(segment)), 8, file_bin) write_number(0xFFFFFFFF, 8, file_bin) write_number(0xFFFFFFFF, 8, file_bin) for (offset, addr, size) in segment: file_elf.seek(offset) # write image header - size & addr write_number(addr, 8, file_bin) write_number(size, 8, file_bin) # write load segment file_bin.write(file_elf.read(size)) delta = size % 4 if delta != 0: padding = 4 - delta write_number(0x0, padding * 2, file_bin) file_bin.close() file_elf.close() def create_daplink(image_bin, ram1_bin, ram2_bin): # remove target binary file/path if os.path.isfile(image_bin): os.remove(image_bin) else: shutil.rmtree(image_bin) RAM2_HEADER['tag'] = format_number(TAG, 8) RAM2_HEADER['ver'] = format_number(VER, 8) RAM2_HEADER['timestamp'] = format_number(epoch_timestamp(), 16) RAM2_HEADER['size'] = format_number(os.stat(ram2_bin).st_size + 72, 8) RAM2_HEADER['hash'] = format_string(sha256_checksum(ram2_bin)) RAM2_HEADER['campaign'] = format_string(CAMPAIGN) output = open(image_bin, "wb") append_image_file(ram1_bin, output) line = "" for key in ['tag', 'ver', 'timestamp', 'size', 'hash', 'campaign']: line += RAM2_HEADER[key] output.write(RAM2_HEADER[key]) RAM2_HEADER['crc32'] = format_number(crc32_checksum(line), 8) output.write(RAM2_HEADER['crc32']) append_image_file(ram2_bin, output) output.close() # ---------------------------- # main function # ---------------------------- def rtl8195a_elf2bin(t_self, image_elf, image_bin): image_name = os.path.splitext(image_elf)[0] image_map = image_name + '.map' ram1_bin = os.path.join(TOOLS_BOOTLOADERS, "REALTEK_RTL8195AM", "ram_1.bin") ram2_bin = image_name + '-payload.bin' entry = find_symbol(t_self.name, image_map, "PLAT_Start") segment = parse_load_segment(t_self.name, image_elf) create_payload(image_elf, ram2_bin, entry, 
segment) create_daplink(image_bin, ram1_bin, ram2_bin)
apache-2.0
jl2012/litecoin
qa/rpc-tests/rawtransactions.py
17
7189
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test re-org scenarios with a mempool that contains transactions # that spend (directly or indirectly) coinbase transactions. # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) #connect to a local machine for debugging #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 19332) #proxy = AuthServiceProxy(url) #proxy.url = url # store URL on proxy for info #self.nodes.append(proxy) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=False self.sync_all() def run_test(self): #prepare some coins for multiple *rawtransaction commands self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(101) self.sync_all() self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0) self.sync_all() self.nodes[0].generate(5) self.sync_all() ######################################### # sendrawtransaction with missing input # ######################################### inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists outputs = { self.nodes[0].getnewaddress() : 4.998 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) rawtx = self.nodes[2].signrawtransaction(rawtx) try: rawtx = self.nodes[2].sendrawtransaction(rawtx['hex']) 
except JSONRPCException as e: assert("Missing inputs" in e.error['message']) else: assert(False) ######################### # RAW TX MULTISIG TESTS # ######################### # 2of2 test addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) mSigObjValid = self.nodes[2].validateaddress(mSigObj) #use balance deltas instead of absolute values bal = self.nodes[2].getbalance() # send 1.2 BTC to msig adr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance # 2of3 test from different nodes bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr3 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) addr3Obj = self.nodes[2].validateaddress(addr3) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']]) mSigObjValid = self.nodes[2].validateaddress(mSigObj) txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) sPK = rawTx['vout'][0]['scriptPubKey']['hex'] self.sync_all() self.nodes[0].generate(1) self.sync_all() #THIS IS A INCOMPLETE FEATURE #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable txDetails = self.nodes[0].gettransaction(txId, True) rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx['vout']: 
if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}] outputs = { self.nodes[0].getnewaddress() : 2.19 } rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs) assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs) assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys self.nodes[2].sendrawtransaction(rawTxSigned['hex']) rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}] outputs = { self.nodes[0].getnewaddress() : 1 } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx= self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 1000) inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}] outputs = { self.nodes[0].getnewaddress() : 1 } assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs) inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}] outputs = { self.nodes[0].getnewaddress() : 1 } assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs) inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}] outputs = { self.nodes[0].getnewaddress() : 1 } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) 
decrawtx= self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) if __name__ == '__main__': RawTransactionsTest().main()
mit
Sodki/ansible
test/units/parsing/test_unquote.py
298
1602
# coding: utf-8 # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.parsing.quoting import unquote import pytest UNQUOTE_DATA = ( (u'1', u'1'), (u'\'1\'', u'1'), (u'"1"', u'1'), (u'"1 \'2\'"', u'1 \'2\''), (u'\'1 "2"\'', u'1 "2"'), (u'\'1 \'2\'\'', u'1 \'2\''), (u'"1\\"', u'"1\\"'), (u'\'1\\\'', u'\'1\\\''), (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'), (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'), (u'"', u'"'), (u'\'', u'\''), # Not entirely sure these are good but they match the current # behaviour (u'"1""2"', u'1""2'), (u'\'1\'\'2\'', u'1\'\'2'), (u'"1" 2 "3"', u'1" 2 "3'), (u'"1"\'2\'"3"', u'1"\'2\'"3'), ) @pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA) def test_unquote(quoted, expected): assert unquote(quoted) == expected
gpl-3.0
nycholas/ask-undrgz
src/ask-undrgz/django/conf/locale/id/formats.py
80
1578
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # DATE_FORMAT = 'j N Y' DATETIME_FORMAT = "j N Y, G:i:s" TIME_FORMAT = 'G:i:s' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd-m-Y' SHORT_DATETIME_FORMAT = 'd-m-Y G:i:s' FIRST_DAY_OF_WEEK = 1 #Monday DATE_INPUT_FORMATS = ( '%d-%m-%y', '%d/%m/%y', # '25-10-09' , 25/10/09' '%d-%m-%Y', '%d/%m/%Y', # '25-10-2009' , 25/10/2009' # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%d-%m-%Y %H:%M:%S', # '25-10-2009 14:30:59' '%d-%m-%Y %H:%M', # '25-10-2009 14:30' '%d-%m-%Y', # '25-10-2009' '%d-%m-%y %H:%M:%S', # '25-10-09' 14:30:59' '%d-%m-%y %H:%M', # '25-10-09' 14:30' '%d-%m-%y', # '25-10-09'' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M', # '10/25/06 14:30' '%m/%d/%y', # '10/25/06' '%m/%d/%Y %H:%M:%S', # '25/10/2009 14:30:59' '%m/%d/%Y %H:%M', # '25/10/2009 14:30' '%m/%d/%Y', # '10/25/2009' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
Tesora/tesora-horizon
openstack_dashboard/dashboards/admin/networks/ports/tests.py
8
19888
# Copyright 2012 NEC Corporation # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django import http from mox3.mox import IsA # noqa from openstack_dashboard import api from openstack_dashboard.test import helpers as test DETAIL_URL = 'horizon:admin:networks:ports:detail' NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index') NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail' class NetworkPortTests(test.BaseAdminViewTests): @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported',)}) def test_port_detail(self): self._test_port_detail() @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported',)}) def test_port_detail_with_mac_learning(self): self._test_port_detail(mac_learning=True) def _test_port_detail(self, mac_learning=False): port = self.ports.first() api.neutron.port_get(IsA(http.HttpRequest), port.id)\ .AndReturn(self.ports.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .MultipleTimes().AndReturn(mac_learning) self.mox.ReplayAll() res = self.client.get(reverse(DETAIL_URL, args=[port.id])) self.assertTemplateUsed(res, 'horizon/common/_detail.html') self.assertEqual(res.context['port'].id, port.id) @test.create_stubs({api.neutron: ('port_get',)}) def test_port_detail_exception(self): port = self.ports.first() api.neutron.port_get(IsA(http.HttpRequest), port.id)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() 
res = self.client.get(reverse(DETAIL_URL, args=[port.id])) redir_url = NETWORKS_INDEX_URL self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported',)}) def test_port_create_get(self): self._test_port_create_get() @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported',)}) def test_port_create_get_with_mac_learning(self): self._test_port_create_get(mac_learning=True) def _test_port_create_get(self, mac_learning=False, binding=False): network = self.networks.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() url = reverse('horizon:admin:networks:addport', args=[network.id]) res = self.client.get(url) self.assertTemplateUsed(res, 'admin/networks/ports/create.html') @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'port_create',)}) def test_port_create_post(self): self._test_port_create_post() @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'port_create',)}) def test_port_create_post_with_mac_learning(self): self._test_port_create_post(mac_learning=True, binding=False) def _test_port_create_post(self, mac_learning=False, binding=False): network = self.networks.first() port = self.ports.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) extension_kwargs = {} if binding: extension_kwargs['binding__vnic_type'] = \ port.binding__vnic_type if mac_learning: 
extension_kwargs['mac_learning_enabled'] = True api.neutron.port_create(IsA(http.HttpRequest), tenant_id=network.tenant_id, network_id=network.id, name=port.name, admin_state_up=port.admin_state_up, device_id=port.device_id, device_owner=port.device_owner, binding__host_id=port.binding__host_id, **extension_kwargs)\ .AndReturn(port) self.mox.ReplayAll() form_data = {'network_id': port.network_id, 'network_name': network.name, 'name': port.name, 'admin_state': port.admin_state_up, 'device_id': port.device_id, 'device_owner': port.device_owner, 'binding__host_id': port.binding__host_id} if binding: form_data['binding__vnic_type'] = port.binding__vnic_type if mac_learning: form_data['mac_state'] = True url = reverse('horizon:admin:networks:addport', args=[port.network_id]) res = self.client.post(url, form_data) self.assertNoFormErrors(res) redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'port_create', 'is_extension_supported',)}) def test_port_create_post_exception(self): self._test_port_create_post_exception() @test.create_stubs({api.neutron: ('network_get', 'port_create', 'is_extension_supported',)}) def test_port_create_post_exception_with_mac_learning(self): self._test_port_create_post_exception(mac_learning=True) def _test_port_create_post_exception(self, mac_learning=False, binding=False): network = self.networks.first() port = self.ports.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) extension_kwargs = {} if binding: extension_kwargs['binding__vnic_type'] = port.binding__vnic_type if mac_learning: 
extension_kwargs['mac_learning_enabled'] = True api.neutron.port_create(IsA(http.HttpRequest), tenant_id=network.tenant_id, network_id=network.id, name=port.name, admin_state_up=port.admin_state_up, device_id=port.device_id, device_owner=port.device_owner, binding__host_id=port.binding__host_id, **extension_kwargs)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() form_data = {'network_id': port.network_id, 'network_name': network.name, 'name': port.name, 'admin_state': port.admin_state_up, 'mac_state': True, 'device_id': port.device_id, 'device_owner': port.device_owner, 'binding__host_id': port.binding__host_id} if binding: form_data['binding__vnic_type'] = port.binding__vnic_type if mac_learning: form_data['mac_learning_enabled'] = True url = reverse('horizon:admin:networks:addport', args=[port.network_id]) res = self.client.post(url, form_data) self.assertNoFormErrors(res) redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported',)}) def test_port_update_get(self): self._test_port_update_get() @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported',)}) def test_port_update_get_with_mac_learning(self): self._test_port_update_get(mac_learning=True) def _test_port_update_get(self, mac_learning=False, binding=False): port = self.ports.first() api.neutron.port_get(IsA(http.HttpRequest), port.id)\ .AndReturn(port) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() url = reverse('horizon:admin:networks:editport', args=[port.network_id, port.id]) res = self.client.get(url) self.assertTemplateUsed(res, 'admin/networks/ports/update.html') @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported', 'port_update')}) def test_port_update_post(self): 
self._test_port_update_post() @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported', 'port_update')}) def test_port_update_post_with_mac_learning(self): self._test_port_update_post(mac_learning=True) def _test_port_update_post(self, mac_learning=False, binding=False): port = self.ports.first() api.neutron.port_get(IsA(http.HttpRequest), port.id)\ .AndReturn(port) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) extension_kwargs = {} if binding: extension_kwargs['binding__vnic_type'] = port.binding__vnic_type if mac_learning: extension_kwargs['mac_learning_enabled'] = True api.neutron.port_update(IsA(http.HttpRequest), port.id, name=port.name, admin_state_up=port.admin_state_up, device_id=port.device_id, device_owner=port.device_owner, binding__host_id=port.binding__host_id, **extension_kwargs)\ .AndReturn(port) self.mox.ReplayAll() form_data = {'network_id': port.network_id, 'port_id': port.id, 'name': port.name, 'admin_state': port.admin_state_up, 'device_id': port.device_id, 'device_owner': port.device_owner, 'binding__host_id': port.binding__host_id} if binding: form_data['binding__vnic_type'] = port.binding__vnic_type if mac_learning: form_data['mac_state'] = True url = reverse('horizon:admin:networks:editport', args=[port.network_id, port.id]) res = self.client.post(url, form_data) redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported', 'port_update')}) def test_port_update_post_exception(self): self._test_port_update_post_exception() @test.create_stubs({api.neutron: ('port_get', 'is_extension_supported', 'port_update')}) def test_port_update_post_exception_with_mac_learning(self): self._test_port_update_post_exception(mac_learning=True, binding=False) def 
_test_port_update_post_exception(self, mac_learning=False, binding=False): port = self.ports.first() api.neutron.port_get(IsA(http.HttpRequest), port.id)\ .AndReturn(port) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'binding')\ .AndReturn(binding) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) extension_kwargs = {} if binding: extension_kwargs['binding__vnic_type'] = port.binding__vnic_type if mac_learning: extension_kwargs['mac_learning_enabled'] = True api.neutron.port_update(IsA(http.HttpRequest), port.id, name=port.name, admin_state_up=port.admin_state_up, device_id=port.device_id, device_owner=port.device_owner, binding__host_id=port.binding__host_id, **extension_kwargs)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() form_data = {'network_id': port.network_id, 'port_id': port.id, 'name': port.name, 'admin_state': port.admin_state_up, 'device_id': port.device_id, 'device_owner': port.device_owner, 'binding__host_id': port.binding__host_id} if binding: form_data['binding__vnic_type'] = port.binding__vnic_type if mac_learning: form_data['mac_state'] = True url = reverse('horizon:admin:networks:editport', args=[port.network_id, port.id]) res = self.client.post(url, form_data) redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('port_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_port_delete(self): self._test_port_delete() @test.create_stubs({api.neutron: ('port_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_port_delete_with_mac_learning(self): self._test_port_delete(mac_learning=True) def _test_port_delete(self, mac_learning=False): port = self.ports.first() network_id = port.network_id api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest), network_id).\ 
AndReturn(self.agents.list()) api.neutron.port_delete(IsA(http.HttpRequest), port.id) api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.subnets.first()]) api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.ports.first()]) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() form_data = {'action': 'ports__delete__%s' % port.id} url = reverse(NETWORKS_DETAIL_URL, args=[network_id]) res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, url) @test.create_stubs({api.neutron: ('port_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_port_delete_exception(self): self._test_port_delete_exception() @test.create_stubs({api.neutron: ('port_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks')}) def test_port_delete_exception_with_mac_learning(self): self._test_port_delete_exception(mac_learning=True) def _test_port_delete_exception(self, mac_learning=False): port = self.ports.first() network_id = port.network_id api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest), network_id).\ AndReturn(self.agents.list()) api.neutron.port_delete(IsA(http.HttpRequest), port.id)\ .AndRaise(self.exceptions.neutron) api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.subnets.first()]) api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.ports.first()]) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() form_data = {'action': 'ports__delete__%s' % port.id} url = reverse(NETWORKS_DETAIL_URL, args=[network_id]) res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, url)
apache-2.0
nichung/wwwflaskBlogrevA
env/lib/python2.7/site-packages/wtforms/csrf/core.py
174
3157
from wtforms.validators import ValidationError from wtforms.fields import HiddenField __all__ = ('CSRFTokenField', 'CSRF') class CSRFTokenField(HiddenField): """ A subclass of HiddenField designed for sending the CSRF token that is used for most CSRF protection schemes. Notably different from a normal field, this field always renders the current token regardless of the submitted value, and also will not be populated over to object data via populate_obj """ current_token = None def __init__(self, *args, **kw): self.csrf_impl = kw.pop('csrf_impl') super(CSRFTokenField, self).__init__(*args, **kw) def _value(self): """ We want to always return the current token on render, regardless of whether a good or bad token was passed. """ return self.current_token def populate_obj(self, *args): """ Don't populate objects with the CSRF token """ pass def pre_validate(self, form): """ Handle validation of this token field. """ self.csrf_impl.validate_csrf_token(form, self) def process(self, *args): super(CSRFTokenField, self).process(*args) self.current_token = self.csrf_impl.generate_csrf_token(self) class CSRF(object): field_class = CSRFTokenField def setup_form(self, form): """ Receive the form we're attached to and set up fields. The default implementation creates a single field of type :attr:`field_class` with name taken from the ``csrf_field_name`` of the class meta. :param form: The form instance we're attaching to. :return: A sequence of `(field_name, unbound_field)` 2-tuples which are unbound fields to be added to the form. """ meta = form.meta field_name = meta.csrf_field_name unbound_field = self.field_class( label='CSRF Token', csrf_impl=self ) return [(field_name, unbound_field)] def generate_csrf_token(self, csrf_token_field): """ Implementations must override this to provide a method with which one can get a CSRF token for this form. 
A CSRF token is usually a string that is generated deterministically based on some sort of user data, though it can be anything which you can validate on a subsequent request. :param csrf_token_field: The field which is being used for CSRF. :return: A generated CSRF string. """ raise NotImplementedError() def validate_csrf_token(self, form, field): """ Override this method to provide custom CSRF validation logic. The default CSRF validation logic simply checks if the recently generated token equals the one we received as formdata. :param form: The form which has this CSRF token. :param field: The CSRF token field. """ if field.current_token != field.data: raise ValidationError(field.gettext('Invalid CSRF Token'))
mit
slgobinath/SafeEyes
safeeyes/ui/break_screen.py
1
10537
#!/usr/bin/env python # Safe Eyes is a utility to remind you to take break frequently # to protect your eyes from eye strain. # Copyright (C) 2016 Gobinath # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import os import threading import time import gi from safeeyes import utility from Xlib.display import Display from Xlib.display import X gi.require_version('Gtk', '3.0') from gi.repository import Gdk from gi.repository import GLib from gi.repository import Gtk BREAK_SCREEN_GLADE = os.path.join(utility.BIN_DIRECTORY, "glade/break_screen.glade") class BreakScreen: """ The fullscreen window which prevents users from using the computer. This class reads the break_screen.glade and build the user interface. 
""" def __init__(self, context, on_skip, on_postpone, style_sheet_path): self.context = context self.count_labels = [] self.display = Display() self.enable_postpone = False self.enable_shortcut = False self.is_pretified = False self.keycode_shortcut_postpone = 65 self.keycode_shortcut_skip = 9 self.on_postpone = on_postpone self.on_skip = on_skip self.shortcut_disable_time = 2 self.strict_break = False self.windows = [] # Initialize the theme css_provider = Gtk.CssProvider() css_provider.load_from_path(style_sheet_path) Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) def initialize(self, config): """ Initialize the internal properties from configuration """ logging.info("Initialize the break screen") self.enable_postpone = config.get('allow_postpone', False) self.keycode_shortcut_postpone = config.get('shortcut_postpone', 65) self.keycode_shortcut_skip = config.get('shortcut_skip', 9) self.shortcut_disable_time = config.get('shortcut_disable_time', 2) self.strict_break = config.get('strict_break', False) def skip_break(self): """ Skip the break from the break screen """ logging.info("User skipped the break") # Must call on_skip before close to lock screen before closing the break screen self.on_skip() self.close() def postpone_break(self): """ Postpone the break from the break screen """ logging.info("User postponed the break") self.on_postpone() self.close() def on_window_delete(self, *args): """ Window close event handler. """ logging.info("Closing the break screen") self.close() def on_skip_clicked(self, button): """ Skip button press event handler. """ self.skip_break() def on_postpone_clicked(self, button): """ Postpone button press event handler. """ self.postpone_break() def show_count_down(self, countdown, seconds): """ Show/update the count down on all screens. 
""" self.enable_shortcut = self.shortcut_disable_time <= seconds mins, secs = divmod(countdown, 60) timeformat = '{:02d}:{:02d}'.format(mins, secs) GLib.idle_add(lambda: self.__update_count_down(timeformat)) def show_message(self, break_obj, widget, tray_actions=[]): """ Show the break screen with the given message on all displays. """ message = break_obj.name image_path = break_obj.image self.enable_shortcut = self.shortcut_disable_time <= 0 GLib.idle_add(lambda: self.__show_break_screen(message, image_path, widget, tray_actions)) def close(self): """ Hide the break screen from active window and destroy all other windows """ logging.info("Close the break screen(s)") self.__release_keyboard() # Destroy other windows if exists GLib.idle_add(lambda: self.__destroy_all_screens()) def __tray_action(self, button, tray_action): """ Tray action handler. Hides all toolbar buttons for this action and call the action provided by the plugin. """ tray_action.reset() tray_action.action() def __show_break_screen(self, message, image_path, widget, tray_actions): """ Show an empty break screen on all screens. 
""" # Lock the keyboard utility.start_thread(self.__lock_keyboard) screen = Gtk.Window().get_screen() no_of_monitors = screen.get_n_monitors() logging.info("Show break screens in %d display(s)", no_of_monitors) for monitor in range(no_of_monitors): monitor_gemoetry = screen.get_monitor_geometry(monitor) x = monitor_gemoetry.x y = monitor_gemoetry.y builder = Gtk.Builder() builder.add_from_file(BREAK_SCREEN_GLADE) builder.connect_signals(self) window = builder.get_object("window_main") window.set_title("SafeEyes-" + str(monitor)) lbl_message = builder.get_object("lbl_message") lbl_count = builder.get_object("lbl_count") lbl_widget = builder.get_object("lbl_widget") img_break = builder.get_object("img_break") box_buttons = builder.get_object("box_buttons") toolbar = builder.get_object("toolbar") for tray_action in tray_actions: toolbar_button = None if tray_action.system_icon: toolbar_button = Gtk.ToolButton.new_from_stock(tray_action.get_icon()) else: toolbar_button = Gtk.ToolButton.new(tray_action.get_icon(), tray_action.name) tray_action.add_toolbar_button(toolbar_button) toolbar_button.connect("clicked", lambda button, action: self.__tray_action(button, action), tray_action) toolbar_button.set_tooltip_text(_(tray_action.name)) toolbar.add(toolbar_button) toolbar_button.show() # Add the buttons if self.enable_postpone: # Add postpone button btn_postpone = Gtk.Button(_('Postpone')) btn_postpone.get_style_context().add_class('btn_postpone') btn_postpone.connect('clicked', self.on_postpone_clicked) btn_postpone.set_visible(True) box_buttons.pack_start(btn_postpone, True, True, 0) if not self.strict_break: # Add the skip button btn_skip = Gtk.Button(_('Skip')) btn_skip.get_style_context().add_class('btn_skip') btn_skip.connect('clicked', self.on_skip_clicked) btn_skip.set_visible(True) box_buttons.pack_start(btn_skip, True, True, 0) # Set values if image_path: img_break.set_from_file(image_path) lbl_message.set_label(message) lbl_widget.set_markup(widget) 
self.windows.append(window) self.count_labels.append(lbl_count) # Set visual to apply css theme. It should be called before show method. window.set_visual(window.get_screen().get_rgba_visual()) if self.context['desktop'] == 'kde': # Fix flickering screen in KDE by setting opacity to 1 window.set_opacity(0.9) # In Unity, move the window before present window.move(x, y) window.resize(monitor_gemoetry.width, monitor_gemoetry.height) window.stick() window.set_keep_above(True) window.fullscreen() window.present() # In other desktop environments, move the window after present window.move(x, y) window.resize(monitor_gemoetry.width, monitor_gemoetry.height) logging.info("Moved break screen to Display[%d, %d]", x, y) def __update_count_down(self, count): """ Update the countdown on all break screens. """ for label in self.count_labels: label.set_text(count) def __lock_keyboard(self): """ Lock the keyboard to prevent the user from using keyboard shortcuts """ logging.info("Lock the keyboard") self.lock_keyboard = True # Grab the keyboard root = self.display.screen().root root.change_attributes(event_mask=X.KeyPressMask | X.KeyReleaseMask) root.grab_keyboard(True, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime) # Consume keyboard events while self.lock_keyboard: if self.display.pending_events() > 0: # Avoid waiting for next event by checking pending events event = self.display.next_event() if self.enable_shortcut and event.type == X.KeyPress: if event.detail == self.keycode_shortcut_skip and not self.strict_break: self.skip_break() break elif self.enable_postpone and event.detail == self.keycode_shortcut_postpone: self.postpone_break() break else: # Reduce the CPU usage by sleeping for a second time.sleep(1) def __release_keyboard(self): """ Release the locked keyboard. """ logging.info("Unlock the keyboard") self.lock_keyboard = False self.display.ungrab_keyboard(X.CurrentTime) self.display.flush() def __destroy_all_screens(self): """ Close all the break screens. 
""" for win in self.windows: win.destroy() del self.windows[:] del self.count_labels[:]
gpl-3.0
citrix-openstack-build/horizon
openstack_dashboard/dashboards/admin/users/forms.py
8
8217
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.forms import ValidationError # noqa from django.utils.translation import ugettext_lazy as _ # noqa from django.views.decorators.debug import sensitive_variables # noqa from horizon import exceptions from horizon import forms from horizon import messages from horizon.utils import validators from openstack_dashboard import api LOG = logging.getLogger(__name__) class BaseUserForm(forms.SelfHandlingForm): def __init__(self, request, *args, **kwargs): super(BaseUserForm, self).__init__(request, *args, **kwargs) # Populate project choices project_choices = [('', _("Select a project"))] # If the user is already set (update action), list only projects which # the user has access to. 
user_id = kwargs['initial'].get('id', None) domain_id = kwargs['initial'].get('domain_id', None) projects, has_more = api.keystone.tenant_list(request, domain=domain_id, user=user_id) for project in projects: if project.enabled: project_choices.append((project.id, project.name)) self.fields['project'].choices = project_choices def clean(self): '''Check to make sure password fields match.''' data = super(forms.Form, self).clean() if 'password' in data: if data['password'] != data.get('confirm_password', None): raise ValidationError(_('Passwords do not match.')) return data ADD_PROJECT_URL = "horizon:admin:projects:create" class CreateUserForm(BaseUserForm): # Hide the domain_id and domain_name by default domain_id = forms.CharField(label=_("Domain ID"), required=False, widget=forms.HiddenInput()) domain_name = forms.CharField(label=_("Domain Name"), required=False, widget=forms.HiddenInput()) name = forms.CharField(label=_("User Name")) email = forms.EmailField( label=_("Email"), required=False) password = forms.RegexField( label=_("Password"), widget=forms.PasswordInput(render_value=False), regex=validators.password_validator(), error_messages={'invalid': validators.password_validator_msg()}) confirm_password = forms.CharField( label=_("Confirm Password"), required=False, widget=forms.PasswordInput(render_value=False)) project = forms.DynamicChoiceField(label=_("Primary Project"), add_item_link=ADD_PROJECT_URL) role_id = forms.ChoiceField(label=_("Role")) def __init__(self, *args, **kwargs): roles = kwargs.pop('roles') super(CreateUserForm, self).__init__(*args, **kwargs) role_choices = [(role.id, role.name) for role in roles] self.fields['role_id'].choices = role_choices # For keystone V3, display the two fields in read-only if api.keystone.VERSIONS.active >= 3: readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'}) self.fields["domain_id"].widget = readonlyInput self.fields["domain_name"].widget = readonlyInput # We have to protect the entire "data" dict 
because it contains the # password and confirm_password strings. @sensitive_variables('data') def handle(self, request, data): domain = api.keystone.get_default_domain(self.request) try: LOG.info('Creating user with name "%s"' % data['name']) new_user = api.keystone.user_create(request, name=data['name'], email=data['email'], password=data['password'], project=data['project'], enabled=True, domain=domain.id) messages.success(request, _('User "%s" was successfully created.') % data['name']) if data['role_id']: try: api.keystone.add_tenant_user_role(request, data['project'], new_user.id, data['role_id']) except Exception: exceptions.handle(request, _('Unable to add user ' 'to primary project.')) return new_user except Exception: exceptions.handle(request, _('Unable to create user.')) class UpdateUserForm(BaseUserForm): # Hide the domain_id and domain_name by default domain_id = forms.CharField(label=_("Domain ID"), required=False, widget=forms.HiddenInput()) domain_name = forms.CharField(label=_("Domain Name"), required=False, widget=forms.HiddenInput()) id = forms.CharField(label=_("ID"), widget=forms.HiddenInput) name = forms.CharField(label=_("User Name")) email = forms.EmailField( label=_("Email"), required=False) password = forms.RegexField( label=_("Password"), widget=forms.PasswordInput(render_value=False), regex=validators.password_validator(), required=False, error_messages={'invalid': validators.password_validator_msg()}) confirm_password = forms.CharField( label=_("Confirm Password"), widget=forms.PasswordInput(render_value=False), required=False) project = forms.ChoiceField(label=_("Primary Project")) def __init__(self, request, *args, **kwargs): super(UpdateUserForm, self).__init__(request, *args, **kwargs) if api.keystone.keystone_can_edit_user() is False: for field in ('name', 'email', 'password', 'confirm_password'): self.fields.pop(field) # For keystone V3, display the two fields in read-only if api.keystone.VERSIONS.active >= 3: readonlyInput = 
forms.TextInput(attrs={'readonly': 'readonly'}) self.fields["domain_id"].widget = readonlyInput self.fields["domain_name"].widget = readonlyInput # We have to protect the entire "data" dict because it contains the # password and confirm_password strings. @sensitive_variables('data', 'password') def handle(self, request, data): user = data.pop('id') # Throw away the password confirmation, we're done with it. data.pop('confirm_password', None) data.pop('domain_id') data.pop('domain_name') try: api.keystone.user_update(request, user, **data) messages.success(request, _('User has been updated successfully.')) except Exception: exceptions.handle(request, ignore=True) messages.error(request, _('Unable to update the user.')) return True
apache-2.0
davidt/reviewboard
reviewboard/avatars/registry.py
2
3257
from __future__ import unicode_literals from djblets.avatars.registry import ( AvatarServiceRegistry as DjbletsAvatarServiceRegistry) from djblets.avatars.services import (FileUploadService, GravatarService, URLAvatarService) from djblets.registries.mixins import ExceptionFreeGetterMixin from djblets.siteconfig.models import SiteConfiguration from reviewboard.avatars.settings import UserProfileAvatarSettingsManager class AvatarServiceRegistry(ExceptionFreeGetterMixin, DjbletsAvatarServiceRegistry): """A registry for managing avatar services. This registry is a special case of Djblets' :py:class:`~djblets.avatars.registry.AvatarServiceRegistry` that will automatically migrate Review Board's settings (in the site configuration) from the old settings pertaining to Gravatars to the new avatar services preferences on first run. """ #: The key for enabling avatars. AVATARS_ENABLED_KEY = 'avatars_enabled' #: The key for migrating avatars. AVATARS_MIGRATED_KEY = 'avatars_migrated' #: The default avatar service classes. default_avatar_service_classes = [ GravatarService, FileUploadService, URLAvatarService, ] settings_manager_class = UserProfileAvatarSettingsManager @property def avatars_enabled(self): """Whether or not avatars are enabled. Returns: bool: Whether or not avatars are enabled. """ self.populate() siteconfig = SiteConfiguration.objects.get_current() return siteconfig.get(self.AVATARS_ENABLED_KEY) @avatars_enabled.setter def avatars_enabled(self, value): """Set whether or not avatars are enabled. Args: value (bool): Whether or not avatars are to be enabled. """ self.populate() siteconfig = SiteConfiguration.objects.get_current() siteconfig.set(self.AVATARS_ENABLED_KEY, value) siteconfig.save() def populate(self): """Populate the avatar service registry. On first run, the site configuration will be migrated to use the new avatar services instead of the ``integration_gravatars`` setting. 
""" if self.populated: return siteconfig = SiteConfiguration.objects.get_current() # Upon first run, migrate to the new avatar services settings. if not siteconfig.get(self.AVATARS_MIGRATED_KEY): avatars_enabled = siteconfig.get('integration_gravatars') siteconfig.set(self.AVATARS_MIGRATED_KEY, True) siteconfig.set(self.AVATARS_ENABLED_KEY, avatars_enabled) if avatars_enabled: siteconfig.set( self.ENABLED_SERVICES_KEY, [ service.avatar_service_id for service in self.default_avatar_service_classes ] ) siteconfig.set(self.DEFAULT_SERVICE_KEY, GravatarService.avatar_service_id) siteconfig.save() super(AvatarServiceRegistry, self).populate()
mit
CourseTalk/edx-platform
lms/djangoapps/lms_migration/management/commands/create_groups.py
102
1794
#!/usr/bin/python # # File: create_groups.py # # Create all staff_* groups for classes in data directory. import os from django.core.management.base import BaseCommand from django.conf import settings from django.contrib.auth.models import Group from path import Path as path from lxml import etree def create_groups(): ''' Create staff and instructor groups for all classes in the data_dir ''' data_dir = settings.DATA_DIR print "data_dir = %s" % data_dir for course_dir in os.listdir(data_dir): if course_dir.startswith('.'): continue if not os.path.isdir(path(data_dir) / course_dir): continue cxfn = path(data_dir) / course_dir / 'course.xml' try: coursexml = etree.parse(cxfn) except Exception: # pylint: disable=broad-except print "Oops, cannot read %s, skipping" % cxfn continue cxmlroot = coursexml.getroot() course = cxmlroot.get('course') # TODO (vshnayder!!): read metadata from policy file(s) instead of from course.xml if course is None: print "oops, can't get course id for %s" % course_dir continue print "course=%s for course_dir=%s" % (course, course_dir) create_group('staff_%s' % course) # staff group create_group('instructor_%s' % course) # instructor group (can manage staff group list) def create_group(gname): if Group.objects.filter(name=gname): print " group exists for %s" % gname return g = Group(name=gname) g.save() print " created group %s" % gname class Command(BaseCommand): help = "Create groups associated with all courses in data_dir." def handle(self, *args, **options): create_groups()
agpl-3.0
endlessm/chromium-browser
third_party/llvm/llvm/utils/benchmark/tools/gbench/report.py
43
8300
"""report.py - Utilities for reporting statistics about benchmark results """ import os import re import copy class BenchmarkColor(object): def __init__(self, name, code): self.name = name self.code = code def __repr__(self): return '%s%r' % (self.__class__.__name__, (self.name, self.code)) def __format__(self, format): return self.code # Benchmark Colors Enumeration BC_NONE = BenchmarkColor('NONE', '') BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') BC_CYAN = BenchmarkColor('CYAN', '\033[96m') BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') BC_HEADER = BenchmarkColor('HEADER', '\033[92m') BC_WARNING = BenchmarkColor('WARNING', '\033[93m') BC_WHITE = BenchmarkColor('WHITE', '\033[97m') BC_FAIL = BenchmarkColor('FAIL', '\033[91m') BC_ENDC = BenchmarkColor('ENDC', '\033[0m') BC_BOLD = BenchmarkColor('BOLD', '\033[1m') BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') def color_format(use_color, fmt_str, *args, **kwargs): """ Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string. """ assert use_color is True or use_color is False if not use_color: args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args] kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE for key, arg in kwargs.items()} return fmt_str.format(*args, **kwargs) def find_longest_name(benchmark_list): """ Return the length of the longest benchmark name in a given list of benchmark JSON objects """ longest_name = 1 for bc in benchmark_list: if len(bc['name']) > longest_name: longest_name = len(bc['name']) return longest_name def calculate_change(old_val, new_val): """ Return a float representing the decimal change between old_val and new_val. 
""" if old_val == 0 and new_val == 0: return 0.0 if old_val == 0: return float(new_val - old_val) / (float(old_val + new_val) / 2) return float(new_val - old_val) / abs(old_val) def filter_benchmark(json_orig, family, replacement=""): """ Apply a filter to the json, and only leave the 'family' of benchmarks. """ regex = re.compile(family) filtered = {} filtered['benchmarks'] = [] for be in json_orig['benchmarks']: if not regex.search(be['name']): continue filteredbench = copy.deepcopy(be) # Do NOT modify the old name! filteredbench['name'] = regex.sub(replacement, filteredbench['name']) filtered['benchmarks'].append(filteredbench) return filtered def generate_difference_report(json1, json2, use_color=True): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. """ first_col_width = find_longest_name(json1['benchmarks']) def find_test(name): for b in json2['benchmarks']: if b['name'] == name: return b return None first_col_width = max(first_col_width, len('Benchmark')) first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( 'Benchmark', 12 + first_col_width) output_strs = [first_line, '-' * len(first_line)] gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn) for bn in gen: other_bench = find_test(bn['name']) if not other_bench: continue if bn['time_unit'] != other_bench['time_unit']: continue def get_color(res): if res > 0.05: return BC_FAIL elif res > -0.07: return BC_WHITE else: return BC_CYAN fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" tres = calculate_change(bn['real_time'], other_bench['real_time']) cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) output_strs += [color_format(use_color, fmt_str, BC_HEADER, bn['name'], first_col_width, get_color(tres), tres, get_color(cpures), cpures, bn['real_time'], other_bench['real_time'], bn['cpu_time'], other_bench['cpu_time'], endc=BC_ENDC)] 
return output_strs ############################################################################### # Unit tests import unittest class TestReportDifference(unittest.TestCase): def load_results(self): import json testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') testOutput1 = os.path.join(testInputs, 'test1_run1.json') testOutput2 = os.path.join(testInputs, 'test1_run2.json') with open(testOutput1, 'r') as f: json1 = json.load(f) with open(testOutput2, 'r') as f: json2 = json.load(f) return json1, json2 def test_basic(self): expect_lines = [ ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], ] json1, json2 = self.load_results() output_lines_with_header = generate_difference_report(json1, json2, use_color=False) output_lines = output_lines_with_header[2:] print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) self.assertEqual(parts, expect_lines[i]) class TestReportDifferenceBetweenFamilies(unittest.TestCase): def load_result(self): import json testInputs = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') testOutput = os.path.join(testInputs, 'test2_run.json') with open(testOutput, 'r') as f: json = json.load(f) return json def test_basic(self): expect_lines = [ ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], ] json = self.load_result() json1 = filter_benchmark(json, "BM_Z.ro", ".") json2 = filter_benchmark(json, "BM_O.e", ".") output_lines_with_header = generate_difference_report(json1, json2, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) self.assertEqual(parts, expect_lines[i]) if __name__ == '__main__': unittest.main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; # kate: indent-mode python; remove-trailing-spaces modified;
bsd-3-clause
ity/pants
tests/python/pants_test/backend/python/tasks/test_pytest_run_integration.py
7
3703
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import time import unittest from pants.util.contextutil import temporary_dir from pants_test.pants_run_integration_test import PantsRunIntegrationTest class PytestRunIntegrationTest(PantsRunIntegrationTest): def test_pytest_run_timeout_succeeds(self): pants_run = self.run_pants(['clean-all', 'test.pytest', '--test-pytest-options=-k within_timeout', '--timeout-default=2', 'testprojects/tests/python/pants/timeout:exceeds_timeout']) self.assert_success(pants_run) @unittest.skip('https://github.com/pantsbuild/pants/issues/3255') def test_pytest_run_timeout_fails(self): start = time.time() pants_run = self.run_pants(['clean-all', 'test.pytest', '--test-pytest-coverage=1', '--test-pytest-options=-k exceeds_timeout', '--timeout-default=1', 'testprojects/tests/python/pants/timeout:exceeds_timeout']) end = time.time() self.assert_failure(pants_run) # Ensure that the failure took less than 20 seconds to run. self.assertLess(end - start, 20) # Ensure that a warning about coverage reporting was emitted. self.assertIn("No .coverage file was found! Skipping coverage reporting", pants_run.stderr_data) # Ensure that the timeout message triggered. 
self.assertIn("FAILURE: Timeout of 1 seconds reached", pants_run.stdout_data) @unittest.skip('https://github.com/pantsbuild/pants/issues/3255') def test_pytest_run_timeout_cant_terminate(self): start = time.time() terminate_wait = 2 pants_run = self.run_pants(['clean-all', 'test.pytest', '--test-pytest-coverage=1', '--test-pytest-timeout-terminate-wait=%d' % terminate_wait, '--timeout-default=1', 'testprojects/tests/python/pants/timeout:ignores_terminate']) end = time.time() self.assert_failure(pants_run) # Ensure that the failure took less than 20 seconds to run. self.assertLess(end - start, 20) # Ensure that a warning about coverage reporting was emitted. self.assertIn("No .coverage file was found! Skipping coverage reporting", pants_run.stderr_data) # Ensure that the timeout message triggered. self.assertIn("FAILURE: Timeout of 1 seconds reached", pants_run.stdout_data) # Ensure that the warning about killing triggered. self.assertIn("WARN] Timed out test did not terminate gracefully after {} seconds, " "killing...".format(terminate_wait), pants_run.stderr_data) def test_pytest_explicit_coverage(self): with temporary_dir() as coverage_dir: pants_run = self.run_pants(['clean-all', 'test.pytest', '--test-pytest-coverage=path:testprojects', '--test-pytest-coverage-output-dir={}'.format(coverage_dir), 'testprojects/tests/python/pants/constants_only:constants_only']) self.assert_success(pants_run) self.assertTrue(os.path.exists(os.path.join(coverage_dir, 'coverage.xml')))
apache-2.0
maxcutler/Courant-News
courant/core/countthis/migrations/0001_initial.py
1
1306
from south.db import db from django.db import models from courant.core.countthis.models import * class Migration: def forwards(self, orm): # Adding model 'Statistic' db.create_table('countthis_statistic', ( ('view_count', models.PositiveIntegerField(default=0)), ('email_count', models.PositiveIntegerField(default=0)), ('created_at', CreationDateTimeField()), ('object_id', models.PositiveIntegerField()), ('comment_count', models.PositiveIntegerField(default=0)), ('content_type', models.ForeignKey(orm['contenttypes.ContentType'])), ('path', models.CharField(max_length=255)), ('id', models.AutoField(primary_key=True)), )) db.send_create_signal('countthis', ['Statistic']) def backwards(self, orm): # Deleting model 'Statistic' db.delete_table('countthis_statistic') models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label','model'),)", 'db_table': "'django_content_type'"}, '_stub': True, 'id': ('models.AutoField', [], {'primary_key': 'True'}), } }
bsd-3-clause
waqasbhatti/astrobase
astrobase/checkplot/pkl_xmatch.py
2
15851
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # pkl_xmatch.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Feb 2019 # License: MIT. ''' This contains utility functions that support the checkplot.pkl xmatch functionality. ''' ############# ## LOGGING ## ############# import logging from astrobase import log_sub, log_fmt, log_date_fmt DEBUG = False if DEBUG: level = logging.DEBUG else: level = logging.INFO LOGGER = logging.getLogger(__name__) logging.basicConfig( level=level, style=log_sub, format=log_fmt, datefmt=log_date_fmt, ) LOGDEBUG = LOGGER.debug LOGINFO = LOGGER.info LOGWARNING = LOGGER.warning LOGERROR = LOGGER.error LOGEXCEPTION = LOGGER.exception ############# ## IMPORTS ## ############# import os import os.path import gzip import sys import json import pickle import numpy as np # import sps.cKDTree for external catalog xmatches from scipy.spatial import cKDTree ################### ## LOCAL IMPORTS ## ################### from .pkl_utils import _xyzdist_to_distarcsec from .pkl_io import _write_checkplot_picklefile ######################################### ## XMATCHING AGAINST EXTERNAL CATALOGS ## ######################################### def _parse_xmatch_catalog_header(xc, xk): ''' This parses the header for a catalog file and returns it as a file object. Parameters ---------- xc : str The file name of an xmatch catalog prepared previously. xk : list of str This is a list of column names to extract from the xmatch catalog. 
Returns ------- tuple The tuple returned is of the form:: (infd: the file object associated with the opened xmatch catalog, catdefdict: a dict describing the catalog column definitions, catcolinds: column number indices of the catalog, catcoldtypes: the numpy dtypes of the catalog columns, catcolnames: the names of each catalog column, catcolunits: the units associated with each catalog column) ''' catdef = [] # read in this catalog and transparently handle gzipped files if xc.endswith('.gz'): infd = gzip.open(xc,'rb') else: infd = open(xc,'rb') # read in the defs for line in infd: if line.decode().startswith('#'): catdef.append( line.decode().replace('#','').strip().rstrip('\n') ) if not line.decode().startswith('#'): break if not len(catdef) > 0: LOGERROR("catalog definition not parseable " "for catalog: %s, skipping..." % xc) return None catdef = ' '.join(catdef) catdefdict = json.loads(catdef) catdefkeys = [x['key'] for x in catdefdict['columns']] catdefdtypes = [x['dtype'] for x in catdefdict['columns']] catdefnames = [x['name'] for x in catdefdict['columns']] catdefunits = [x['unit'] for x in catdefdict['columns']] # get the correct column indices and dtypes for the requested columns # from the catdefdict catcolinds = [] catcoldtypes = [] catcolnames = [] catcolunits = [] for xkcol in xk: if xkcol in catdefkeys: xkcolind = catdefkeys.index(xkcol) catcolinds.append(xkcolind) catcoldtypes.append(catdefdtypes[xkcolind]) catcolnames.append(catdefnames[xkcolind]) catcolunits.append(catdefunits[xkcolind]) return (infd, catdefdict, catcolinds, catcoldtypes, catcolnames, catcolunits) def load_xmatch_external_catalogs(xmatchto, xmatchkeys, outfile=None): '''This loads the external xmatch catalogs into a dict for use in an xmatch. Parameters ---------- xmatchto : list of str This is a list of paths to all the catalog text files that will be loaded. The text files must be 'CSVs' that use the '|' character as the separator betwen columns. 
These files should all begin with a header in JSON format on lines starting with the '#' character. this header will define the catalog and contains the name of the catalog and the column definitions. Column definitions must have the column name and the numpy dtype of the columns (in the same format as that expected for the numpy.genfromtxt function). Any line that does not begin with '#' is assumed to be part of the columns in the catalog. An example is shown below:: # {"name":"NSVS catalog of variable stars", # "columns":[ # {"key":"objectid", "dtype":"U20", "name":"Object ID", "unit": null}, # {"key":"ra", "dtype":"f8", "name":"RA", "unit":"deg"}, # {"key":"decl","dtype":"f8", "name": "Declination", "unit":"deg"}, # {"key":"sdssr","dtype":"f8","name":"SDSS r", "unit":"mag"}, # {"key":"vartype","dtype":"U20","name":"Variable type", "unit":null} # ], # "colra":"ra", # "coldec":"decl", # "description":"Contains variable stars from the NSVS catalog"} objectid1 | 45.0 | -20.0 | 12.0 | detached EB objectid2 | 145.0 | 23.0 | 10.0 | RRab objectid3 | 12.0 | 11.0 | 14.0 | Cepheid . . . xmatchkeys : list of lists This is the list of lists of column names (as str) to get out of each `xmatchto` catalog. This should be the same length as `xmatchto` and each element here will apply to the respective file in `xmatchto`. outfile : str or None If this is not None, set this to the name of the pickle to write the collected xmatch catalogs to. this pickle can then be loaded transparently by the :py:func:`astrobase.checkplot.pkl.checkplot_dict`, :py:func:`astrobase.checkplot.pkl.checkplot_pickle` functions to provide xmatch info to the :py:func:`astrobase.checkplot.pkl_xmatch.xmatch_external_catalogs` function below. If this is None, will return the loaded xmatch catalogs directly. This will be a huge dict, so make sure you have enough RAM. 
Returns ------- str or dict Based on the `outfile` kwarg, will either return the path to a collected xmatch pickle file or the collected xmatch dict. ''' outdict = {} for xc, xk in zip(xmatchto, xmatchkeys): parsed_catdef = _parse_xmatch_catalog_header(xc, xk) if not parsed_catdef: continue (infd, catdefdict, catcolinds, catcoldtypes, catcolnames, catcolunits) = parsed_catdef # get the specified columns out of the catalog catarr = np.genfromtxt(infd, usecols=catcolinds, names=xk, dtype=','.join(catcoldtypes), comments='#', delimiter='|', autostrip=True) infd.close() catshortname = os.path.splitext(os.path.basename(xc))[0] catshortname = catshortname.replace('.csv','') # # make a kdtree for this catalog # # get the ra and decl columns objra, objdecl = (catarr[catdefdict['colra']], catarr[catdefdict['coldec']]) # get the xyz unit vectors from ra,decl cosdecl = np.cos(np.radians(objdecl)) sindecl = np.sin(np.radians(objdecl)) cosra = np.cos(np.radians(objra)) sinra = np.sin(np.radians(objra)) xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl)) # generate the kdtree kdt = cKDTree(xyz,copy_data=True) # generate the outdict element for this catalog catoutdict = {'kdtree':kdt, 'data':catarr, 'columns':xk, 'colnames':catcolnames, 'colunits':catcolunits, 'name':catdefdict['name'], 'desc':catdefdict['description']} outdict[catshortname] = catoutdict if outfile is not None: # if we're on OSX, we apparently need to save the file in chunks smaller # than 2 GB to make it work right. can't load pickles larger than 4 GB # either, but 3 GB < total size < 4 GB appears to be OK when loading. # also see: https://bugs.python.org/issue24658. 
# fix adopted from: https://stackoverflow.com/a/38003910 if sys.platform == 'darwin': dumpbytes = pickle.dumps(outdict, protocol=pickle.HIGHEST_PROTOCOL) max_bytes = 2**31 - 1 with open(outfile, 'wb') as outfd: for idx in range(0, len(dumpbytes), max_bytes): outfd.write(dumpbytes[idx:idx+max_bytes]) else: with open(outfile, 'wb') as outfd: pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL) return outfile else: return outdict def xmatch_external_catalogs(checkplotdict, xmatchinfo, xmatchradiusarcsec=2.0, returndirect=False, updatexmatch=True, savepickle=None): '''This matches the current object in the checkplotdict to all of the external match catalogs specified. Parameters ---------- checkplotdict : dict This is a checkplotdict, generated by either the `checkplot_dict` function, or read in from a `_read_checkplot_picklefile` function. This must have a structure somewhat like the following, where the indicated keys below are required:: {'objectid': the ID assigned to this object 'objectinfo': {'objectid': ID assigned to this object, 'ra': right ascension of the object in decimal deg, 'decl': declination of the object in decimal deg}} xmatchinfo : str or dict This is either the xmatch dict produced by the function :py:func:`astrobase.checkplot.pkl_xmatch.load_xmatch_external_catalogs` above, or the path to the xmatch info pickle file produced by that function. xmatchradiusarcsec : float This is the cross-matching radius to use in arcseconds. returndirect : bool If this is True, will only return the xmatch results as a dict. If this False, will return the checkplotdict with the xmatch results added in as a key-val pair. updatexmatch : bool This function will look for an existing 'xmatch' key in the input checkplotdict indicating that an xmatch has been performed before. If `updatexmatch` is set to True, the xmatch results will be added onto (e.g. when xmatching to additional catalogs after the first run). 
If this is set to False, the xmatch key-val pair will be completely overwritten. savepickle : str or None If this is None, it must be a path to where the updated checkplotdict will be written to as a new checkplot pickle. If this is False, only the updated checkplotdict is returned. Returns ------- dict or str If `savepickle` is False, this returns a checkplotdict, with the xmatch results added in. An 'xmatch' key will be added to this dict, with something like the following dict as the value:: {'xmatchradiusarcsec':xmatchradiusarcsec, 'catalog1':{'name':'Catalog of interesting things', 'found':True, 'distarcsec':0.7, 'info':{'objectid':...,'ra':...,'decl':...,'desc':...}}, 'catalog2':{'name':'Catalog of more interesting things', 'found':False, 'distarcsec':nan, 'info':None}, . . . ....} This will contain the matches of the object in the input checkplotdict to all of the catalogs provided in `xmatchinfo`. If `savepickle` is True, will return the path to the saved checkplot pickle file. ''' # load the xmatch info if isinstance(xmatchinfo, str) and os.path.exists(xmatchinfo): with open(xmatchinfo,'rb') as infd: xmatchdict = pickle.load(infd) elif isinstance(xmatchinfo, dict): xmatchdict = xmatchinfo else: LOGERROR("can't figure out xmatch info, can't xmatch, skipping...") return checkplotdict # # generate the xmatch spec # # get our ra, decl objra = checkplotdict['objectinfo']['ra'] objdecl = checkplotdict['objectinfo']['decl'] cosdecl = np.cos(np.radians(objdecl)) sindecl = np.sin(np.radians(objdecl)) cosra = np.cos(np.radians(objra)) sinra = np.sin(np.radians(objra)) objxyz = np.column_stack((cosra*cosdecl, sinra*cosdecl, sindecl)) # this is the search distance in xyz unit vectors xyzdist = 2.0 * np.sin(np.radians(xmatchradiusarcsec/3600.0)/2.0) # # now search in each external catalog # xmatchresults = {} extcats = sorted(xmatchdict.keys()) for ecat in extcats: # get the kdtree kdt = xmatchdict[ecat]['kdtree'] # look up the coordinates kdt_dist, kdt_ind = 
kdt.query(objxyz, k=1, distance_upper_bound=xyzdist) # sort by matchdist mdsorted = np.argsort(kdt_dist) matchdists = kdt_dist[mdsorted] matchinds = kdt_ind[mdsorted] if matchdists[np.isfinite(matchdists)].size == 0: xmatchresults[ecat] = {'name':xmatchdict[ecat]['name'], 'desc':xmatchdict[ecat]['desc'], 'found':False, 'distarcsec':None, 'info':None} else: for md, mi in zip(matchdists, matchinds): if np.isfinite(md) and md < xyzdist: infodict = {} distarcsec = _xyzdist_to_distarcsec(md) for col in xmatchdict[ecat]['columns']: coldata = xmatchdict[ecat]['data'][col][mi] if isinstance(coldata, str): coldata = coldata.strip() infodict[col] = coldata xmatchresults[ecat] = { 'name':xmatchdict[ecat]['name'], 'desc':xmatchdict[ecat]['desc'], 'found':True, 'distarcsec':distarcsec, 'info':infodict, 'colkeys':xmatchdict[ecat]['columns'], 'colnames':xmatchdict[ecat]['colnames'], 'colunit':xmatchdict[ecat]['colunits'], } break # # should now have match results for all external catalogs # if returndirect: return xmatchresults else: if updatexmatch and 'xmatch' in checkplotdict: checkplotdict['xmatch'].update(xmatchresults) else: checkplotdict['xmatch'] = xmatchresults if savepickle: cpf = _write_checkplot_picklefile(checkplotdict, outfile=savepickle, protocol=4) return cpf else: return checkplotdict
mit
ydaniv/quilt
quilt/remote/environ.py
1
1491
from fabric.api import env, task, roles, run, prefix, execute import cuisine from quilt import utilities @roles('app') @task def make(): utilities.notify(u'Making the virtual environment.') run('mkvirtualenv ' + env.project_name) run('mkdir ' + env.project_root) run('setvirtualenvproject ' + env.project_env + ' ' + env.project_root) @roles('app') @task def destroy(): utilities.notify(u'Destroying the whole virtual environment.') run('rmvirtualenv ' + env.project_name) run('rm -r ' + env.project_root) @roles('app') @task def clean(): utilities.notify(u'Clean all dependencies in the virtual environment.') run('rmvirtualenv ' + env.project_name) run('mkvirtualenv ' + env.project_name) run('setvirtualenvproject ' + env.project_env + ' ' + env.project_root) @task def ensure(): utilities.notify(u'Ensuring all project dependencies are present.') execute(pip) execute(ensure_settings) @roles('app') @task def ensure_settings(): utilities.notify(u'Configuring production settings.') with prefix(env.workon): context = env content = cuisine.text_template(env.project_config_template, context) cuisine.file_write(env.project_config_file, content) run(env.deactivate) @roles('app') @task def pip(): utilities.notify(u'Ensuring all Python dependencies are present.') with prefix(env.workon): run('pip install -U -r requirements.txt') run(env.deactivate)
bsd-2-clause
halfss/zerorpc-python
zerorpc/events.py
21
9075
# -*- coding: utf-8 -*- # Open Source Initiative OSI - The MIT License (MIT):Licensing # # The MIT License (MIT) # Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com) # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import msgpack import gevent.pool import gevent.queue import gevent.event import gevent.local import gevent.coros import gevent_zmq as zmq from .context import Context class Sender(object): def __init__(self, socket): self._socket = socket self._send_queue = gevent.queue.Queue(maxsize=0) self._send_task = gevent.spawn(self._sender) def __del__(self): self.close() def close(self): if self._send_task: self._send_task.kill() def _sender(self): running = True for parts in self._send_queue: for i in xrange(len(parts) - 1): try: self._socket.send(parts[i], flags=zmq.SNDMORE) except gevent.GreenletExit: if i == 0: return running = False self._socket.send(parts[i], flags=zmq.SNDMORE) self._socket.send(parts[-1]) if not running: return def __call__(self, parts): self._send_queue.put(parts) class Receiver(object): def __init__(self, socket): self._socket = socket self._recv_queue = gevent.queue.Queue(maxsize=0) self._recv_task = gevent.spawn(self._recver) def __del__(self): self.close() def close(self): if self._recv_task: self._recv_task.kill() def _recver(self): running = True while True: parts = [] while True: try: part = self._socket.recv() except gevent.GreenletExit: running = False if len(parts) == 0: return part = self._socket.recv() parts.append(part) if not self._socket.getsockopt(zmq.RCVMORE): break if not running: break self._recv_queue.put(parts) def __call__(self): return self._recv_queue.get() class Event(object): __slots__ = [ '_name', '_args', '_header' ] def __init__(self, name, args, context, header=None): self._name = name self._args = args if header is None: self._header = { 'message_id': context.new_msgid(), 'v': 3 } else: self._header = header @property def header(self): return self._header @property def name(self): return self._name @name.setter def name(self, v): self._name = v @property def args(self): return self._args def pack(self): return msgpack.Packer().pack((self._header, self._name, self._args)) @staticmethod def unpack(blob): unpacker = 
msgpack.Unpacker() unpacker.feed(blob) unpacked_msg = unpacker.unpack() try: (header, name, args) = unpacked_msg except Exception as e: raise Exception('invalid msg format "{0}": {1}'.format( unpacked_msg, e)) # Backward compatibility if not isinstance(header, dict): header = {} return Event(name, args, None, header) def __str__(self, ignore_args=False): if ignore_args: args = '[...]' else: args = self._args try: args = '<<{0}>>'.format(str(self.unpack(self._args))) except: pass return '{0} {1} {2}'.format(self._name, self._header, args) class Events(object): def __init__(self, zmq_socket_type, context=None): self._zmq_socket_type = zmq_socket_type self._context = context or Context.get_instance() self._socket = zmq.Socket(self._context, zmq_socket_type) self._send = self._socket.send_multipart self._recv = self._socket.recv_multipart if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER): self._send = Sender(self._socket) if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER): self._recv = Receiver(self._socket) @property def recv_is_available(self): return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER) def __del__(self): try: if not self._socket.closed: self.close() except AttributeError: pass def close(self): try: self._send.close() except AttributeError: pass try: self._recv.close() except AttributeError: pass self._socket.close() def _resolve_endpoint(self, endpoint, resolve=True): if resolve: endpoint = self._context.hook_resolve_endpoint(endpoint) if isinstance(endpoint, (tuple, list)): r = [] for sub_endpoint in endpoint: r.extend(self._resolve_endpoint(sub_endpoint, resolve)) return r return [endpoint] def connect(self, endpoint, resolve=True): r = [] for endpoint_ in self._resolve_endpoint(endpoint, resolve): r.append(self._socket.connect(endpoint_)) return r def bind(self, endpoint, resolve=True): r = [] for endpoint_ in self._resolve_endpoint(endpoint, resolve): r.append(self._socket.bind(endpoint_)) return r 
def create_event(self, name, args, xheader={}): event = Event(name, args, context=self._context) for k, v in xheader.items(): if k == 'zmqid': continue event.header[k] = v return event def emit_event(self, event, identity=None): if identity is not None: parts = list(identity) parts.extend(['', event.pack()]) elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER): parts = ('', event.pack()) else: parts = (event.pack(),) self._send(parts) def emit(self, name, args, xheader={}): event = self.create_event(name, args, xheader) identity = xheader.get('zmqid', None) return self.emit_event(event, identity) def recv(self): parts = self._recv() if len(parts) == 1: identity = None blob = parts[0] else: identity = parts[0:-2] blob = parts[-1] event = Event.unpack(blob) if identity is not None: event.header['zmqid'] = identity return event def setsockopt(self, *args): return self._socket.setsockopt(*args) @property def context(self): return self._context class WrappedEvents(object): def __init__(self, channel): self._channel = channel def close(self): pass @property def recv_is_available(self): return self._channel.recv_is_available def create_event(self, name, args, xheader={}): event = Event(name, args, self._channel.context) event.header.update(xheader) return event def emit_event(self, event, identity=None): event_payload = (event.header, event.name, event.args) wrapper_event = self._channel.create_event('w', event_payload) self._channel.emit_event(wrapper_event) def emit(self, name, args, xheader={}): wrapper_event = self.create_event(name, args, xheader) self.emit_event(wrapper_event) def recv(self, timeout=None): wrapper_event = self._channel.recv() (header, name, args) = wrapper_event.args return Event(name, args, None, header) @property def context(self): return self._channel.context
mit
numba/numba
numba/testing/__init__.py
6
1976
import os import sys import functools import unittest import traceback from fnmatch import fnmatch from os.path import join, isfile, relpath, normpath, splitext from .main import NumbaTestProgram, SerialSuite, make_tag_decorator from numba.core import config def load_testsuite(loader, dir): """Find tests in 'dir'.""" try: suite = unittest.TestSuite() files = [] for f in os.listdir(dir): path = join(dir, f) if isfile(path) and fnmatch(f, 'test_*.py'): files.append(f) elif isfile(join(path, '__init__.py')): suite.addTests(loader.discover(path)) for f in files: # turn 'f' into a filename relative to the toplevel dir... f = relpath(join(dir, f), loader._top_level_dir) # ...and translate it to a module name. f = splitext(normpath(f.replace(os.path.sep, '.')))[0] suite.addTests(loader.loadTestsFromName(f)) return suite except Exception: traceback.print_exc(file=sys.stderr) sys.exit(-1) def run_tests(argv=None, defaultTest=None, topleveldir=None, xmloutput=None, verbosity=1, nomultiproc=False): """ args ---- - xmloutput [str or None] Path of XML output directory (optional) - verbosity [int] Verbosity level of tests output Returns the TestResult object after running the test *suite*. """ if xmloutput is not None: import xmlrunner runner = xmlrunner.XMLTestRunner(output=xmloutput) else: runner = None prog = NumbaTestProgram(argv=argv, module=None, defaultTest=defaultTest, topleveldir=topleveldir, testRunner=runner, exit=False, verbosity=verbosity, nomultiproc=nomultiproc) return prog.result
bsd-2-clause
tmerrick1/spack
var/spack/repos/builtin.mock/packages/indirect-mpich/package.py
5
1664
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class IndirectMpich(Package): """Test case for a package that depends on MPI and one of its dependencies requires a *particular version* of MPI. """ homepage = "http://www.example.com" url = "http://www.example.com/indirect_mpich-1.0.tar.gz" version(1.0, 'foobarbaz') depends_on('mpi') depends_on('direct-mpich') def install(self, spec, prefix): pass
lgpl-2.1
torchbox/colourlens
coloursite/wsgi_webfaction.py
1
1442
""" WSGI config for coloursite project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "coloursite.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "coloursite.settings_webfaction") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
mit
cgstudiomap/cgstudiomap
main/parts/product-attribute/product_icecat/wizard/__init__.py
2
1062
# -*- encoding: utf-8 -*- ############################################################################################ # # OpenERP, Open Source Management Solution # Copyright (C) 2010 Zikzakmedia S.L. (<http://www.zikzakmedia.com>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################################ import wizard_product_icecat
agpl-3.0
aricooperman/Jzipline
zipline/pipeline/api_utils.py
6
1648
""" Utilities for creating public APIs (e.g. argument validation decorators). """ from zipline.utils.input_validation import preprocess def restrict_to_dtype(dtype, message_template): """ A factory for decorators that restrict Term methods to only be callable on Terms with a specific dtype. This is conceptually similar to zipline.utils.input_validation.expect_dtypes, but provides more flexibility for providing error messages that are specifically targeting Term methods. Parameters ---------- dtype : numpy.dtype The dtype on which the decorated method may be called. message_template : str A template for the error message to be raised. `message_template.format` will be called with keyword arguments `method_name`, `expected_dtype`, and `received_dtype`. Usage ----- @restrict_to_dtype( dtype=float64_dtype, message_template=( "{method_name}() was called on a factor of dtype {received_dtype}." "{method_name}() requires factors of dtype{expected_dtype}." ), ) def some_factor_method(self, ...): self.stuff_that_requires_being_float64(...) """ def processor(term_method, _, term_instance): term_dtype = term_instance.dtype if term_dtype != dtype: raise TypeError( message_template.format( method_name=term_method.__name__, expected_dtype=dtype.name, received_dtype=term_dtype, ) ) return term_instance return preprocess(self=processor)
apache-2.0
okwow123/djangol2
example/env/lib/python2.7/site-packages/django/contrib/sites/managers.py
119
2079
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.conf import settings from django.core import checks from django.core.exceptions import FieldDoesNotExist from django.db import models class CurrentSiteManager(models.Manager): "Use this to limit objects to those associated with the current site." use_in_migrations = True def __init__(self, field_name=None): super(CurrentSiteManager, self).__init__() self.__field_name = field_name def check(self, **kwargs): errors = super(CurrentSiteManager, self).check(**kwargs) errors.extend(self._check_field_name()) return errors def _check_field_name(self): field_name = self._get_field_name() try: field = self.model._meta.get_field(field_name) except FieldDoesNotExist: return [ checks.Error( "CurrentSiteManager could not find a field named '%s'." % field_name, obj=self, id='sites.E001', ) ] if not field.many_to_many and not isinstance(field, (models.ForeignKey)): return [ checks.Error( "CurrentSiteManager cannot use '%s.%s' as it is not a foreign key or a many-to-many field." % ( self.model._meta.object_name, field_name ), obj=self, id='sites.E002', ) ] return [] def _get_field_name(self): """ Return self.__field_name or 'site' or 'sites'. """ if not self.__field_name: try: self.model._meta.get_field('site') except FieldDoesNotExist: self.__field_name = 'sites' else: self.__field_name = 'site' return self.__field_name def get_queryset(self): return super(CurrentSiteManager, self).get_queryset().filter( **{self._get_field_name() + '__id': settings.SITE_ID})
mit
BartDeWaal/libsigrokdecode
decoders/swd/__init__.py
13
1280
## ## This file is part of the libsigrokdecode project. ## ## Copyright (C) 2014 Angus Gratton <gus@projectgus.com> ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ## ''' This PD decodes the ARM SWD (version 1) protocol, as described in the "ARM Debug Interface v5.2" Architecture Specification. Details: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ihi0031c/index.html (Registration required) Not supported: * Turnaround periods other than the default 1, as set in DLCR.TURNROUND (should be trivial to add) * SWD protocol version 2 (multi-drop support, etc.) ''' from .pd import Decoder
gpl-3.0
adelton/django
tests/template_tests/test_engine.py
199
3971
import os from django.template import Context from django.template.engine import Engine from django.test import SimpleTestCase, ignore_warnings from django.utils.deprecation import RemovedInDjango110Warning from .utils import ROOT, TEMPLATE_DIR OTHER_DIR = os.path.join(ROOT, 'other_templates') @ignore_warnings(category=RemovedInDjango110Warning) class DeprecatedRenderToStringTest(SimpleTestCase): def setUp(self): self.engine = Engine( dirs=[TEMPLATE_DIR], libraries={'custom': 'template_tests.templatetags.custom'}, ) def test_basic_context(self): self.assertEqual( self.engine.render_to_string('test_context.html', {'obj': 'test'}), 'obj:test\n', ) def test_existing_context_kept_clean(self): context = Context({'obj': 'before'}) output = self.engine.render_to_string( 'test_context.html', {'obj': 'after'}, context_instance=context, ) self.assertEqual(output, 'obj:after\n') self.assertEqual(context['obj'], 'before') def test_no_empty_dict_pushed_to_stack(self): """ #21741 -- An empty dict should not be pushed to the context stack when render_to_string is called without a context argument. """ # The stack should have a length of 1, corresponding to the builtins self.assertEqual( '1', self.engine.render_to_string('test_context_stack.html').strip(), ) self.assertEqual( '1', self.engine.render_to_string( 'test_context_stack.html', context_instance=Context() ).strip(), ) class LoaderTests(SimpleTestCase): def test_origin(self): engine = Engine(dirs=[TEMPLATE_DIR], debug=True) template = engine.get_template('index.html') self.assertEqual(template.origin.template_name, 'index.html') def test_loader_priority(self): """ #21460 -- Check that the order of template loader works. 
""" loaders = [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ] engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders) template = engine.get_template('priority/foo.html') self.assertEqual(template.render(Context()), 'priority\n') def test_cached_loader_priority(self): """ Check that the order of template loader works. Refs #21460. """ loaders = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders) template = engine.get_template('priority/foo.html') self.assertEqual(template.render(Context()), 'priority\n') template = engine.get_template('priority/foo.html') self.assertEqual(template.render(Context()), 'priority\n') @ignore_warnings(category=RemovedInDjango110Warning) class TemplateDirsOverrideTests(SimpleTestCase): DIRS = ((OTHER_DIR, ), [OTHER_DIR]) def setUp(self): self.engine = Engine() def test_render_to_string(self): for dirs in self.DIRS: self.assertEqual( self.engine.render_to_string('test_dirs.html', dirs=dirs), 'spam eggs\n', ) def test_get_template(self): for dirs in self.DIRS: template = self.engine.get_template('test_dirs.html', dirs=dirs) self.assertEqual(template.render(Context()), 'spam eggs\n') def test_select_template(self): for dirs in self.DIRS: template = self.engine.select_template(['test_dirs.html'], dirs=dirs) self.assertEqual(template.render(Context()), 'spam eggs\n')
bsd-3-clause
balloob/home-assistant
script/scaffold/templates/config_flow/tests/test_config_flow.py
5
3028
"""Test the NEW_NAME config flow.""" from homeassistant import config_entries, setup from homeassistant.components.NEW_DOMAIN.config_flow import CannotConnect, InvalidAuth from homeassistant.components.NEW_DOMAIN.const import DOMAIN from tests.async_mock import patch async def test_form(hass): """Test we get the form.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch( "homeassistant.components.NEW_DOMAIN.config_flow.PlaceholderHub.authenticate", return_value=True, ), patch( "homeassistant.components.NEW_DOMAIN.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.NEW_DOMAIN.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { "host": "1.1.1.1", "username": "test-username", "password": "test-password", }, ) await hass.async_block_till_done() assert result2["type"] == "create_entry" assert result2["title"] == "Name of the device" assert result2["data"] == { "host": "1.1.1.1", "username": "test-username", "password": "test-password", } assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 async def test_form_invalid_auth(hass): """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.NEW_DOMAIN.config_flow.PlaceholderHub.authenticate", side_effect=InvalidAuth, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { "host": "1.1.1.1", "username": "test-username", "password": "test-password", }, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "invalid_auth"} async def test_form_cannot_connect(hass): """Test we handle cannot connect error.""" result = 
await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.NEW_DOMAIN.config_flow.PlaceholderHub.authenticate", side_effect=CannotConnect, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { "host": "1.1.1.1", "username": "test-username", "password": "test-password", }, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "cannot_connect"}
apache-2.0
aspose-cells/Aspose.Cells-for-Cloud
SDKs/Aspose.Cells-Cloud-SDK-for-Python/asposecellscloud/models/Row.py
4
1245
#!/usr/bin/env python class Row(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.""" def __init__(self): """ Attributes: swaggerTypes (dict): The key is attribute name and the value is attribute type. attributeMap (dict): The key is attribute name and the value is json key in definition. """ self.swaggerTypes = { 'GroupLevel': 'int', 'Height': 'float', 'Index': 'int', 'IsBlank': 'bool', 'IsHeightMatched': 'bool', 'IsHidden': 'bool', 'Style': 'LinkElement', 'link': 'Link' } self.attributeMap = { 'GroupLevel': 'GroupLevel','Height': 'Height','Index': 'Index','IsBlank': 'IsBlank','IsHeightMatched': 'IsHeightMatched','IsHidden': 'IsHidden','Style': 'Style','link': 'link'} self.GroupLevel = None # int self.Height = None # float self.Index = None # int self.IsBlank = None # bool self.IsHeightMatched = None # bool self.IsHidden = None # bool self.Style = None # LinkElement self.link = None # Link
mit
benfinke/ns_python
build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/network/bridgegroup_binding.py
3
3925
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class bridgegroup_binding(base_resource): """ Binding class showing the resources that can be bound to bridgegroup_binding. """ def __init__(self) : self._id = 0 self.bridgegroup_nsip_binding = [] self.bridgegroup_vlan_binding = [] self.bridgegroup_nsip6_binding = [] @property def id(self) : ur"""The name of the bridge group.<br/>Minimum value = 1<br/>Maximum value = 1000. """ try : return self._id except Exception as e: raise e @id.setter def id(self, id) : ur"""The name of the bridge group.<br/>Minimum value = 1<br/>Maximum value = 1000 """ try : self._id = id except Exception as e: raise e @property def bridgegroup_vlan_bindings(self) : ur"""vlan that can be bound to bridgegroup. """ try : return self._bridgegroup_vlan_binding except Exception as e: raise e @property def bridgegroup_nsip_bindings(self) : ur"""nsip that can be bound to bridgegroup. 
""" try : return self._bridgegroup_nsip_binding except Exception as e: raise e @property def bridgegroup_nsip6_bindings(self) : ur"""nsip6 that can be bound to bridgegroup. """ try : return self._bridgegroup_nsip6_binding except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. """ try : result = service.payload_formatter.string_to_resource(bridgegroup_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.bridgegroup_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.id is not None : return str(self.id) return None except Exception as e : raise e @classmethod def get(self, service, id) : ur""" Use this API to fetch bridgegroup_binding resource. """ try : if type(id) is not list : obj = bridgegroup_binding() obj.id = id response = obj.get_resource(service) else : if id and len(id) > 0 : obj = [bridgegroup_binding() for _ in range(len(id))] for i in range(len(id)) : obj[i].id = id[i]; response[i] = obj[i].get_resource(service) return response except Exception as e: raise e class bridgegroup_binding_response(base_response) : def __init__(self, length=1) : self.bridgegroup_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.bridgegroup_binding = [bridgegroup_binding() for _ in range(length)]
apache-2.0
jck/pymtl
accel/mvmult/MatrixVecCOP_test.py
7
5546
#============================================================================== # MatrixVecMultLaneBL_test #============================================================================== import pytest from pymtl import * from pclib.test import TestSource, TestMemory from pclib.ifcs import mem_msgs from MatrixVecCOP import MatrixVecCOP #------------------------------------------------------------------------------ # SourceHarness #------------------------------------------------------------------------------ class SourceHarness( Model ): def __init__( s, nlanes, nmul_stages, mem_delay, src_delay, config_msgs, test_verilog ): cop_addr_nbits = 5 cop_data_nbits = 32 mem_addr_nbits = 32 mem_data_nbits = 32 memreq_params = mem_msgs.MemReqParams( mem_addr_nbits, mem_data_nbits ) memresp_params = mem_msgs.MemRespParams( mem_data_nbits ) s.src = TestSource ( cop_addr_nbits + cop_data_nbits, config_msgs, src_delay ) s.cop = MatrixVecCOP( nlanes, nmul_stages, cop_addr_nbits, cop_data_nbits, mem_addr_nbits, mem_data_nbits ) s.mem = TestMemory ( memreq_params, memresp_params, nlanes, mem_delay ) if test_verilog: s.cop = TranslationTool( s.cop ) assert nlanes > 0 s.nlanes = nlanes def elaborate_logic( s ): for i in range( s.nlanes ): s.connect( s.src.out, s.cop.from_cpu ) s.connect( s.cop.lane_req [i], s.mem.reqs [i] ) s.connect( s.cop.lane_resp[i], s.mem.resps[i] ) def done( s ): return s.src.done and s.cop.from_cpu.rdy def line_trace( s ): return "{} || {}".format( s.src.line_trace(), s.mem.line_trace() ) #------------------------------------------------------------------------------ # run_lane_managed_test #------------------------------------------------------------------------------ def run_lane_managed_test( dump_vcd, model, src_matrix, src_vector, dest_vector ): model.vcd_file = dump_vcd model.elaborate() sim = SimulationTool( model ) # Load the memory model.mem.load_memory( src_matrix ) model.mem.load_memory( src_vector ) # Run the simulation sim.reset() print while not 
model.done() and sim.ncycles < 80: sim.print_line_trace() sim.cycle() assert model.done() dest_addr = dest_vector[0] for i, dest_value in enumerate( dest_vector[1] ): assert model.mem.mem.mem[ dest_addr+i ] == dest_value # Add a couple extra ticks so that the VCD dump is nicer sim.cycle() sim.cycle() sim.cycle() #------------------------------------------------------------------------------ # config_msg #------------------------------------------------------------------------------ # Utility method for creating config messages def config_msg( addr, value ): return concat( Bits(3, addr), Bits(32, value) ) #------------------------------------------------------------------------------ # mem_array_32bit #------------------------------------------------------------------------------ # Utility function for creating arrays formatted for memory loading. from itertools import chain def mem_array_32bit( base_addr, data ): return [base_addr, list( chain.from_iterable([ [x,0,0,0] for x in data ] )) ] #------------------------------------------------------------------------------ # test_managed #------------------------------------------------------------------------------ # 5 1 3 1 16 # 1 1 1 . 2 = 6 # 1 2 1 3 8 # @pytest.mark.parametrize( ('mem_delay','nmul_stages'), [(0,1),(0,4),(5,1),(5,4)] ) def test_managed_1lane( dump_vcd, test_verilog, mem_delay, nmul_stages ): run_lane_managed_test( dump_vcd, SourceHarness( 1, nmul_stages, mem_delay, 0, [ config_msg( 1, 3), # size config_msg( 2, 0), # r_addr config_msg( 3, 80), # v_addr config_msg( 4, 160), # d_addr config_msg( 0, 1), # go ], test_verilog ), # NOTE: C++ has dummy data between rows when you have array**! 
mem_array_32bit( 0, [ 5, 1 ,3, 99, 1, 1 ,1, 99, 1, 2 ,1] ), mem_array_32bit( 80, [ 1, 2, 3 ]), mem_array_32bit(160, [16]), ) @pytest.mark.parametrize( ('mem_delay','nmul_stages'), [(0,1),(0,4),(5,1),(5,4)] ) def test_managed_3lane( dump_vcd, test_verilog, mem_delay, nmul_stages ): run_lane_managed_test( dump_vcd, SourceHarness( 3, nmul_stages, mem_delay, 0, [ config_msg( 1, 3), # size config_msg( 2, 0), # r_addr config_msg( 3, 80), # v_addr config_msg( 4, 160), # d_addr config_msg( 0, 1), # go ], test_verilog ), # NOTE: C++ has dummy data between rows when you have array**! mem_array_32bit( 0, [ 5, 1 ,3, 99, 1, 1 ,1, 99, 1, 2 ,1] ), mem_array_32bit( 80, [ 1, 2, 3 ]), mem_array_32bit(160, [16, 6, 8 ]), )
bsd-3-clause
jacenkow/inspire-next
inspirehep/modules/forms/fields/date.py
3
2262
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2012, 2013 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from datetime import date, datetime import six from wtforms import DateField from wtforms.validators import optional from ..field_base import INSPIREField __all__ = ['Date'] class Date(INSPIREField, DateField): def __init__(self, **kwargs): defaults = dict( icon='calendar', validators=[optional()], widget_classes="form-control" ) defaults.update(kwargs) super(Date, self).__init__(**defaults) def process_data(self, value): """ Called when loading data from Python (incoming objects can be either datetime objects or strings, depending on if they are loaded from an JSON or Python objects). """ if isinstance(value, six.string_types): self.object_data = datetime.strptime(value, self.format).date() elif isinstance(value, datetime): self.object_data = value.date() elif isinstance(value, date): self.object_data = value # Be sure to set both self.object_data and self.data due to internals # of Field.process() and draft_form_process_and_validate(). self.data = self.object_data @property def json_data(self): """ Serialize data into JSON serializalbe object """ # Just use _value() to format the date into a string. if self.data: return self.data.strftime(self.format) # pylint: disable-msg= return None
gpl-2.0
weblyzard/inscriptis
benchmarking/run_benchmarking.py
1
10529
#!/usr/bin/env python3 # coding:utf-8 """ Runs a benchmarking suite to compare speed and output of different implementations. """ import argparse from datetime import datetime import operator import os import signal import subprocess import sys import threading import urllib.request from time import time # # Import inscriptis (using the version in the project directory rather than # any installed module versions). # LYNX_BIN = '/usr/bin/lynx' BENCHMARKING_ROOT = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.join(BENCHMARKING_ROOT, '../src') sys.path.insert(0, os.path.abspath(SRC_DIR)) try: import inscriptis except ImportError: print('Inscriptis is not available. Please install it in order to ' 'compare with inscriptis.') # # Import third-party HTML 2 text converters. # try: from bs4 import BeautifulSoup except ImportError: print('BeautifulSoup is not available. Please install it in order to ' 'compare with BeautifulSoup.') try: import html2text except ImportError: print('html2text is not available. Please install it in order to ' 'compare with html2text.') try: import justext except ImportError: print('justext is not available. Please install it in order to compare ' 'with justext.') TRIES = 7 OUTFILE = 'speed_comparisons.txt' class AbstractHtmlConverter: """ An abstract HTML convert class. """ def get_text(self, html): """ Returns: a text representation of the given HTML snippet. """ raise NotImplementedError def benchmark(self, html): """ Benchmarks the classes HTML to text converter. Returns: A tuple of the required time and the obtained text representation. """ start_time = time() for _ in range(TRIES): text = self.get_text(html) return time() - start_time, text class BeautifulSoupHtmlConverter(AbstractHtmlConverter): """ Converts HTML to text using BeautifulSoup. 
""" name = 'BeautifulSoup' def __init__(self): self.available = 'bs4' in sys.modules def get_text(self, html): soup = BeautifulSoup(html, 'lxml') for script in soup(['script', 'style']): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(' ')) result = '\n'.join(chunk for chunk in chunks if chunk) return result class JustextConverter(AbstractHtmlConverter): """ Converts HTML to text using Justtext. """ name = 'Justtext' def __init__(self): self.available = 'justext' in sys.modules def get_text(self, html): paragraphs = justext.justext(html, stoplist='English') result = [paragraph.text for paragraph in paragraphs] return '\n'.join(result) class Html2TextConverter(AbstractHtmlConverter): """ Converts HTML to text using Html2Text. """ name = 'Html2Text' def __init__(self): self.available = 'html2text' in sys.modules def get_text(self, html): converter = html2text.HTML2Text() converter.ignore_links = True result = converter.handle(str(html)) return ''.join(result) class LynxConverter(AbstractHtmlConverter): """ Converts HTML to text using lynx. """ name = 'Lynx' def __init__(self): try: subprocess.call([LYNX_BIN, '-dump \'www.google.com\''], stdout=subprocess.PIPE) self.available = True except OSError: print('lynx can not be called. 
Please check in order to compare ' 'with lynx.') self.available = False def get_text(self, html): def kill_lynx(pid): os.kill(pid, signal.SIGKILL) os.waitpid(-1, os.WNOHANG) print('lynx killed') text = '' lynx_args = '-stdin -width=20000 -force_html -nocolor -dump -nolist ' \ '-nobold -display_charset=utf8' cmd = [LYNX_BIN, ] + lynx_args.split(' ') lynx = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) lynx.stdin.write(html.encode('utf8')) lynx.stdin.close() _t = threading.Timer(200.0, kill_lynx, args=[lynx.pid]) _t.start() text = lynx.stdout.read().decode('utf-8', 'replace') _t.cancel() return text class InscriptisHtmlConverter(AbstractHtmlConverter): """ Converts HTML to text using Inscriptis. """ name = 'Inscriptis' def __init__(self): self.available = 'inscriptis' in sys.modules if self.available: from inscriptis import get_text self.get_text = get_text def get_text(self, html): return self.get_text(html) timestamp = str(datetime.now()).replace(' ', '_').replace(':', '-')\ .split('.')[0] DEFAULT_RESULT_DIR = os.path.join(BENCHMARKING_ROOT, 'benchmarking_results', timestamp) DEFAULT_CACHE_DIR = os.path.join(BENCHMARKING_ROOT, 'html_cache') def save_to_file(algorithm, url, data, benchmarking_results_dir): """ Saves a benchmarking result to the given file. """ result_file = os.path.join(benchmarking_results_dir, '{}_{}.txt'.format(algorithm, url)) with open(result_file, 'w') as output_file: output_file.write(data) def get_speed_table(times): """ Provides the table which compares the conversion speed. 
""" fastest = 999999 for key, value in times.items(): if value < fastest: fastest = value longest_key = 0 longest_value = 0 for key, value in times.items(): if len(key) > longest_key: longest_key = len(key) if len(str(value)) > longest_value: longest_value = len(str(value)) sorted_times = sorted(times.items(), key=operator.itemgetter(1)) result = '' for key, value in sorted_times: difference = value - fastest if difference > 0: difference = '+{}'.format(difference) elif difference < 0: difference = '-{}'.format(difference) elif difference == 0: difference = '--> fastest' output = '{}{}: {}{} {}'.format(key, ' ' * (longest_key - len(key)), value, ' ' * (longest_value - len(str(value))), difference) result += output + '\n' return result def get_fname(url) -> str: """ Transforms a URL to a file name. """ trash = (('http://', ''), ('https://', ''), ('/', '-'), (':', '-'), ('%', '')) for key, value in trash: url = url.replace(key, value) return url[0:100] CONVERTER = (BeautifulSoupHtmlConverter(), JustextConverter(), Html2TextConverter(), LynxConverter(), InscriptisHtmlConverter()) def parse_args(): """ Parse optional benchmarking arguments. """ parser = argparse.ArgumentParser(description='Inscriptis benchmarking ' 'suite') parser.add_argument('converter', type=str, nargs='*', help='The list of converters to benchmark (options:' 'BeautifulSoup, Justext, Html2Text, Lynx, ' 'Inscriptis; default: all)') parser.add_argument('-u', '--benchmarking-urls', default=os.path.join(BENCHMARKING_ROOT, 'url_list.txt'), help='A list of URLs to use in the benchmark.') parser.add_argument('-r', '--benchmarking-results', default=DEFAULT_RESULT_DIR, help='Optional directory for saving the benchmarking ' 'results.') parser.add_argument('-c', '--cache', default=DEFAULT_CACHE_DIR, help='Optional cache directory for the retrieved Web ' 'pages.') return parser.parse_args() def benchmark(): """ Runs the benchmark. 
""" args = parse_args() # These are a few predefined urls the script will with open(args.benchmarking_urls) as url_list: sources = [url.strip() for url in url_list] if not os.path.exists(args.benchmarking_results): os.makedirs(args.benchmarking_results) if not os.path.exists(args.cache): os.makedirs(args.cache) for source in sources: source_name = get_fname(source) source_cache_path = os.path.join(args.cache, source_name) if os.path.exists(source_cache_path): html = open(source_cache_path).read() else: req = urllib.request.Request(source) try: html = urllib.request.urlopen(req).read().decode('utf-8') except UnicodeDecodeError: html = urllib.request.urlopen(req).read().decode('latin1') open(source_cache_path, 'w').write(html) with open(os.path.join(args.benchmarking_results, 'speed_comparisons.txt'), 'a') as output_file: output_file.write('\nURL: {}\n'.format(source_name)) print('\nURL: {}'.format(source_name)) times = {} for converter in CONVERTER: if converter.available and not args.converter or converter.name \ in args.converter: time_required, text = converter.benchmark(html) times[converter.name] = time_required save_to_file(converter.name, source_name, text, args.benchmarking_results) speed_table = get_speed_table(times) print(speed_table) with open(os.path.join(args.benchmarking_results, OUTFILE), 'a') as output_file: output_file.write(speed_table + '\n') with open(os.path.join(args.benchmarking_results, OUTFILE), 'a') as output_file: output_file.write('\n') if __name__ == '__main__': benchmark()
gpl-2.0
freak97/binutils
gdb/testsuite/gdb.python/py-pp-maint.py
13
2491
# Copyright (C) 2010-2016 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This file is part of the GDB testsuite. It tests python pretty # printers. import re import gdb.types import gdb.printing def lookup_function_lookup_test(val): class PrintFunctionLookup(object): def __init__(self, val): self.val = val def to_string(self): return ("x=<" + str(self.val["x"]) + "> y=<" + str(self.val["y"]) + ">") typename = gdb.types.get_basic_type(val.type).tag # Note: typename could be None. if typename == "function_lookup_test": return PrintFunctionLookup(val) return None class pp_s (object): def __init__(self, val): self.val = val def to_string(self): a = self.val["a"] b = self.val["b"] if a.address != b: raise Exception("&a(%s) != b(%s)" % (str(a.address), str(b))) return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">" class pp_ss (object): def __init__(self, val): self.val = val def to_string(self): return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">" def build_pretty_printer(): pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-test") pp.add_printer('struct s', '^struct s$', pp_s) pp.add_printer('s', '^s$', pp_s) # Use a lambda this time to exercise doing things this way. 
pp.add_printer('struct ss', '^struct ss$', lambda val: pp_ss(val)) pp.add_printer('ss', '^ss$', lambda val: pp_ss(val)) pp.add_printer('enum flag_enum', '^flag_enum$', gdb.printing.FlagEnumerationPrinter('enum flag_enum')) return pp gdb.printing.register_pretty_printer(gdb, lookup_function_lookup_test) my_pretty_printer = build_pretty_printer() gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
gpl-2.0
lxybox1/MissionPlanner
Lib/lib2to3/fixes/fix_itertools.py
55
1592
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363) imports from itertools are fixed in fix_itertools_import.py If itertools is imported as something else (ie: import itertools as it; it.izip(spam, eggs)) method calls will not get fixed. """ # Local imports from .. import fixer_base from ..fixer_util import Name class FixItertools(fixer_base.BaseFix): BM_compatible = True it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" PATTERN = """ power< it='itertools' trailer< dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > > | power< func=%(it_funcs)s trailer< '(' [any] ')' > > """ %(locals()) # Needs to be run after fix_(map|zip|filter) run_order = 6 def transform(self, node, results): prefix = None func = results['func'][0] if ('it' in results and func.value not in (u'ifilterfalse', u'izip_longest')): dot, it = (results['dot'], results['it']) # Remove the 'itertools' prefix = it.prefix it.remove() # Replace the node wich contains ('.', 'function') with the # function (to be consistant with the second part of the pattern) dot.remove() func.parent.replace(func) prefix = prefix or func.prefix func.replace(Name(func.value[1:], prefix=prefix))
gpl-3.0
yunxliu/crosswalk-test-suite
webapi/tct-fileapi-w3c-tests/inst.apk.py
1996
3186
#!/usr/bin/env python import os import shutil import glob import time import sys import subprocess from optparse import OptionParser, make_option SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARAMETERS = None ADB_CMD = "adb" def doCMD(cmd): # Do not need handle timeout in this short script, let tool do it print "-->> \"%s\"" % cmd output = [] cmd_return_code = 1 cmd_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) while True: output_line = cmd_proc.stdout.readline().strip("\r\n") cmd_return_code = cmd_proc.poll() if output_line == '' and cmd_return_code is not None: break sys.stdout.write("%s\n" % output_line) sys.stdout.flush() output.append(output_line) return (cmd_return_code, output) def uninstPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".apk"): cmd = "%s -s %s uninstall org.xwalk.%s" % ( ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0])) (return_code, output) = doCMD(cmd) for line in output: if "Failure" in line: action_status = False break return action_status def instPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".apk"): cmd = "%s -s %s install %s" % (ADB_CMD, PARAMETERS.device, os.path.join(root, file)) (return_code, output) = doCMD(cmd) for line in output: if "Failure" in line: action_status = False break return action_status def main(): try: usage = "usage: inst.py -i" opts_parser = OptionParser(usage=usage) opts_parser.add_option( "-s", dest="device", action="store", help="Specify device") opts_parser.add_option( "-i", dest="binstpkg", action="store_true", help="Install package") opts_parser.add_option( "-u", dest="buninstpkg", action="store_true", help="Uninstall package") global PARAMETERS (PARAMETERS, args) = opts_parser.parse_args() except Exception as e: print "Got wrong option: %s, exit ..." 
% e sys.exit(1) if not PARAMETERS.device: (return_code, output) = doCMD("adb devices") for line in output: if str.find(line, "\tdevice") != -1: PARAMETERS.device = line.split("\t")[0] break if not PARAMETERS.device: print "No device found" sys.exit(1) if PARAMETERS.binstpkg and PARAMETERS.buninstpkg: print "-i and -u are conflict" sys.exit(1) if PARAMETERS.buninstpkg: if not uninstPKGs(): sys.exit(1) else: if not instPKGs(): sys.exit(1) if __name__ == "__main__": main() sys.exit(0)
bsd-3-clause
disabler/isida3
plugins/shortlink.py
2
2408
#!/usr/bin/python # -*- coding: utf-8 -*- # --------------------------------------------------------------------------- # # # # Plugin for iSida Jabber Bot # # Copyright (C) diSabler <dsy@dsy.name> # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # # --------------------------------------------------------------------------- # SHORT_TINYURL = 'http://tinyurl.com/api-create.php?url=%s' SHORT_CLCK = 'http://clck.ru/--?url=%s' SHORT_QR = 'http://chart.apis.google.com/chart?cht=qr&chs=350x350&chld=M|2&chl=%s' def shorter_raw(type, jid, nick, text, url): text = text.strip() if text: msg = load_page(url % enidna(text).decode('utf-8')) else: msg = L('What?','%s/%s'%(jid,nick)) send_msg(type, jid, nick, msg) def short_clck(type, jid, nick, text): shorter_raw(type, jid, nick, text, SHORT_CLCK) def short_tinyurl(type, jid, nick, text): shorter_raw(type, jid, nick, text, SHORT_TINYURL) def short_qr(type, jid, nick, text): shorter_raw(type, jid, nick, text, SHORT_TINYURL % SHORT_QR) global execute execute = [(3, 'clck', short_clck, 2, 'URL Shortener'), (3, 'tinyurl', short_tinyurl, 2, 'URL Shortener'), (3, 'qr', short_qr, 2, 'QR-code generator')]
gpl-3.0
joeyjy/ayi-django
ms/accounts/migrations/0008_auto__add_ayi.py
1
6287
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Ayi' db.create_table(u'accounts_ayi', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), )) db.send_create_signal(u'accounts', ['Ayi']) def backwards(self, orm): # Deleting model 'Ayi' db.delete_table(u'accounts_ayi') models = { u'accounts.ayi': { 'Meta': {'ordering': "['name']", 'object_name': 'Ayi'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}) }, u'accounts.compound': { 'Meta': {'ordering': "['name']", 'object_name': 'Compound'}, 'cross_street': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'district': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'street_address': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'street_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}) }, u'accounts.myprofile': { 'Meta': {'object_name': 'MyProfile'}, 'apt_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'bldg_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'compound': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Compound']", 'null': 
'True', 'blank': 'True'}), 'cross': ('django.db.models.fields.CharField', [], {'max_length': '300'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mobile': ('django.db.models.fields.BigIntegerField', [], {}), 'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}), 'street': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'street_num': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['accounts']
mit
PanDAWMS/panda-harvester
pandaharvester/harvestertest/stageInTest_dpb.py
1
1815
import sys queueName = sys.argv[1] from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper queueConfigMapper = QueueConfigMapper() queueConfig = queueConfigMapper.get_queue(queueName) from pandaharvester.harvestercore.job_spec import JobSpec jobSpec = JobSpec() jobSpec.computingSite = sys.argv[1] jobSpec.jobParams = {'inFiles': 'EVNT.06820166._000001.pool.root.1', 'scopeIn': 'mc15_13TeV', 'fsize': '196196765', 'GUID': 'B7F387CD-1F97-1C47-88BD-D8785442C49D', 'checksum': 'ad:326e445d', 'ddmEndPointIn': 'MWT2_DATADISK', 'realDatasetsIn': 'mc15_13TeV:mc15_13TeV.301042.PowhegPythia8EvtGen_AZNLOCTEQ6L1_DYtautau_250M400.evgen.EVNT.e3649_tid06820166_00', } from pandaharvester.harvestercore.plugin_factory import PluginFactory pluginFactory = PluginFactory() # get plugin preparatorCore = pluginFactory.get_plugin(queueConfig.preparator) print ("plugin={0}".format(preparatorCore.__class__.__name__)) print ("testing preparation") tmpStat, tmpOut = preparatorCore.trigger_preparation(jobSpec) if tmpStat: print (" OK") else: print (" NG {0}".format(tmpOut)) print print ("testing status check") while True: tmpStat, tmpOut = preparatorCore.check_stage_in_status(jobSpec) if tmpStat is True: print (" OK") break elif tmpStat is False: print (" NG {0}".format(tmpOut)) sys.exit(1) else: print (" still running. sleep 1 min") time.sleep(60) print print ("checking path resolution") tmpStat, tmpOut = preparatorCore.resolve_input_paths(jobSpec) if tmpStat: print (" OK {0}".format(jobSpec.jobParams['inFilePaths'])) else: print (" NG {0}".format(tmpOut))
apache-2.0
ebonilla/AutoGP
autogp/datasets/mnist.py
2
3487
from __future__ import absolute_import import numpy as np from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.datasets.mnist import \ extract_images, extract_labels from tensorflow.python.framework import dtypes from .dataset import DataSet def process_mnist(images, dtype=dtypes.float32, reshape=True): if reshape: assert images.shape[3] == 1 images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. images = images.astype(np.float32) images = np.multiply(images, 1.0 / 255.0) return images def get_data_info(images): rows, cols = images.shape std = np.zeros(cols) mean = np.zeros(cols) for col in range(cols): std[col] = np.std(images[:, col]) mean[col] = np.mean(images[:, col]) return mean, std def standardize_data(images, means, stds): data = images.copy() rows, cols = data.shape for col in range(cols): if stds[col] == 0: data[:, col] = (data[:, col] - means[col]) else: data[:, col] = (data[:, col] - means[col]) / stds[col] return data def import_mnist(validation_size=0): """ This import mnist and saves the data as an object of our DataSet class :param concat_val: Concatenate training and validation :return: """ SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' ONE_HOT = True TRAIN_DIR = 'experiments/data/MNIST_data' local_file = base.maybe_download(TRAIN_IMAGES, TRAIN_DIR, SOURCE_URL + TRAIN_IMAGES) with open(local_file) as f: train_images = extract_images(f) local_file = base.maybe_download(TRAIN_LABELS, TRAIN_DIR, SOURCE_URL + TRAIN_LABELS) with open(local_file) as f: train_labels = extract_labels(f, one_hot=ONE_HOT) local_file = base.maybe_download(TEST_IMAGES, TRAIN_DIR, SOURCE_URL + TEST_IMAGES) with open(local_file) as f: test_images = extract_images(f) 
local_file = base.maybe_download(TEST_LABELS, TRAIN_DIR, SOURCE_URL + TEST_LABELS) with open(local_file) as f: test_labels = extract_labels(f, one_hot=ONE_HOT) validation_images = train_images[:validation_size] validation_labels = train_labels[:validation_size] train_images = train_images[validation_size:] train_labels = train_labels[validation_size:] # process images train_images = process_mnist(train_images) validation_images = process_mnist(validation_images) test_images = process_mnist(test_images) # standardize data train_mean, train_std = get_data_info(train_images) train_images = standardize_data(train_images, train_mean, train_std) validation_images = standardize_data( validation_images, train_mean, train_std) test_images = standardize_data(test_images, train_mean, train_std) data = DataSet(train_images, train_labels) test = DataSet(test_images, test_labels) val = DataSet(validation_images, validation_labels) return data, test, val
apache-2.0
n0max/servo
tests/wpt/web-platform-tests/tools/pytest/testing/test_genscript.py
194
1689
import pytest import sys @pytest.fixture(scope="module") def standalone(request): return Standalone(request) class Standalone: def __init__(self, request): self.testdir = request.getfuncargvalue("testdir") script = "mypytest" result = self.testdir.runpytest("--genscript=%s" % script) assert result.ret == 0 self.script = self.testdir.tmpdir.join(script) assert self.script.check() def run(self, anypython, testdir, *args): return testdir._run(anypython, self.script, *args) def test_gen(testdir, anypython, standalone): if sys.version_info >= (2,7): result = testdir._run(anypython, "-c", "import sys;print (sys.version_info >=(2,7))") assert result.ret == 0 if result.stdout.str() == "False": pytest.skip("genscript called from python2.7 cannot work " "earlier python versions") result = standalone.run(anypython, testdir, '--version') if result.ret == 2: result.stderr.fnmatch_lines(["*ERROR: setuptools not installed*"]) elif result.ret == 0: result.stderr.fnmatch_lines([ "*imported from*mypytest*" ]) p = testdir.makepyfile("def test_func(): assert 0") result = standalone.run(anypython, testdir, p) assert result.ret != 0 else: pytest.fail("Unexpected return code") def test_freeze_includes(): """ Smoke test for freeze_includes(), to ensure that it works across all supported python versions. """ includes = pytest.freeze_includes() assert len(includes) > 1 assert '_pytest.genscript' in includes
mpl-2.0
loopCM/chromium
build/android/gyp/create_device_library_links.py
9
3337
#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Creates symlinks to native libraries for an APK. The native libraries should have previously been pushed to the device (in options.target_dir). This script then creates links in an apk's lib/ folder to those native libraries. """ import json import optparse import os import sys from util import build_utils from util import md5_check BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..') sys.path.append(BUILD_ANDROID_DIR) from pylib import android_commands from pylib.utils import apk_helper def RunShellCommand(adb, cmd): output = adb.RunShellCommand(cmd) if output: raise Exception( 'Unexpected output running command: ' + cmd + '\n' + '\n'.join(output)) def CreateSymlinkScript(options): libraries = build_utils.ReadJson(options.libraries_json) link_cmd = ( 'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n' 'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s ' '$APK_LIBRARIES_DIR/%(lib_basename)s \n' ) script = '#!/bin/sh \n' for lib in libraries: script += link_cmd % { 'lib_basename': lib } with open(options.script_host_path, 'w') as scriptfile: scriptfile.write(script) def TriggerSymlinkScript(options): apk_package = apk_helper.GetPackageName(options.apk) apk_libraries_dir = '/data/data/%s/lib' % apk_package adb = android_commands.AndroidCommands() device_dir = os.path.dirname(options.script_device_path) mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' % { 'dir': device_dir }) RunShellCommand(adb, mkdir_cmd) adb.PushIfNeeded(options.script_host_path, options.script_device_path) trigger_cmd = ( 'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; ' 'STRIPPED_LIBRARIES_DIR=%(target_dir)s; ' '. 
%(script_device_path)s' ) % { 'apk_libraries_dir': apk_libraries_dir, 'target_dir': options.target_dir, 'script_device_path': options.script_device_path } RunShellCommand(adb, trigger_cmd) def main(argv): if not build_utils.IsDeviceReady(): build_utils.PrintBigWarning( 'Zero (or multiple) devices attached. Skipping creating symlinks.') return parser = optparse.OptionParser() parser.add_option('--apk', help='Path to the apk.') parser.add_option('--script-host-path', help='Path on the host for the symlink script.') parser.add_option('--script-device-path', help='Path on the device to push the created symlink script.') parser.add_option('--libraries-json', help='Path to the json list of native libraries.') parser.add_option('--target-dir', help='Device directory that contains the target libraries for symlinks.') parser.add_option('--stamp', help='Path to touch on success.') options, _ = parser.parse_args() required_options = ['apk', 'libraries_json', 'script_host_path', 'script_device_path', 'target_dir'] build_utils.CheckOptions(options, parser, required=required_options) CreateSymlinkScript(options) TriggerSymlinkScript(options) if options.stamp: build_utils.Touch(options.stamp) if __name__ == '__main__': sys.exit(main(sys.argv))
bsd-3-clause
katrid/django
django/contrib/admindocs/urls.py
574
1183
from django.conf.urls import url from django.contrib.admindocs import views urlpatterns = [ url('^$', views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'), name='django-admindocs-docroot'), url('^bookmarklets/$', views.BookmarkletsView.as_view(), name='django-admindocs-bookmarklets'), url('^tags/$', views.TemplateTagIndexView.as_view(), name='django-admindocs-tags'), url('^filters/$', views.TemplateFilterIndexView.as_view(), name='django-admindocs-filters'), url('^views/$', views.ViewIndexView.as_view(), name='django-admindocs-views-index'), url('^views/(?P<view>[^/]+)/$', views.ViewDetailView.as_view(), name='django-admindocs-views-detail'), url('^models/$', views.ModelIndexView.as_view(), name='django-admindocs-models-index'), url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$', views.ModelDetailView.as_view(), name='django-admindocs-models-detail'), url('^templates/(?P<template>.*)/$', views.TemplateDetailView.as_view(), name='django-admindocs-templates'), ]
bsd-3-clause
danceos/dosek
generator/coder/syscall_specialized.py
1
10418
from .syscall_full import FullSystemCalls from .elements import Statement, Comment from generator.tools import unwrap_seq from generator.analysis.AtomicBasicBlock import E,S from generator.analysis.SystemSemantic import SystemState class SpecializedSystemCalls(FullSystemCalls): def __init__(self, global_abb_info): super(SpecializedSystemCalls, self).__init__() self.global_abb_info = global_abb_info def Comment(self, kernelspace, comment, *args): kernelspace.add(Comment(comment % args )) def SetReady(self, kernelspace, abb, task): abb_info = self.global_abb_info.for_abb(abb) # If all currently possible running task abbs are <= prio, we # can simply increase the priority if abb_info.state_before.is_surely_suspended(task): self.Comment(kernelspace, "OPTIMIZATION: We surely know that the task is suspended") self.call_function(kernelspace, "scheduler_.SetReadyFromSuspended_impl", "void", [self.task_desc(task)]) self.stats.add_data(abb, "opt:SetReady:fromSuspended", task) elif abb_info.state_before.is_surely_ready(task): self.Comment(kernelspace, "OPTIMIZATION: Task %s is surely ready, we do not have to activate it.", task) self.stats.add_data(abb, "opt:SetReady:not-needed", task) else: self.call_function(kernelspace, "scheduler_.SetReady_impl", "void", [self.task_desc(task)]) self.stats.add_data(abb, "opt:SetReady:general", task) def SetSuspended(self, kernelspace, abb, task): abb_info = self.global_abb_info.for_abb(abb) assert abb_info.state_before.is_surely_ready(task) self.call_function(kernelspace, "scheduler_.SetSuspended_impl", "void", [self.task_desc(task)]) def Dispatch(self, kernelspace, abb, task): abb_info = self.global_abb_info.for_abb(abb) if task.name == "Idle": self.call_function(kernelspace, "Dispatcher::idle", "void", []) else: # Check wheter the task is surely continued, or surely started is_entry_abb = [task.entry_abb == x for x in abb_info.abbs_after] if all(is_entry_abb): # only jumping to an entry abb assert len(is_entry_abb) == 1 
self.Comment(kernelspace, "OPTIMIZATION: The task is surely no paused. We can surely start it from scratch.") self.call_function(kernelspace, "Dispatcher::StartToTask", "void", [self.task_desc(task)]) self.stats.add_data(abb, "opt:Dispatch:StartToTask", True) elif all([not x for x in is_entry_abb]): # Only continuing self.Comment(kernelspace, "OPTIMIZATION: The task is surely ready, just resume it.") self.call_function(kernelspace, "Dispatcher::ResumeToTask", "void", [self.task_desc(task)]) self.stats.add_data(abb, "opt:Dispatcher:ResumeToTask", True) else: self.call_function(kernelspace, "Dispatcher::Dispatch", "void", [self.task_desc(task)]) self.stats.add_data(abb, "opt:Dispatcher:general", True) def RescheduleTo(self, kernelspace, abb, task): """Reschedule to a specific task""" # Step 1: Set the current running marker. The current running # task is absolutly sure here. if abb.function.subtask == task: self.Comment(kernelspace, "OPTIMIZATION: We do not have to update the current_task entry") elif task.name == "Idle": kernelspace.add(Statement("scheduler_.current_task = TaskList::idle_id")) else: kernelspace.add(Statement("scheduler_.SetCurrentTask(%s)" % self.task_desc(task))) # Step 2: Determine the current system priority. abb_info = self.global_abb_info.for_abb(abb) priorities = set([x.dynamic_priority for x in abb_info.abbs_after]) # When task is non-preemptable, we have to set the system # priority to RES_SCHEDULER if not task.conf.preemptable: priorities = [task.entry_abb.definite_after(E.function_level).dynamic_priority] if len(priorities) == 1: next_prio = unwrap_seq(priorities) if next_prio == abb.dynamic_priority: self.Comment(kernelspace, "OPTIMIZATION: We do not have to update the current system priority") self.stats.add_data(abb, "opt:SetSystemPriority:not-needed", next_prio) else: self.Comment(kernelspace, "OPTIMIZATION: The system priority is determined." 
+ " Therefore we set it from a constant: %d == %s", next_prio, self.system_graph.who_has_prio(next_prio)) kernelspace.add(Statement("scheduler_.SetSystemPriority(%d)" % next_prio)) self.stats.add_data(abb, "opt:SetSystemPriority:constant", next_prio) else: # The current system priority is the priority of the next running task kernelspace.add(Statement("scheduler_.current_prio = scheduler_.tlist.%s" % task.name)) self.stats.add_data(abb, "opt:SetSystemPriority:general", True) # Step 3: Call the dispatcher! self.Dispatch(kernelspace, abb, task) def Schedule(self, kernelspace, abb): if abb.subtask.conf.is_isr: self.Comment(kernelspace, """OPTIMIZATION: Each ABB is either in an ISR or not, therefore we can surely decide if we have to reschedule in the AST or not""") self.stats.add_data(abb, "opt:Schedule:in-ast", True) self.call_function(kernelspace, "request_reschedule_ast", "void", []) return abb_info = self.global_abb_info.for_abb(abb) # All following tasks (Subtask->ABB) next_subtasks = abb_info.tasks_after if len(next_subtasks) == 1: next_subtask = unwrap_seq(list(next_subtasks.keys())) self.Comment(kernelspace, "OPTIMIZATION: There is only one possible subtask continuing" + " to, we directly dispatch to that task: %s", next_subtask) self.stats.add_data(abb, "opt:Schedule:possible-tasks", 1) self.RescheduleTo(kernelspace, abb, next_subtask) return tasks = list(abb_info.tasks_after.keys()) self.Comment(kernelspace, "OPTIMIZATION: Only the following tasks are possible %s", tasks) self.stats.add_data(abb, "opt:Schedule:possible-tasks", len(tasks)) self.call_function(kernelspace, "scheduler_.Reschedule", "void", []) def ASTSchedule(self, kernelspace): kernelspace.unused_parameter(0) irets = [x for x in self.system_graph.abbs if x.isA(S.iret)] # Collect all tasks that may be ready, after an interrupt is processed maybe_ready = set() for iret in irets: abb_info = self.global_abb_info.for_abb(iret) if not abb_info: # iret belongs to the activate task of an soft-counter 
continue for subtask in self.system_graph.subtasks: if abb_info.state_before.is_maybe_ready(subtask): maybe_ready.add(subtask) self.stats.add_data(self.system_graph.system_task, "opt:ASTSchedule:possible-tasks", len(maybe_ready)) if(len(maybe_ready) > 0): self.call_function(kernelspace, "scheduler_.Reschedule", "void", []) else: self.call_function(kernelspace, "Machine::unreachable", "void", []) def ActivateTask(self, abb, userspace, kernelspace): subtask = abb.arguments[0] abb_info = self.global_abb_info.for_abb(abb) if abb_info: self.SetReady(kernelspace, abb, subtask) self.Schedule(kernelspace, abb) else: # If we have no information about this systemcall just # make a full-featured systemcall FullSystemCalls.ActivateTask(self, abb, userspace, kernelspace) def TerminateTask(self, abb, userspace, kernelspace): subtask = abb.function.subtask abb_info = self.global_abb_info.for_abb(abb) if abb_info: self.SetSuspended(kernelspace, abb, subtask) self.Schedule(kernelspace, abb) else: # If we have no information about this systemcall just # make a full-featured systemcall FullSystemCalls.TerminateTask(self, abb, userspace, kernelspace) def ChainTask(self, abb, userspace, kernelspace): from_task = abb.function.subtask to_task = abb.arguments[0] abb_info = self.global_abb_info.for_abb(abb) if abb_info: self.SetSuspended(kernelspace, abb, from_task) self.SetReady(kernelspace, abb, to_task) self.Schedule(kernelspace, abb) else: # If we have no information about this systemcall just # make a full-featured systemcall FullSystemCalls.ChainTask(self, abb, userspace, kernelspace) def ReleaseResource(self, abb, userspace, kernelspace): from_task = abb.function.subtask next_prio = abb.definite_after(E.task_level).dynamic_priority abb_info = self.global_abb_info.for_abb(abb) if abb_info: self.call_function(kernelspace, "scheduler_.SetPriority", "void", [self.task_desc(from_task), str(next_prio)]) self.call_function(kernelspace, "scheduler_.SetSystemPriority", "void", 
[str(next_prio)]) self.Schedule(kernelspace, abb) else: # If we have no information about this systemcall just # make a full-featured systemcall FullSystemCalls.ReleaseResource(self, abb, userspace, kernelspace)
lgpl-3.0
jnovinger/django
tests/template_tests/syntax_tests/test_regroup.py
367
3984
from datetime import date from django.template import TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup class RegroupTagTests(SimpleTestCase): @setup({'regroup01': '' '{% regroup data by bar as grouped %}' '{% for group in grouped %}' '{{ group.grouper }}:' '{% for item in group.list %}' '{{ item.foo }}' '{% endfor %},' '{% endfor %}'}) def test_regroup01(self): output = self.engine.render_to_string('regroup01', { 'data': [{'foo': 'c', 'bar': 1}, {'foo': 'd', 'bar': 1}, {'foo': 'a', 'bar': 2}, {'foo': 'b', 'bar': 2}, {'foo': 'x', 'bar': 3}], }) self.assertEqual(output, '1:cd,2:ab,3:x,') @setup({'regroup02': '' '{% regroup data by bar as grouped %}' '{% for group in grouped %}' '{{ group.grouper }}:' '{% for item in group.list %}' '{{ item.foo }}' '{% endfor %}' '{% endfor %}'}) def test_regroup02(self): """ Test for silent failure when target variable isn't found """ output = self.engine.render_to_string('regroup02', {}) self.assertEqual(output, '') @setup({'regroup03': '' '{% regroup data by at|date:"m" as grouped %}' '{% for group in grouped %}' '{{ group.grouper }}:' '{% for item in group.list %}' '{{ item.at|date:"d" }}' '{% endfor %},' '{% endfor %}'}) def test_regroup03(self): """ Regression tests for #17675 The date template filter has expects_localtime = True """ output = self.engine.render_to_string('regroup03', { 'data': [{'at': date(2012, 2, 14)}, {'at': date(2012, 2, 28)}, {'at': date(2012, 7, 4)}], }) self.assertEqual(output, '02:1428,07:04,') @setup({'regroup04': '' '{% regroup data by bar|join:"" as grouped %}' '{% for group in grouped %}' '{{ group.grouper }}:' '{% for item in group.list %}' '{{ item.foo|first }}' '{% endfor %},' '{% endfor %}'}) def test_regroup04(self): """ The join template filter has needs_autoescape = True """ output = self.engine.render_to_string('regroup04', { 'data': [{'foo': 'x', 'bar': ['ab', 'c']}, {'foo': 'y', 'bar': ['a', 'bc']}, {'foo': 'z', 'bar': ['a', 'd']}], }) 
self.assertEqual(output, 'abc:xy,ad:z,') # Test syntax errors @setup({'regroup05': '{% regroup data by bar as %}'}) def test_regroup05(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('regroup05') @setup({'regroup06': '{% regroup data by bar thisaintright grouped %}'}) def test_regroup06(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('regroup06') @setup({'regroup07': '{% regroup data thisaintright bar as grouped %}'}) def test_regroup07(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('regroup07') @setup({'regroup08': '{% regroup data by bar as grouped toomanyargs %}'}) def test_regroup08(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('regroup08')
bsd-3-clause
Nicop06/ansible
lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_extension.py
21
10097
#!/usr/bin/python # # Copyright (c) 2017 Sertac Ozercan <seozerca@microsoft.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_virtualmachine_extension version_added: "2.4" short_description: Managed Azure Virtual Machine extension description: - Create, update and delete Azure Virtual Machine Extension options: resource_group: description: - Name of a resource group where the vm extension exists or will be created. required: true name: description: - Name of the vm extension required: true state: description: - Assert the state of the vm extension. Use 'present' to create or update a vm extension and 'absent' to delete a vm extension. default: present choices: - absent - present required: false location: description: - Valid azure location. Defaults to location of the resource group. default: resource_group location required: false virtual_machine_name: description: - The name of the virtual machine where the extension should be create or updated. required: false publisher: description: - The name of the extension handler publisher. required: false virtual_machine_extension_type: description: - The type of the extension handler. required: false type_handler_version: description: - The type version of the extension handler. required: false settings: description: - Json formatted public settings for the extension. required: false protected_settings: description: - Json formatted protected settings for the extension. required: false auto_upgrade_minor_version: description: - Whether the extension handler should be automatically upgraded across minor versions. 
required: false extends_documentation_fragment: - azure author: - "Sertac Ozercan (@sozercan)" - "Julien Stroheker (@ju_stroh)" ''' EXAMPLES = ''' - name: Create VM Extension azure_rm_virtualmachine_extension: name: myvmextension location: eastus resource_group: Testing virtual_machine_name: myvm publisher: Microsoft.Azure.Extensions virtual_machine_extension_type: CustomScript type_handler_version: 2.0 settings: '{"commandToExecute": "hostname"}' auto_upgrade_minor_version: true - name: Delete VM Extension azure_rm_virtualmachine_extension: name: myvmextension location: eastus resource_group: Testing virtual_machine_name: myvm state: absent ''' RETURN = ''' state: description: Current state of the vm extension returned: always type: dict changed: description: Whether or not the resource has changed returned: always type: bool ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.compute.models import ( VirtualMachineExtension ) except ImportError: # This is handled in azure_rm_common pass def vmextension_to_dict(extension): ''' Serializing the VM Extension from the API to Dict :return: dict ''' return dict( id=extension.id, name=extension.name, location=extension.location, publisher=extension.publisher, virtual_machine_extension_type=extension.virtual_machine_extension_type, type_handler_version=extension.type_handler_version, auto_upgrade_minor_version=extension.auto_upgrade_minor_version, settings=extension.settings, protected_settings=extension.protected_settings, ) class AzureRMVMExtension(AzureRMModuleBase): """Configuration class for an Azure RM VM Extension resource""" def __init__(self): self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), name=dict( type='str', required=True ), state=dict( type='str', required=False, default='present', choices=['present', 'absent'] ), location=dict( type='str', required=False ), virtual_machine_name=dict( 
type='str', required=False ), publisher=dict( type='str', required=False ), virtual_machine_extension_type=dict( type='str', required=False ), type_handler_version=dict( type='str', required=False ), auto_upgrade_minor_version=dict( type='bool', required=False ), settings=dict( type='dict', required=False ), protected_settings=dict( type='dict', required=False ) ) self.resource_group = None self.name = None self.location = None self.publisher = None self.virtual_machine_extension_type = None self.type_handler_version = None self.auto_upgrade_minor_version = None self.settings = None self.protected_settings = None self.state = None self.results = dict(changed=False, state=dict()) super(AzureRMVMExtension, self).__init__(derived_arg_spec=self.module_arg_spec, supports_check_mode=False, supports_tags=False) def exec_module(self, **kwargs): """Main module execution method""" for key in list(self.module_arg_spec.keys()): setattr(self, key, kwargs[key]) resource_group = None response = None to_be_updated = False try: resource_group = self.get_resource_group(self.resource_group) except CloudError: self.fail('resource group {} not found'.format(self.resource_group)) if not self.location: self.location = resource_group.location if self.state == 'present': response = self.get_vmextension() if not response: to_be_updated = True else: if response['settings'] != self.settings: response['settings'] = self.settings to_be_updated = True if response['protected_settings'] != self.protected_settings: response['protected_settings'] = self.protected_settings to_be_updated = True if to_be_updated: self.results['changed'] = True self.results['state'] = self.create_or_update_vmextension() elif self.state == 'absent': self.delete_vmextension() self.results['changed'] = True return self.results def create_or_update_vmextension(self): ''' Method calling the Azure SDK to create or update the VM extension. 
:return: void ''' self.log("Creating VM extension {0}".format(self.name)) try: params = VirtualMachineExtension( location=self.location, publisher=self.publisher, virtual_machine_extension_type=self.virtual_machine_extension_type, type_handler_version=self.type_handler_version, auto_upgrade_minor_version=self.auto_upgrade_minor_version, settings=self.settings, protected_settings=self.protected_settings ) poller = self.compute_client.virtual_machine_extensions.create_or_update(self.resource_group, self.virtual_machine_name, self.name, params) response = self.get_poller_result(poller) return vmextension_to_dict(response) except CloudError as e: self.log('Error attempting to create the VM extension.') self.fail("Error creating the VM extension: {0}".format(str(e))) def delete_vmextension(self): ''' Method calling the Azure SDK to delete the VM Extension. :return: void ''' self.log("Deleting vmextension {0}".format(self.name)) try: poller = self.compute_client.virtual_machine_extensions.delete(self.resource_group, self.virtual_machine_name, self.name) self.get_poller_result(poller) except CloudError as e: self.log('Error attempting to delete the vmextension.') self.fail("Error deleting the vmextension: {0}".format(str(e))) def get_vmextension(self): ''' Method calling the Azure SDK to get a VM Extension. :return: void ''' self.log("Checking if the vm extension {0} is present".format(self.name)) found = False try: response = self.compute_client.virtual_machine_extensions.get(self.resource_group, self.virtual_machine_name, self.name) found = True except CloudError as e: self.log('Did not find vm extension') if found: return vmextension_to_dict(response) else: return False def main(): """Main execution""" AzureRMVMExtension() if __name__ == '__main__': main()
gpl-3.0
zaragoza-sedeelectronica/hackathon-co.sa
node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py
217
5286
# This file comes from
#   https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.

"""Python module for generating .ninja files.

Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""

import textwrap
import re

def escape_path(word):
    # Order matters: already-escaped spaces ('$ ') must be doubled first,
    # otherwise the bare-space replacement would escape them a second time.
    return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')

class Writer(object):
    """Writes a syntactically valid .ninja file to ``output``.

    Long lines are word-wrapped at ``width`` columns using ninja's '$'
    line-continuation syntax, taking care never to break on an escaped
    ('$'-prefixed) space.
    """

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line (used to visually separate sections)."""
        self.output.write('\n')

    def comment(self, text):
        """Emit ``text`` as '#'-prefixed comment lines, wrapped to width."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit a ``key = value`` binding.

        A list value is joined with spaces (empty entries dropped); a
        None value emits nothing, so callers may pass optional settings
        through unconditionally.
        """
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, restat=False, rspfile=None,
             rspfile_content=None):
        """Emit a ``rule`` block followed by its indented variables."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a ``build`` statement; returns the (unescaped) output list.

        Implicit dependencies are emitted after '|' and order-only
        dependencies after '||', per ninja syntax.  ``variables`` may be
        a dict or a sequence of (key, value) pairs; each becomes an
        indented binding under the build line.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s %s' % (' '.join(out_outputs),
                                        rule,
                                        ' '.join(all_inputs)))

        if variables:
            if isinstance(variables, dict):
                # Fix: was variables.iteritems(), which exists only on
                # Python 2; items() behaves identically on both 2 and 3.
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        # Fix: use >= 0 so a '$' in column 0 is counted.  With the old
        # "> 0" test, a line starting with an escaped space ('$ ') was
        # seen as breakable and wrapped incorrectly.
        while dollar_index >= 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint
            # and that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can
                # find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        # Normalize None / scalar / list into a list (shared by callers).
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]


def escape(string):
    """Escape a string such that it can be embedded into a Ninja file
    without further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # We only have one special metacharacter: '$'.
    return string.replace('$', '$$')
apache-2.0
allenp/odoo
addons/l10n_hn/__openerp__.py
29
1437
# -*- encoding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. # Copyright (c) 2009-2010 Salvatore Josué Trimarchi Pinto <salvatore@trigluu.com> # (http://trigluu.com) # # This module provides a minimal Honduran chart of accounts that can be use # to build upon a more complex one. It also includes a chart of taxes and # the Lempira currency. # # This module is based on the Guatemalan chart of accounts: # Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved. # José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A. # (http://www.solucionesprisma.com) # # This module works with OpenERP 6.0 to 8.0 # { 'name': 'Honduras - Accounting', 'version': '0.1', 'category': 'Localization/Account Charts', 'description': """ This is the base module to manage the accounting chart for Honduras. ==================================================================== Agrega una nomenclatura contable para Honduras. También incluye impuestos y la moneda Lempira. -- Adds accounting chart for Honduras. It also includes taxes and the Lempira currency.""", 'author': 'Salvatore Josue Trimarchi Pinto', 'website': 'http://trigluu.com', 'depends': ['base', 'account'], 'data': [ 'account_chart.xml', 'l10n_hn_base.xml', 'account_chart_template.yml', ], 'demo': [], 'installable': True, }
gpl-3.0
navodissa/python-flask
flask/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
22
22235
# postgresql/psycopg2.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+psycopg2 :name: psycopg2 :dbapi: psycopg2 :connectstring: postgresql+psycopg2://user:password@host:port/dbname\ [?key=value&key=value...] :url: http://pypi.python.org/pypi/psycopg2/ psycopg2 Connect Arguments ----------------------------------- psycopg2-specific keyword arguments which are accepted by :func:`.create_engine()` are: * ``server_side_cursors``: Enable the usage of "server side cursors" for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. ``connection.cursor('some name')``, which has the effect that result rows are not immediately pre-fetched and buffered after statement execution, but are instead left on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. Note that the ``stream_results=True`` execution option is a more targeted way of enabling this mode on a per-execution basis. * ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode per connection. True by default. .. seealso:: :ref:`psycopg2_disable_native_unicode` * ``isolation_level``: This option, available for all PostgreSQL dialects, includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 dialect. .. seealso:: :ref:`psycopg2_isolation_level` * ``client_encoding``: sets the client encoding in a libpq-agnostic way, using psycopg2's ``set_client_encoding()`` method. .. 
seealso:: :ref:`psycopg2_unicode` Unix Domain Connections ------------------------ psycopg2 supports connecting via Unix domain connections. When the ``host`` portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, which specifies Unix-domain communication rather than TCP/IP communication:: create_engine("postgresql+psycopg2://user:password@/dbname") By default, the socket file used is to connect to a Unix-domain socket in ``/tmp``, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using ``host`` as an additional keyword argument:: create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql") See also: `PQconnectdbParams <http://www.postgresql.org/docs/9.1/static\ /libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_ Per-Statement/Connection Execution Options ------------------------------------------- The following DBAPI-specific options are respected when used with :meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, :meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: * isolation_level - Set the transaction isolation level for the lifespan of a :class:`.Connection` (can only be set on a connection, not a statement or query). See :ref:`psycopg2_isolation_level`. * stream_results - Enable or disable usage of psycopg2 server side cursors - this feature makes use of "named" cursors in combination with special result handling methods so that result rows are not fully buffered. If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used. .. _psycopg2_unicode: Unicode with Psycopg2 ---------------------- By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` extension, such that the DBAPI receives and returns all strings as Python Unicode objects directly - SQLAlchemy passes these values through without change. 
Psycopg2 here will encode/decode string values based on the current "client encoding" setting; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. Typically, this can be changed to ``utf8``, as a more useful default:: # postgresql.conf file # client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 A second way to affect the client encoding is to set it within Psycopg2 locally. SQLAlchemy will call psycopg2's :meth:`psycopg2:connection.set_client_encoding` method on all new connections based on the value passed to :func:`.create_engine` using the ``client_encoding`` parameter:: # set_client_encoding() setting; # works for *all* Postgresql versions engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') This overrides the encoding specified in the Postgresql client configuration. When using the parameter in this way, the psycopg2 driver emits ``SET client_encoding TO 'utf8'`` on the connection explicitly, and works in all Postgresql versions. Note that the ``client_encoding`` setting as passed to :func:`.create_engine` is **not the same** as the more recently added ``client_encoding`` parameter now supported by libpq directly. This is enabled when ``client_encoding`` is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed using the :paramref:`.create_engine.connect_args` parameter:: # libpq direct parameter setting; # only works for Postgresql **9.1 and above** engine = create_engine("postgresql://user:pass@host/dbname", connect_args={'client_encoding': 'utf8'}) # using the query string is equivalent engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8") The above parameter was only added to libpq as of version 9.1 of Postgresql, so using the previous method is better for cross-version support. .. 
_psycopg2_disable_native_unicode: Disabling Native Unicode ^^^^^^^^^^^^^^^^^^^^^^^^ SQLAlchemy can also be instructed to skip the usage of the psycopg2 ``UNICODE`` extension and to instead utilize its own unicode encode/decode services, which are normally reserved only for those DBAPIs that don't fully support unicode directly. Passing ``use_native_unicode=False`` to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``. SQLAlchemy will instead encode data itself into Python bytestrings on the way in and coerce from bytes on the way back, using the value of the :func:`.create_engine` ``encoding`` parameter, which defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming obsolete as most DBAPIs now support unicode fully. Transactions ------------ The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. .. _psycopg2_isolation_level: Psycopg2 Transaction Isolation Level ------------------------------------- As discussed in :ref:`postgresql_isolation_level`, all Postgresql dialects support setting of transaction isolation level both via the ``isolation_level`` parameter passed to :func:`.create_engine`, as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`. When using the psycopg2 dialect, these options make use of psycopg2's ``set_isolation_level()`` connection method, rather than emitting a Postgresql directive; this is because psycopg2's API-level setting is always emitted at the start of each transaction in any case. The psycopg2 dialect supports these constants for isolation level: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` * ``AUTOCOMMIT`` .. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using psycopg2. .. 
seealso:: :ref:`postgresql_isolation_level` :ref:`pg8000_isolation_level` NOTICE logging --------------- The psycopg2 dialect will log Postgresql NOTICE messages via the ``sqlalchemy.dialects.postgresql`` logger:: import logging logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) .. _psycopg2_hstore:: HSTORE type ------------ The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension by default when it is detected that the target database has the HSTORE type set up for use. In other words, when the dialect makes the first connection, a sequence like the following is performed: 1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``. If this function returns a list of HSTORE identifiers, we then determine that the ``HSTORE`` extension is present. 2. If the ``use_native_hstore`` flag is at its default of ``True``, and we've detected that ``HSTORE`` oids are available, the ``psycopg2.extensions.register_hstore()`` extension is invoked for all connections. The ``register_hstore()`` extension has the effect of **all Python dictionaries being accepted as parameters regardless of the type of target column in SQL**. The dictionaries are converted by this extension into a textual HSTORE expression. If this behavior is not desired, disable the use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows:: engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", use_native_hstore=False) The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()`` extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE string format, on both the parameter side and the result side, will take place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which may be more performant. 
""" from __future__ import absolute_import import re import logging from ... import util, exc import decimal from ... import processors from ...engine import result as _result from ...sql import expression from ... import types as sqltypes from .base import PGDialect, PGCompiler, \ PGIdentifierPreparer, PGExecutionContext, \ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ _INT_TYPES from .hstore import HSTORE from .json import JSON logger = logging.getLogger('sqlalchemy.dialects.postgresql') class _PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) class _PGEnum(ENUM): def result_processor(self, dialect, coltype): if util.py2k and self.convert_unicode is True: # we can't easily use PG's extensions here because # the OID is on the fly, and we need to give it a python # function anyway - not really worth it. 
self.convert_unicode = "force_nocheck" return super(_PGEnum, self).result_processor(dialect, coltype) class _PGHStore(HSTORE): def bind_processor(self, dialect): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).bind_processor(dialect) def result_processor(self, dialect, coltype): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).result_processor(dialect, coltype) class _PGJSON(JSON): def result_processor(self, dialect, coltype): if dialect._has_native_json: return None else: return super(_PGJSON, self).result_processor(dialect, coltype) # When we're handed literal SQL, ensure it's a SELECT query. Since # 8.3, combining cursors and "FOR UPDATE" has been fine. SERVER_SIDE_CURSOR_RE = re.compile( r'\s*SELECT', re.I | re.UNICODE) _server_side_id = util.counter() class PGExecutionContext_psycopg2(PGExecutionContext): def create_cursor(self): # TODO: coverage for server side cursors + select.for_update() if self.dialect.server_side_cursors: is_server_side = \ self.execution_options.get('stream_results', True) and ( (self.compiled and isinstance(self.compiled.statement, expression.Selectable) or ( (not self.compiled or isinstance(self.compiled.statement, expression.TextClause)) and self.statement and SERVER_SIDE_CURSOR_RE.match( self.statement)) ) ) else: is_server_side = \ self.execution_options.get('stream_results', False) self.__is_server_side = is_server_side if is_server_side: # use server-side cursors: # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) return self._dbapi_connection.cursor(ident) else: return self._dbapi_connection.cursor() def get_result_proxy(self): # TODO: ouch if logger.isEnabledFor(logging.INFO): self._log_notices(self.cursor) if self.__is_server_side: return _result.BufferedRowResultProxy(self) else: return _result.ResultProxy(self) def _log_notices(self, cursor): for notice in 
cursor.connection.notices: # NOTICE messages have a # newline character at the end logger.info(notice.rstrip()) cursor.connection.notices[:] = [] class PGCompiler_psycopg2(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return self.process(binary.left, **kw) + " %% " + \ self.process(binary.right, **kw) def post_process_text(self, text): return text.replace('%', '%%') class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace('%', '%%') class PGDialect_psycopg2(PGDialect): driver = 'psycopg2' if util.py2k: supports_unicode_statements = False default_paramstyle = 'pyformat' # set to true based on psycopg2 version supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_psycopg2 statement_compiler = PGCompiler_psycopg2 preparer = PGIdentifierPreparer_psycopg2 psycopg2_version = (0, 0) _has_native_hstore = False _has_native_json = False colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: _PGNumeric, ENUM: _PGEnum, # needs force_unicode sqltypes.Enum: _PGEnum, # needs force_unicode HSTORE: _PGHStore, JSON: _PGJSON } ) def __init__(self, server_side_cursors=False, use_native_unicode=True, client_encoding=None, use_native_hstore=True, **kwargs): PGDialect.__init__(self, **kwargs) self.server_side_cursors = server_side_cursors self.use_native_unicode = use_native_unicode self.use_native_hstore = use_native_hstore self.supports_unicode_binds = use_native_unicode self.client_encoding = client_encoding if self.dbapi and hasattr(self.dbapi, '__version__'): m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', self.dbapi.__version__) if m: self.psycopg2_version = tuple( int(x) for x in m.group(1, 2, 3) if x is not None) def initialize(self, connection): super(PGDialect_psycopg2, self).initialize(connection) self._has_native_hstore = self.use_native_hstore and \ self._hstore_oids(connection.connection) \ is not None 
self._has_native_json = self.psycopg2_version >= (2, 5) # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9) @classmethod def dbapi(cls): import psycopg2 return psycopg2 @util.memoized_property def _isolation_lookup(self): from psycopg2 import extensions return { 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT, 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED, 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ, 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE } def set_isolation_level(self, connection, level): try: level = self._isolation_lookup[level.replace('_', ' ')] except KeyError: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) connection.set_isolation_level(level) def on_connect(self): from psycopg2 import extras, extensions fns = [] if self.client_encoding is not None: def on_connect(conn): conn.set_client_encoding(self.client_encoding) fns.append(on_connect) if self.isolation_level is not None: def on_connect(conn): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) if self.dbapi and self.use_native_unicode: def on_connect(conn): extensions.register_type(extensions.UNICODE, conn) extensions.register_type(extensions.UNICODEARRAY, conn) fns.append(on_connect) if self.dbapi and self.use_native_hstore: def on_connect(conn): hstore_oids = self._hstore_oids(conn) if hstore_oids is not None: oid, array_oid = hstore_oids if util.py2k: extras.register_hstore(conn, oid=oid, array_oid=array_oid, unicode=True) else: extras.register_hstore(conn, oid=oid, array_oid=array_oid) fns.append(on_connect) if self.dbapi and self._json_deserializer: def on_connect(conn): extras.register_default_json( conn, loads=self._json_deserializer) 
fns.append(on_connect) if fns: def on_connect(conn): for fn in fns: fn(conn) return on_connect else: return None @util.memoized_instancemethod def _hstore_oids(self, conn): if self.psycopg2_version >= (2, 4): from psycopg2 import extras oids = extras.HstoreAdapter.get_oids(conn) if oids is not None and oids[0]: return oids[0:2] return None def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: opts['port'] = int(opts['port']) opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): # check the "closed" flag. this might not be # present on old psycopg2 versions. Also, # this flag doesn't actually help in a lot of disconnect # situations, so don't rely on it. if getattr(connection, 'closed', False): return True # checks based on strings. in the case that .closed # didn't cut it, fall back onto these. str_e = str(e).partition("\n")[0] for msg in [ # these error messages from libpq: interfaces/libpq/fe-misc.c # and interfaces/libpq/fe-secure.c. 'terminating connection', 'closed the connection', 'connection not open', 'could not receive data from server', 'could not send data to server', # psycopg2 client errors, psycopg2/conenction.h, # psycopg2/cursor.h 'connection already closed', 'cursor already closed', # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". 'losed the connection unexpectedly', # these can occur in newer SSL 'connection has been closed unexpectedly', 'SSL SYSCALL error: Bad file descriptor', 'SSL SYSCALL error: EOF detected', ]: idx = str_e.find(msg) if idx >= 0 and '"' not in str_e[:idx]: return True return False dialect = PGDialect_psycopg2
bsd-3-clause
gadru/home-media-control-center
Interfaces/Kodi.py
1
4732
#!/usr/bin/python
"""Remote control for a Kodi/XBMC media center via its JSON-RPC HTTP API.

Requires the third-party ``xbmcjson`` package.  If a MAC address is
supplied, a powered-down host can also be woken via Wake-on-LAN.
"""
from xbmcjson import XBMC, PLAYER_VIDEO
import socket
import struct
import sys

class Kodi:
    """Controls a single Kodi host over JSON-RPC.

    Attributes:
        hostname:  upper-cased host name / address of the Kodi box.
        mac:       optional MAC address used by wake_up().
        connected: cached result of the last reachability check.
        kodi:      the underlying xbmcjson.XBMC client (or None).
    """

    def __init__(self, hostname, mac_address=None, user='xbmc', password='xbmc'):
        """Create the client and immediately try to connect.

        user/password are the Kodi web-interface credentials.
        """
        self.hostname = hostname.upper()
        self.mac = mac_address
        self._username = user
        self._password = password
        self.connected = False
        self.kodi = None
        self.connect()

    def _get_info_booleans(self, boolean_name):
        # Returns the boolean's value, or None when absent from the reply.
        returned = self.kodi.xbmc.GetInfoBooleans(booleans=[boolean_name])
        return returned.get('result', dict()).get(boolean_name)

    def _execute_action(self, action_name):
        # True iff Kodi acknowledged the input action with "OK".
        returned = self.kodi.Input.ExecuteAction(action=action_name)
        result = returned.get('result')
        return result == u'OK'

    def ping(self):
        """True when the JSON-RPC endpoint answers the ping."""
        return self.kodi.JSONRPC.Ping()['result'] == 'pong'

    def is_up(self):
        """Ping the host, treating any error as 'down'; caches .connected."""
        try:
            res = self.ping()
        except Exception:  # narrowed from bare except: any transport failure
            res = False
        self.connected = res
        return res

    def connect(self):
        """(Re)create the JSON-RPC client and report reachability."""
        address = "http://%s/jsonrpc" % self.hostname
        self.kodi = XBMC(address, self._username, self._password)
        return self.is_up()

    def wake_up(self):
        """Broadcast a Wake-on-LAN magic packet for self.mac.

        Returns None without sending anything when no MAC is configured.
        Raises ValueError for a malformed MAC.
        NOTE: the str + struct.pack concatenation below is Python 2
        specific; on Python 3 this would need a bytes buffer.
        """
        if self.mac is None:
            return None
        macaddress = self.mac
        # Check macaddress format and try to compensate for separators.
        if len(macaddress) == 12:
            pass
        elif len(macaddress) == 12 + 5:
            sep = macaddress[2]
            macaddress = macaddress.replace(sep, '')
        else:
            raise ValueError('Incorrect MAC address format')
        macaddress = macaddress.upper()
        # Magic packet payload: 6 x 0xFF followed by the MAC repeated.
        data = ''.join(['FFFFFFFFFFFF', macaddress * 20])
        send_data = ''
        # Split up the hex values and pack into raw bytes.
        for i in range(0, len(data), 2):
            send_data = ''.join([send_data,
                                 struct.pack('B', int(data[i: i + 2], 16))])
        # Broadcast it to the LAN on UDP port 7 (traditional WoL port).
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(send_data, ('<broadcast>', 7))

    def suspend(self):
        if self.connected:
            self.kodi.System.Suspend()

    def toggle_suspend(self):
        """Suspend a running host or wake a sleeping one.

        Returns "off" after suspending, "on" after sending the WoL packet.
        """
        if self.is_up():
            self.suspend()
            return "off"
        else:
            # Bug fix: previously called self.wakeUp(), which doesn't
            # exist (the method is wake_up), raising AttributeError.
            self.wake_up()
            return "on"

    def reboot(self):
        if self.connected:
            self.kodi.System.Reboot()

    def play_pause(self):
        if self.connected:
            self.kodi.Player.PlayPause([PLAYER_VIDEO])

    def step_forward(self):
        self._execute_action("stepforward")

    def step_back(self):
        self._execute_action("stepback")

    def big_step_forward(self):
        self._execute_action("bigstepforward")

    def big_step_back(self):
        self._execute_action("bigstepback")

    def small_step_back(self):
        self._execute_action("smallstepback")

    def stop(self):
        if self.connected:
            self.kodi.Player.Stop([PLAYER_VIDEO])

    def mute(self):
        if self.connected:
            self.kodi.Application.SetMute({"mute": True})

    def unmute(self):
        if self.connected:
            self.kodi.Application.SetMute({"mute": False})

    def notify(self, title, message):
        """Pop an on-screen notification on the Kodi GUI."""
        if self.connected:
            self.kodi.GUI.ShowNotification({"title": title, "message": message})

    def nav_left(self):
        self.kodi.Input.Left()

    def nav_right(self):
        self.kodi.Input.Right()

    def nav_up(self):
        self.kodi.Input.Up()

    def nav_down(self):
        # Bug fix: nav_down was defined twice and both copies sent
        # Input.Up(); it now sends Input.Down() once, as the name implies.
        self.kodi.Input.Down()

    def nav_back(self):
        self.kodi.Input.Back()

    def nav_select(self):
        self.kodi.Input.Select()

    def nav_context_menu(self):
        self.kodi.Input.ContextMenu()

    def nav_info(self):
        self.kodi.Input.Info()

    def nav_home(self):
        self.kodi.Input.Home()

    def addon_enable(self, addonid, enable=True):
        """Enable (or, with enable=False, disable) the given add-on."""
        data = {"addonid": addonid, 'enabled': enable}
        self.kodi.Addons.SetAddonEnabled(data)

    def addon_disable(self, addonid):
        self.addon_enable(addonid, False)

    def volume_set(self, volume_normalized):
        """Set the volume from a fraction in [0, 1].

        Generalized to accept int as well as float (e.g. 0 or 1), since
        Kodi only sees the resulting integer percentage anyway.
        Raises ValueError for anything outside [0, 1] or non-numeric.
        """
        if not (isinstance(volume_normalized, (int, float))
                and 0 <= volume_normalized <= 1):
            raise ValueError("Must be a number between 0 and 1")
        level_percent = round(100 * volume_normalized)
        self.kodi.Application.SetVolume({'volume': level_percent})

    def play_url(self, url):
        """Start playback of an arbitrary URL / file path."""
        self.kodi.Player.Open(item={"file": url})

    def is_screensaver_on(self):
        return self._get_info_booleans("System.ScreenSaverActive")
mit
SpamExperts/SpamPAD
oa/plugins/header_eval.py
2
31692
"""Expose some eval rules that do checks on the headers.""" from __future__ import absolute_import from __future__ import division import re import time import datetime import itertools import email.utils import email.header from dateutil import relativedelta import oa.locales import oa.plugins.base from oa.received_parser import IP_ADDRESS from oa.regex import Regex class HeaderEval(oa.plugins.base.BasePlugin): hotmail_addr_with_forged_hotmail_received = 0 hotmail_addr_but_no_hotmail_received = 0 tocc_sorted_count = 7 tocc_similar_count = 5 tocc_similar_length = 2 eval_rules = ( "check_for_fake_aol_relay_in_rcvd", "check_for_faraway_charset_in_headers", "check_for_unique_subject_id", "check_illegal_chars", "check_for_forged_hotmail_received_headers", "check_for_no_hotmail_received_headers", "check_for_msn_groups_headers", "check_for_forged_eudoramail_received_headers", "check_for_forged_yahoo_received_headers", "check_for_forged_juno_received_headers", "check_for_matching_env_and_hdr_from", "sorted_recipients", "similar_recipients", "check_for_missing_to_header", "check_for_forged_gw05_received_headers", "check_for_shifted_date", "subject_is_all_caps", "check_for_to_in_subject", "check_outlook_message_id", "check_messageid_not_usable", "check_header_count_range", "check_unresolved_template", "check_ratware_name_id", "check_ratware_envelope_from", "gated_through_received_hdr_remover", "check_equal_from_domains", "received_within_months" ) options = { "util_rb_tld": ("append_split", []), "util_rb_2tld": ("append_split", []), "util_rb_3tld": ("append_split", []), } def check_for_fake_aol_relay_in_rcvd(self, msg, target=None): """Check for common AOL fake received header.""" for recv in msg.get_decoded_header("Received"): if not Regex(r" rly-[a-z][a-z]\d\d\.", re.I).search(recv): continue if Regex(r"\/AOL-\d+\.\d+\.\d+\)").search(recv): continue if Regex(r"ESMTP id (?:RELAY|MAILRELAY|MAILIN)").search(recv): continue return True return False def 
check_for_faraway_charset_in_headers(self, msg, target=None): """Check if the Subject/From header is in a NOT ok locale. This eval rule requires the ok_locales setting configured, and not set to ALL. """ ok_locales = self.ctxt.conf.get_global("ok_locales") if not ok_locales or ok_locales.lower() == "all": return False ok_locales = ok_locales.split() # XXX We should really be checking ALL headers here, # XXX not just Subject and From. for header_name in ("Subject", "From"): for header in msg.get_raw_header(header_name): try: decoded_header = email.header.decode_header(header) except (ValueError, email.header.HeaderParseError): continue for value, charset in decoded_header: if not oa.locales.charset_ok_for_locales( charset, ok_locales): return True return False def check_for_unique_subject_id(self, msg, target=None): """Check if in subject appears an unique id""" subject = "".join(msg.get_decoded_header("Subject")) id = None unique_id_re_list = [ r"[-_\.\s]{7,}([-a-z0-9]{4,})$", r"\s{10,}(?:\S\s)?(\S+)$", r"\s{3,}[-:\#\(\[]+([-a-z0-9]{4,})[\]\)]+$", r"\s{3,}[-:\#]([a-z0-9]{5,})$", r"[\s._]{3,}([^0\s._]\d{3,})$", r"[\s._]{3,}\[(\S+)\]$", # (7217vPhZ0-478TLdy5829qicU9-0@26) and similar r"\(([-\w]{7,}\@\d+)\)$", r"\b(\d{7,})\s*$", # stuff at end of line after "!" or "?" 
is usually an id r"[!\?]\s*(\d{4,}|\w+(-\w+)+)\s*$", # 9095IPZK7-095wsvp8715rJgY8-286-28 and similar # excluding 'Re:', etc and the first word r"(?:\w{2,3}:\s)?\w+\s+(\w{7,}-\w{7,}(-\w+)*)\s*$", # #30D7 and similar r"\s#\s*([a-f0-9]{4,})\s*$" ] for rgx in unique_id_re_list: match = Regex(rgx, re.I).search(subject) if match: id = match.group() break if not id: return False comercial_re = Regex(r"(?:item|invoice|order|number|confirmation)" r".{1,6}%s\s*$" % id, re.X | re.I) if Regex(r"\d{5,}").search(id) and comercial_re.search(subject): return False return True def check_illegal_chars(self, msg, header, ratio, count, target=None): """look for 8-bit and other illegal characters that should be MIME encoded, these might want to exempt languages that do not use Latin-based alphabets, but only if the user wants it that way """ try: ratio = float(ratio) except ValueError: self.ctxt.log.warn("HeaderEval::Plugin check_illegal_chars " "invalid option: %s", ratio) return False try: count = int(count) except ValueError: self.ctxt.log.warn("HeaderEval::Plugin check_illegal_chars " "invalid option: %s", count) return False if header == 'ALL': raw_headers = msg.raw_headers key_headers = [] for keys in raw_headers.keys(): key_headers.append(keys) for key in key_headers: if key.lower() in ("subject", "from"): try: del raw_headers[key] except KeyError: pass else: raw_headers = {header: msg.get_raw_header(header)} # count illegal substrings (RFC 2045) # (non-ASCII + C0 controls except TAB, NL, CR) raw_str = ''.join([''.join(value) for value in raw_headers.values()]) try: raw_str = raw_str.decode("utf-8") except AttributeError: # in Python 3 all string is unicode object pass clean_hdr = ''.join([i if ord(i) < 128 else '' for i in raw_str]) illegal = len(raw_str) - len(clean_hdr) if illegal > 0 and header.lower() == "subject": exempt = 0 # only exempt a single cent sign, pound sign, or registered sign for except_chr in (u'\xa2', u'\xa3', u'\xae'): if except_chr in raw_str: exempt += 1 
if exempt == 1: illegal -= exempt if raw_str: return (illegal / len(raw_str)) >= ratio and illegal >= count else: return False def check_for_forged_hotmail_received_headers(self, msg, target=None): """Check for forged hotmail received headers""" self._check_for_forged_hotmail_received_headers(msg) return self.hotmail_addr_with_forged_hotmail_received def check_for_no_hotmail_received_headers(self, msg, target=None): """Check for no hotmail received headers""" self._check_for_forged_hotmail_received_headers(msg) return self.hotmail_addr_but_no_hotmail_received def _check_for_forged_hotmail_received_headers(self, msg): self.hotmail_addr_but_no_hotmail_received = 0 self.hotmail_addr_with_forged_hotmail_received = 0 rcvd = msg.msg.get("Received") if not rcvd: return False pickup_service_regex = Regex(r"from mail pickup service by hotmail" r"\.com with Microsoft SMTPSVC;") if pickup_service_regex.search(rcvd): return False if self.check_for_msn_groups_headers(msg): return False ip_header = msg.msg.get("X-ORIGINATING-IP") if ip_header and IP_ADDRESS.search(ip_header): FORGED_REGEX = Regex( r"from\s+(?:\S*\.)?hotmail.com\s+\(\S+\.hotmail(" r"?:\.msn)?\.com[\)]|" r"from\s+\S*\.hotmail\.com\s+\(\[{IP_ADDRESS}\]|" r"from\s+\S+\s+by\s+\S+\.hotmail(?:\.msn)?\.com\s+with\s+ " r"HTTP\;|" r"from\s+\[66\.218.\S+\]\s+by\s+\S+\.yahoo\.com" r"".format(IP_ADDRESS=IP_ADDRESS.pattern), re.I | re.X) if FORGED_REGEX.search(rcvd): return False if self.gated_through_received_hdr_remover(msg): return False helo_hotmail_regex = Regex(r"(?:from |HELO |helo=)\S*hotmail\.com\b") if helo_hotmail_regex.search(rcvd): self.hotmail_addr_with_forged_hotmail_received = 1 else: from_address = msg.msg.get("From") if not from_address: from_address = "" if "hotmail.com" not in from_address: return False self.hotmail_addr_but_no_hotmail_received = 1 def check_for_msn_groups_headers(self, msg, target=None): """Check if the email's destination is a msn group""" to = ''.join(msg.get_decoded_header('To')) if 
not Regex(r"<(\S+)\@groups\.msn\.com>").search(to): return False listname = Regex(r"<(\S+)\@groups\.msn\.com>").match(to).groups()[0] server_rgx = Regex(r"from mail pickup service by " r"((?:p\d\d\.)groups\.msn\.com)\b") server = '' for rcvd in msg.get_decoded_header('Received'): if server_rgx.search(rcvd): server = server_rgx.search(rcvd).groups()[0] break if not server: return False message_id = ''.join(msg.get_decoded_header('Message-Id')) if listname == "notifications": if not Regex(r"^<\S+\@{0}".format(server)).search(message_id): return False else: msn_addr = Regex(r"^<{0}-\S+\@groups\.msn\.com>".format(listname)) if not msn_addr.search(message_id): return False msn_addr = "{0}-bounce@groups.msn.com".format(listname) if msg.sender_address != msn_addr: return False return True def check_for_forged_eudoramail_received_headers(self, msg, target=None): """Check if the email has forged eudoramail received header""" from_addr = ''.join(msg.get_all_addr_header("From")) if from_addr.rsplit("@", 1)[-1] != "eudoramail.com": return False rcvd = ''.join(msg.get_decoded_header("Received")) ip = ''.join(msg.get_decoded_header("X-Sender-Ip")) if ip and IP_ADDRESS.search(ip): ip = True else: ip = False if self.gated_through_received_hdr_remover(msg): return False if Regex(r"by \S*whowhere.com\;").search(rcvd) and ip: return False return True def check_for_forged_yahoo_received_headers(self, msg, target=None): """Check for forged yahoo received headers""" from_addr = ''.join(msg.get_all_addr_header("From")) rcvd = ''.join(msg.get_decoded_header("Received")) if "yahoo.com" not in from_addr: return False if (msg.get_decoded_header("Resent-From") and msg.get_decoded_header("Resent-To")): xrcvd = ''.join(msg.get_decoded_header("X-Received")) rcvd = xrcvd if xrcvd else rcvd if self.gated_through_received_hdr_remover(msg): return False for relay in msg.untrusted_relays + msg.trusted_relays: rdns = relay.get("rdns") if rdns and "yahoo.com" in rdns: return False if Regex(r"by 
web\S+\.mail\S*\.yahoo\.com via HTTP").search(rcvd): return False if Regex(r"by smtp\S+\.yahoo\.com with SMTP").search(rcvd): return False yahoo_ip_re = Regex( r"from\s+\[{}\]\s+by\s+\S+\." r"(?:groups|scd|dcn)\.yahoo\.com\s+with\s+NNFMP".format( IP_ADDRESS.pattern), re.X) if yahoo_ip_re.search(rcvd): return False if (Regex(r"\bmailer\d+\.bulk\.scd\.yahoo\.com\b").search(rcvd) and from_addr.rsplit("@", 1)[-1] == "reply.yahoo.com"): return False if Regex("by \w+\.\w+\.yahoo\.com \(\d+\.\d+\.\d+\/\d+\.\d+\.\d+\)" "(?: with ESMTP)? id \w+").search(rcvd): return False return True def check_for_forged_juno_received_headers(self, msg, target=None): from_addr = ''.join(msg.get_all_addr_header("From")) if not from_addr.rsplit("@", 1)[-1].endswith("juno.com"): return False if self.gated_through_received_hdr_remover(msg): return False xorig = ''.join(msg.get_decoded_header("X-Originating-IP")) xmailer = ''.join(msg.get_decoded_header("X-Mailer")) rcvd = ''.join(msg.get_decoded_header("Received")) if xorig != "": juno_re = Regex(r"from.*\b(?:juno|untd)\.com.*" r"[\[\(]{0}[\]\)].*by".format(IP_ADDRESS.pattern), re.X) cookie_re = Regex(r" cookie\.(?:juno|untd)\.com ") if not juno_re.search(rcvd) and not cookie_re.search(rcvd): return True if "Juno " not in xmailer: return True else: mail_com_re = Regex(r"from.*\bmail\.com.*\[{}\].*by".format( IP_ADDRESS.pattern), re.X) untd_com_re = Regex(r"from\s+(webmail\S+\.untd" r"\.com)\s+\(\1\s+\[{}\]\)\s+by".format( IP_ADDRESS.pattern), re.X) if mail_com_re.search(rcvd) and not Regex(r"\bmail\.com").search( xmailer): return True elif untd_com_re.search(rcvd) and not Regex( r"^Webmail Version \d").search(xmailer): return True else: return True return False def check_for_matching_env_and_hdr_from(self, msg, target=None): from_addr = ''.join(msg.get_all_addr_header("From")) envfrom = "" for relay in msg.trusted_relays + msg.untrusted_relays: if relay.get('envfrom'): envfrom = relay.get('envfrom') break return envfrom in from_addr def 
_parse_rcpt(self, addr): user = addr[:self.tocc_similar_count] try: fqhn = addr.rsplit("@", 1)[1] except IndexError: fqhn = addr host = fqhn[:self.tocc_similar_length] return user, fqhn, host def _check_recipients(self, msg): """Check for similar recipients addresses. Return the ratio of possibly similar recipient of the total number of possible combinations. """ try: return self.get_local(msg, "tocc_similar") except KeyError: pass recipients = [] for header_name in ("To", "Cc", "Bcc", "ToCc"): recipients.extend(msg.get_all_addr_header(header_name)) if not recipients: self.set_local(msg, "tocc_similar", 0) self.set_local(msg, "tocc_sorted", False) return sorted_recipients = sorted(recipients) self.set_local(msg, "tocc_sorted", sorted_recipients == recipients) # Remove dupes IF they are next to each other addresses = [recipients[0]] for rcpt1, rcpt2 in zip(recipients, recipients[1:]): if rcpt1 == rcpt2: continue addresses.append(rcpt2) if len(addresses) < self.tocc_similar_count: self.set_local(msg, "tocc_similar", 0) return hits = 0 combinations = 0 for rcpt1, rcpt2 in itertools.combinations(addresses, 2): user1, fqhn1, host1 = self._parse_rcpt(rcpt1) user2, fqhn2, host2 = self._parse_rcpt(rcpt2) combinations += 1 if user1 == user2: hits += 1 if host1 == host2 and fqhn1 != fqhn2: hits += 1 ratio = hits / combinations self.set_local(msg, "tocc_similar", ratio) return ratio def sorted_recipients(self, msg, target=None): """Matches if the recipients are sorted""" self._check_recipients(msg) return self.get_local(msg, "tocc_sorted") def similar_recipients(self, msg, minr=None, maxr=None, target=None): """Matches if the similar recipients ratio is in the specified . 
:param minr: The minimum for the ratio :param maxr: The maximum for the ratio """ ratio = self._check_recipients(msg) try: return ( (minr is None or float(minr) < ratio) and (maxr is None or ratio < float(maxr)) ) except (TypeError, ValueError): return False def check_for_missing_to_header(self, msg, target=None): """Check if the To header is missing.""" if msg.get_raw_header("To"): return False if msg.get_raw_header("Apparently-To"): return False return True def check_for_forged_gw05_received_headers(self, msg, target=None): gw05_re = Regex(r"from\s(\S+)\sby\s(\S+)\swith\sESMTP\;\s+\S\S\S," r"\s+\d+\s+\S\S\S\s+\d{4}\s+\d\d:\d\d:\d\d\s+[-+]*" r"\d{4}", re.X | re.I) for rcv in msg.get_decoded_header("Received"): h1 = "" h2 = "" try: match = gw05_re.match(rcv) if match: h1, h2 = match.groups() if h1 and h2 and h2 != ".": return True except IndexError: continue return False def _parse_rfc822_date(self, date): try: parsed_date = email.utils.parsedate(date) time_in_seconds = time.mktime(parsed_date) date_time = datetime.datetime.utcfromtimestamp( time_in_seconds) except ValueError: return return date_time def _get_date_header_time(self, msg): try: return self.get_local(msg, "date_header_time") except KeyError: pass date_time = None for header in ["Resent-Date", "Date"]: dates = msg.get_decoded_header(header) for date in dates: date_time = self._parse_rfc822_date(date) if date_time: break if date_time: self.set_local(msg, "date_header_time", date_time) else: self.set_local(msg, "date_header_time", -1) return date_time def _get_received_header_times(self, msg): try: return self.get_local(msg, "received_header_times") except KeyError: pass received_times = [] self.set_local(msg, "received_header_times", received_times) date_re = Regex(r"(\s.?\d+ \S\S\S \d+ \d+:\d+:\d+ \S+)") for rcvd in msg.get_decoded_header("Received"): try: date = date_re.search(rcvd).group() except (AttributeError, TypeError): date = None if not date: continue self.ctxt.log.debug("eval: trying Received 
header date for " "real time: %s", date) received_time = self._parse_rfc822_date(date) if received_time: received_times.append(received_time) return received_times def _check_date_diff(self, msg): try: return self.get_local(msg, "date_diff") except KeyError: pass self.set_local(msg, "date_diff", datetime.timedelta(0)) date_header_time = self._get_date_header_time(msg) received_header_times = self._get_received_header_times(msg) diffs = [] for received_header_time in received_header_times: diff = abs(received_header_time - date_header_time) if diff: diffs.append(diff) try: date_diff = sorted(diffs)[0] self.set_local(msg, "date_diff", date_diff) return date_diff except IndexError: self.set_local(msg, "date_diff", datetime.timedelta(0)) return datetime.timedelta(0) def subject_is_all_caps(self, msg, target=None): """Checks if the subject is all capital letters. This eval rule ignore short subjects, one word subject and the prepended notations. (E.g. ``Re:``) """ for subject in msg.get_decoded_header("Subject"): # Remove the Re/Fwd notations in the subject subject = Regex(r"^(Re|Fwd|Fw|Aw|Antwort|Sv):", re.I).sub("", subject) subject = subject.strip() if len(subject) < 10: # Don't match short subjects continue if len(subject.split()) == 1: # Don't match one word subjects continue if subject.isupper(): return True return False def check_for_to_in_subject(self, msg, test, target=None): """ Check if to address is in Subject field. If it is called with 'address', check if full address is in subject, else if the parameter is 'user', then check if user name is in subject. 
""" full_to = msg.get_all_addr_header('To') if not full_to: return False subject = msg.msg.get('Subject', "") for to in full_to: if test == "address": subject_regex = Regex(r".*" + re.escape(to) + r".*", re.I) if subject_regex.search(subject): return True elif test == "user": regex = re.match("(\S+)@.*", to) if regex: to = regex.group(1) if Regex(r"^" + re.escape(to) + "$").search(subject): return True if Regex(r"(?:re|fw):\s*(?:\w+\s+)?" + re.escape(to) + "$")\ .search(subject): return True if Regex(r"\s*" + re.escape(to) + "[,:;!?-]$")\ .search(subject): return True if Regex(r"^" + re.escape(to) + "\s*[,:;!?-](\s).*")\ .search(subject): return True return False def check_outlook_message_id(self, msg, target=None): message_id = msg.msg.get("Message-ID") if not message_id: return msg_regex = Regex(r"^<[0-9a-f]{4}([0-9a-f]{8})\$[0-9a-f]{8}\$[" r"0-9a-f]{8}\@") regex = msg_regex.search(message_id) if not regex: return False timetocken = int(regex.group(1), 16) date = msg.msg.get("Date") x = 0.0023283064365387 y = 27111902.8329849 mail_date = time.mktime(email.utils.parsedate(date)) expected = int((mail_date * x) + y) if abs(timetocken - expected) < 250: return False received = msg.msg.get("Received") received_regex = Regex(r"(\s.?\d+ \S\S\S \d+ \d+:\d+:\d+ \S+).*?$") regex = received_regex.search(received) received_date = 0 if regex: received_date = time.mktime(email.utils.parsedate(regex.group())) expected = int((received_date * x) + y) return abs(timetocken - expected) >= 250 def check_messageid_not_usable(self, msg, target=None): list_unsubscribe = msg.msg.get("List-Unsubscribe") if list_unsubscribe: if Regex(r"<mailto:(?:leave-\S+|\S+-unsubscribe)\@\S+>$").search( list_unsubscribe): return True if self.gated_through_received_hdr_remover(msg): return True received = msg.msg.get("Received") if Regex(r"/CWT/DCE\)").search(received): return True if Regex(r"iPlanet Messaging Server").search(received): return True return False def check_header_count_range(self, msg, 
header, minr, maxr, target=None): """Check if the count of the header is withing the given range. The range is inclusive in both ranges. :param header: the header name :param minr: the minimum number of headers with the same name :param maxr: the minimum number of headers with the same name :return: True if the header count is withing the range. """ raw_headers = set(msg.get_raw_header(header)) return int(minr) <= len(raw_headers) <= int(maxr) def check_unresolved_template(self, msg, target=None): message = msg.raw_msg headers = message.split("\n") for header in headers: if Regex(r"%[A-Z][A-Z_-]").search(header) and not \ Regex(r"^(?:x-vms-to|x-uidl|x-face|to|cc|from|subject|" r"references|in-reply-to|(?:x-|resent-|" r"x-original-)?message-id):").search(header.lower()): return True return False def check_ratware_name_id(self, msg, target=None): """Check if message-id is ratware or not.""" message_id = msg.msg.get("Message-Id") from_header = msg.msg.get("From") if not message_id and not from_header: return False regex = Regex(r"<[A-Z]{28}\.([^>]+?)>").search(message_id) if regex: if Regex(r"\"[^\"]+\"\s*<" + regex.group(1) + ">").search( from_header): return True return False def check_in_TL_TLDS(self, address): if address in self["util_rb_tld"]: return True if address in self["util_rb_2tld"]: return True if address in self["util_rb_3tld"]: return True return False def is_domain_valid(self, domain): domain = domain.lower() if " " in domain: return False parts = domain.split(".") if len(parts) <= 1: return False elif not self.check_in_TL_TLDS(".".join(parts[1:])): return False return True def check_ratware_envelope_from(self, msg, target=None): """Check if envelope-from address is ratware or not.""" to_header = msg.msg.get("To") envelope_from = msg.sender_address if not to_header or not envelope_from: return False if Regex(r"^SRS\d=").search(envelope_from): return False regex = Regex(r"^([^@]+)@(.+)$").search(to_header) if regex: user = regex.group(1) dom = 
regex.group(2) if not self.is_domain_valid(dom): return False if Regex(r"\b" + dom + "." + user + "@").search(envelope_from): return True return False def gated_through_received_hdr_remover(self, msg, target=None): """Check if the email is gated through ezmlm""" txt = ''.join(msg.get_decoded_header("Mailing-List")) rcvd = ''.join(msg.get_decoded_header("Received")) if Regex(r"^contact \S+\@\S+\; run by ezmlm$").search(txt): dlto = ''.join(msg.get_decoded_header("Delivered-To")) mailing_list_re = Regex(r"^mailing list \S+\@\S+") qmail_re = Regex(r"qmail \d+ invoked (?:from " r"network|by .{3,20})\); \d+ ... \d+") if mailing_list_re.search(dlto) and qmail_re.search(rcvd): return True if not rcvd: return True if Regex(r"from groups\.msn\.com \(\S+\.msn\.com ").search(rcvd): return True return False def check_equal_from_domains(self, msg, target=None): """Check if the domain from `From` header and `EnvelopeFrom` header are different.""" from_addr = ''.join(msg.get_all_addr_header("From")) envfrom = msg.sender_address if not envfrom or not from_addr: return False try: fromdomain = from_addr.rsplit("@", 1)[-1] except (IndexError, AttributeError): return False try: envfromdomain = envfrom.rsplit("@", 1)[-1] except (IndexError, AttributeError): return False self.ctxt.log.debug("eval: From 2nd level domain: %s, " "EnvelopeFrom 2nd level domain: %s", fromdomain, envfromdomain) if envfromdomain.lower() not in fromdomain.lower(): return True return False def _check_date_received(self, msg): try: return self.get_local(msg, "date_received") except KeyError: pass all_times = self._get_received_header_times(msg) + [ self._get_date_header_time(msg) ] median = int(len(all_times)/2) date_received = sorted(all_times, reverse=True)[median] self.set_local(msg, "date_received", date_received) return date_received def received_within_months(self, msg, min, max, target=None): """Check if the date from received is in past""" if min == "undef": min = None if max == "undef": max = None try: 
if min: min = int(min) if max: max = int(max) except ValueError: self.ctxt.log.warn("HeaderEval::Plugin received_within_months " "min and max should be integer values") return False diff = relativedelta.relativedelta( datetime.datetime.utcnow(), self._check_date_received(msg) ) number_months = (diff.year or 0) * 12 + (diff.months or 0) if ((not min or number_months >= min) and (not max or number_months < max)): return True return False def check_for_shifted_date(self, msg, min=None, max=None, target=None): """Check if the difference between Date header and date from received headers its between min,max interval * min: minimum time express in hours * max: maximum time express in hours """ if min == "undef": min = None if max == "undef": max = None try: if min: min = int(min) if max: max = int(max) except ValueError: self.ctxt.log.warn("HeaderEval::Plugin check_for_shifted_date " "min and max should be integer values") return False diff = int(self._check_date_diff(msg).total_seconds() / 3600) if ((not min or diff >= min) and (not max or diff < max)): return True return False
gpl-2.0
Team-Hydra/android_kernel_samsung_klte
tools/perf/scripts/python/sctop.py
11180
1924
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
gpl-2.0
silentsee/blog
www/models.py
1
1629
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'xieshaoxin' from transwrap.db import next_id from transwrap.orm import FloatField,Model,StringField,BooleanFiled,IntegerField,TextField import time class User(Model): __table__ = 'user' id = StringField(primary_key=True, default=next_id(), type="varchar(50)") name = StringField(type='varchar(50)') email = StringField(updatable=False, type='varchar(50)') admin = BooleanFiled() passwd = StringField(type="varchar(50)") created_at = FloatField(updatable=False, default=time.time) image = StringField(type='varchar(500)') class Blog(Model): __table__ = 'blog' id = StringField(primary_key=True, default=next_id(), type="varchar(50)") user_id = StringField(updatable=False,type="varchar(50)") user_name = StringField(type='varchar(50)') user_image = StringField(type='varchar(500)') summary = StringField(type='varchar(200)') name = StringField(type="varchar(50)") content = TextField() created_at = FloatField(updatable=False, default=time.time) class Comment(Model): __table__ = 'comment' id = StringField(primary_key=True, default=next_id(), type="varchar(50)") user_id = StringField(updatable=False,type="varchar(50)") user_name = StringField(type='varchar(50)') user_image = StringField(type='varchar(500)') blog_id = StringField(updatable=False,type="varchar(50)") content = TextField() created_at = FloatField(updatable=False, default=time.time) if __name__ == '__main__': print User().__sql__() print Blog.__sql__() print Comment.__sql__()
gpl-2.0
tvall43/Samsung_SM-V700_Kernel
tools/perf/python/twatch.py
3213
1338
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, sample_period = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
rvs/gpdb
gpMgmt/bin/gppylib/system/fileSystemImplOs.py
54
1142
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2009. All Rights Reserved.
#
"""Operating-system-backed file system provider.

Implements the GpFileSystemProvider interface by passing each operation
straight through to the underlying operating system facilities.
"""
import os

from gppylib.gplog import *
from gppylib.utils import checkNotNone
from gppylib.system.fileSystemInterface import GpFileSystemProvider
from tempfile import NamedTemporaryFile

logger = get_default_logger()


class GpFileSystemProviderUsingOs(GpFileSystemProvider):
    """GpFileSystemProvider that delegates directly to the local OS."""

    def __init__(self):
        pass

    def destroy(self):
        """Shut this provider down; the OS-backed provider holds no resources."""
        pass

    def createNamedTemporaryFile(self):
        """Create and return a write-mode temporary file deleted on close.

        Uses python's NamedTemporaryFile, which on Windows may not allow
        the file to be opened for reading while it is still open for
        this write.
        """
        return NamedTemporaryFile('w', delete=True)
apache-2.0
theheros/kbengine
kbe/res/scripts/common/Lib/turtledemo/paint.py
3
1177
#!/usr/bin/env python3
"""       turtle-example-suite:

            tdemo_paint.py

A tiny event-driven paint program.

Mouse bindings on the canvas:
- left button:   move the turtle to the click point
- middle button: cycle to the next pen color
- right button:  toggle drawing/filling on and off
-------------------------------------------
Play around by clicking into the canvas
using all three mouse buttons.
-------------------------------------------
         To exit press STOP button
-------------------------------------------
"""
from turtle import *

def switchupdown(x=0, y=0):
    """Toggle between drawing (pen down, filling) and moving (pen up)."""
    drawing = pen()["pendown"]
    if not drawing:
        down()
        begin_fill()
    else:
        end_fill()
        up()

def changecolor(x=0, y=0):
    """Rotate the global color list one step and apply the new front color."""
    global colors
    front = colors.pop(0)
    colors.append(front)
    color(colors[0])

def main():
    """Configure the turtle, install the mouse bindings, start painting."""
    global colors
    # Cursor appearance: a small user-scalable circle drawn with a thick pen.
    shape("circle")
    resizemode("user")
    shapesize(.5)
    width(3)
    colors = ["red", "green", "blue", "yellow"]
    color(colors[0])
    switchupdown()
    # Buttons 1/2/3 = left/middle/right mouse button.
    onscreenclick(goto, 1)
    onscreenclick(changecolor, 2)
    onscreenclick(switchupdown, 3)
    return "EVENTLOOP"

if __name__ == "__main__":
    msg = main()
    print(msg)
    mainloop()
lgpl-3.0
msleal/amspy
amspy/examples/analytics/video_thumbnail/video_thumbnail.py
2
14588
""" Copyright (c) 2016, John Deutscher Description: Sample Python script for Video Thumbnail/Summarization processor License: MIT (see LICENSE.txt file for details) Documentation : https://azure.microsoft.com/en-us/documentation/articles/media-services-video-optical-character-recognition/ """ import os import json import amspy import time import sys #import pytz import urllib import logging import datetime from azure import * from azure.storage.blob import BlockBlobService from azure.storage.blob import ContentSettings ########################################################################################### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ########################################################################################### # ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR # AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR # PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS # ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE # DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED! 
########################################################################################### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ########################################################################################### # Load Azure app defaults try: with open('../../config.json') as configFile: configData = json.load(configFile) except FileNotFoundError: print_phase_message("ERROR: Expecting config.json in examples folder") sys.exit() account_name = configData['accountName'] account_key = configData['accountKey'] sto_account_name = configData['sto_accountName'] sto_accountKey = configData['sto_accountKey'] log_name = configData['logName'] log_level = configData['logLevel'] purge_log = configData['purgeLog'] #Initialization... print ("\n-----------------------= AMS Py =----------------------") print ("Azure Media Analytics - Video Thumbnail/Summary Sample") print ("for details see: https://azure.microsoft.com/en-us/documentation/articles/media-services-video-summarization/") print ("-------------------------------------------------------\n") #Remove old log file if requested (default behavior)... if (os.path.isdir('./log') != True): os.mkdir('log') if (purge_log.lower() == "yes"): if (os.path.isfile(log_name)): os.remove(log_name) #Basic Logging... logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=log_level, filename=log_name) # Get the access token... response = amspy.get_access_token(account_name, account_key) resjson = response.json() access_token = resjson["access_token"] #Some global vars... NAME = "movie" COUNTER = 0; ENCRYPTION = "1" # 0=None, StorageEncrypted=1, CommonEncryptionProtected=2, EnvelopeEncryptionProtected=4 ENCRYPTION_SCHEME = "StorageEncryption" # StorageEncryption or CommonEncryption. 
# =============================================================================
# Sample workflow: upload a local video to Azure Media Services, run the
# "Azure Media Video Thumbnails" media processor on it, and download the
# generated thumbnail blob(s) into ./output/.
#
# NOTE(review): this section depends on names defined earlier in the script
# (COUNTER, NAME, access_token, amspy, sto_account_name, sto_accountKey,
# BlockBlobService, ContentSettings, plus the datetime/os/time imports) --
# verify against the top of the file.
# =============================================================================

VIDEO_NAME = "movie.mp4"                # blob name used inside the asset container
VIDEO_PATH = "../assets/movie.mp4"      # local video file to upload
ASSET_FINAL_NAME = "Python Sample-Thumbnail"
PROCESSOR_NAME = "Azure Media Video Thumbnails"
THUMBNAIL_CONFIG = "thumbnail_config.json"


# Just a simple wrapper function to print the title of each of our phases
# to the console...
def print_phase_header(message):
    global COUNTER
    print("\n[" + str("%02d" % int(COUNTER)) + "] >>> " + message)
    COUNTER += 1


# This wrapper function prints our messages to the console with a timestamp...
def print_phase_message(message):
    time_stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(str(time_stamp) + ": " + message)


### get ams redirected url
response = amspy.get_url(access_token)
if response.status_code == 200:
    ams_redirected_rest_endpoint = str(response.url)
else:
    print_phase_message("GET Status: " + str(response.status_code) +
                        " - Getting Redirected URL ERROR." + str(response.content))
    exit(1)


######################### PHASE 1: UPLOAD #########################
### create an asset
print_phase_header("Creating a Media Asset")
response = amspy.create_media_asset(access_token, NAME)
if response.status_code == 201:
    resjson = response.json()
    asset_id = str(resjson['d']['Id'])
    print_phase_message("POST Status.............................: " + str(response.status_code))
    print_phase_message("Media Asset Name........................: " + NAME)
    print_phase_message("Media Asset Id..........................: " + asset_id)
else:
    print_phase_message("POST Status.............................: " + str(response.status_code) +
                        " - Media Asset: '" + NAME + "' Creation ERROR." + str(response.content))
    # FIX: asset_id is required by every later step; bail out like the
    # redirected-URL check above instead of crashing with a NameError later.
    exit(1)

### create an assetfile
print_phase_header("Creating a Media Assetfile (for the video file)")
response = amspy.create_media_assetfile(access_token, asset_id, VIDEO_NAME, "false", "false")
if response.status_code == 201:
    resjson = response.json()
    video_assetfile_id = str(resjson['d']['Id'])
    print_phase_message("POST Status.............................: " + str(response.status_code))
    print_phase_message("Media Assetfile Name....................: " + str(resjson['d']['Name']))
    print_phase_message("Media Assetfile Id......................: " + video_assetfile_id)
    print_phase_message("Media Assetfile IsPrimary...............: " + str(resjson['d']['IsPrimary']))
else:
    print_phase_message("POST Status: " + str(response.status_code) +
                        " - Media Assetfile: '" + VIDEO_NAME + "' Creation ERROR." + str(response.content))
    exit(1)  # video_assetfile_id is required for the metadata update below

### create an asset write access policy for uploading
print_phase_header("Creating an Asset Write Access Policy")
duration = "440"  # policy lifetime, minutes
response = amspy.create_asset_accesspolicy(access_token, "NewUploadPolicy", duration, "2")
if response.status_code == 201:
    resjson = response.json()
    write_accesspolicy_id = str(resjson['d']['Id'])
    print_phase_message("POST Status.............................: " + str(response.status_code))
    print_phase_message("Asset Access Policy Id..................: " + write_accesspolicy_id)
    print_phase_message("Asset Access Policy Duration/min........: " + str(resjson['d']['DurationInMinutes']))
else:
    print_phase_message("POST Status: " + str(response.status_code) +
                        " - Asset Write Access Policy Creation ERROR." + str(response.content))
    exit(1)

### create a sas locator
print_phase_header("Creating a write SAS Locator")
## INFO: If you need to upload your files immediately, you should set your StartTime value
# to five minutes before the current time. This is because there may be clock skew between
# your client machine and Media Services. Also, your StartTime value must be in the
# following DateTime format: YYYY-MM-DDTHH:mm:ssZ (for example, "2014-05-23T17:53:50Z").
# EDITED: Not providing starttime is the best approach to be able to upload a file immediately...
# starttime = datetime.datetime.now(pytz.timezone(time_zone)).strftime("%Y-%m-%dT%H:%M:%SZ")
# response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id, starttime)
response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id)
if response.status_code == 201:
    resjson = response.json()
    saslocator_id = str(resjson['d']['Id'])
    saslocator_baseuri = str(resjson['d']['BaseUri'])
    sto_asset_name = os.path.basename(os.path.normpath(saslocator_baseuri))
    saslocator_cac = str(resjson['d']['ContentAccessComponent'])
    print_phase_message("POST Status.............................: " + str(response.status_code))
    print_phase_message("SAS URL Locator StartTime...............: " + str(resjson['d']['StartTime']))
    print_phase_message("SAS URL Locator Id......................: " + saslocator_id)
    print_phase_message("SAS URL Locator Base URI................: " + saslocator_baseuri)
    print_phase_message("SAS URL Locator Content Access Component: " + saslocator_cac)
else:
    print_phase_message("POST Status: " + str(response.status_code) +
                        " - SAS URL Locator Creation ERROR." + str(response.content))
    exit(1)

### Use the Azure Blob Service library from the Azure Storage SDK.
# The ContentAccessComponent is a SAS query string; strip its leading '?'.
block_blob_service = BlockBlobService(account_name=sto_account_name,
                                      sas_token=saslocator_cac[1:])

### Define a callback method to show progress of large uploads
def uploadCallback(current, total):
    if current is not None:
        # FIX: the original printed `current` in raw bytes next to a total
        # converted to MB; both values are now reported in MB.
        print_phase_message('{0:,.0f}/{1:,.0f} MB'.format(current / 1024 / 1024,
                                                          total / 1024 / 1024))

### Start upload the video file
print_phase_header("Uploading the Video File")
with open(VIDEO_PATH, mode='rb') as video_file:  # renamed: don't shadow builtin 'file'
    video_content = video_file.read()
    video_content_length = len(video_content)

response = block_blob_service.create_blob_from_path(
    sto_asset_name,
    VIDEO_NAME,
    VIDEO_PATH,
    max_connections=5,
    content_settings=ContentSettings(content_type='video/mp4'),
    progress_callback=uploadCallback,
)
if response is None:
    # create_blob_from_path returns None on success -- TODO confirm against
    # the installed azure-storage version.
    print_phase_message("PUT Status..............................: 201")
    print_phase_message("Video File Uploaded.....................: OK")

### update the assetfile metadata after uploading
print_phase_header("Updating the Video Assetfile")
response = amspy.update_media_assetfile(access_token, asset_id, video_assetfile_id,
                                        video_content_length, VIDEO_NAME)
if response.status_code == 204:
    print_phase_message("MERGE Status............................: " + str(response.status_code))
    print_phase_message("Assetfile Content Length Updated........: " + str(video_content_length))
else:
    print_phase_message("MERGE Status............................: " + str(response.status_code) +
                        " - Assetfile: '" + VIDEO_NAME + "' Update ERROR." + str(response.content))

### delete the locator, so that it can't be used again
print_phase_header("Deleting the Locator")
response = amspy.delete_sas_locator(access_token, saslocator_id)
if response.status_code == 204:
    print_phase_message("DELETE Status...........................: " + str(response.status_code))
    print_phase_message("SAS URL Locator Deleted.................: " + saslocator_id)
else:
    print_phase_message("DELETE Status...........................: " + str(response.status_code) +
                        " - SAS URL Locator: '" + saslocator_id + "' Delete ERROR." + str(response.content))

### delete the asset access policy
print_phase_header("Deleting the Access Policy")  # FIX: typo "Acess" in output
response = amspy.delete_asset_accesspolicy(access_token, write_accesspolicy_id)
if response.status_code == 204:
    print_phase_message("DELETE Status...........................: " + str(response.status_code))
    print_phase_message("Asset Access Policy Deleted.............: " + write_accesspolicy_id)
else:
    print_phase_message("DELETE Status...........................: " + str(response.status_code) +
                        " - Asset Access Policy: '" + write_accesspolicy_id + "' Delete ERROR." + str(response.content))

### get the media processor for Thumbnail
print_phase_header("Getting the Media Processor for Thumbnail")
response = amspy.list_media_processor(access_token)
if response.status_code == 200:
    resjson = response.json()
    print_phase_message("GET Status..............................: " + str(response.status_code))
    for mp in resjson['d']['results']:
        if str(mp['Name']) == PROCESSOR_NAME:
            processor_id = str(mp['Id'])
            print_phase_message("MEDIA Processor Id......................: " + processor_id)
            print_phase_message("MEDIA Processor Name....................: " + PROCESSOR_NAME)
else:
    print_phase_message("GET Status: " + str(response.status_code) +
                        " - Media Processors Listing ERROR." + str(response.content))
    exit(1)

## create a Video Thumbnail Job
print_phase_header("Creating a Video Thumbnail job to process the content")
with open(THUMBNAIL_CONFIG, mode='r') as config_file:
    configuration = config_file.read()

response = amspy.encode_mezzanine_asset(access_token, processor_id, asset_id,
                                        ASSET_FINAL_NAME, configuration)
if response.status_code == 201:
    resjson = response.json()
    job_id = str(resjson['d']['Id'])
    print_phase_message("POST Status.............................: " + str(response.status_code))
    print_phase_message("Media Job Id............................: " + job_id)
else:
    print_phase_message("POST Status.............................: " + str(response.status_code) +
                        " - Media Job Creation ERROR." + str(response.content))
    exit(1)

### list a media job (poll until the job reports an EndTime)
print_phase_header("Getting the Media Job Status")
flag = 1
while flag:
    response = amspy.list_media_job(access_token, job_id)
    if response.status_code == 200:
        resjson = response.json()
        job_state = str(resjson['d']['State'])
        if resjson['d']['EndTime'] is not None:
            joboutputassets_uri = resjson['d']['OutputMediaAssets']['__deferred']['uri']
            flag = 0
        print_phase_message("GET Status..............................: " + str(response.status_code))
        print_phase_message("Media Job Status........................: " + amspy.translate_job_state(job_state))
    else:
        print_phase_message("GET Status..............................: " + str(response.status_code) +
                            " - Media Job: '" + asset_id + "' Listing ERROR." + str(response.content))
    if flag:
        time.sleep(5)  # FIX: only wait while the job is still running

## getting the output Asset id
print_phase_header("Getting the Thumbnail Media Asset Id")
response = amspy.get_url(access_token, joboutputassets_uri, False)
if response.status_code == 200:
    resjson = response.json()
    thumbnail_asset_id = resjson['d']['results'][0]['Id']
    print_phase_message("GET Status..............................: " + str(response.status_code))
    print_phase_message("Thumbnail Media Asset Id................: " + thumbnail_asset_id)
else:
    print_phase_message("GET Status..............................: " + str(response.status_code) +
                        " - Media Job Output Asset: '" + job_id + "' Getting ERROR." + str(response.content))
    exit(1)

# Get Asset by using the list_media_asset method and the Asset ID
response = amspy.list_media_asset(access_token, thumbnail_asset_id)
if response.status_code == 200:
    resjson = response.json()
    # Get the container name from the Uri
    outputAssetContainer = resjson['d']['Uri'].split('/')[3]

    ### Use the Azure Blob Service library from the Azure Storage SDK to
    ### download the Video Thumbnail
    block_blob_service = BlockBlobService(account_name=sto_account_name,
                                          account_key=sto_accountKey)
    generator = block_blob_service.list_blobs(outputAssetContainer)
    for blob in generator:
        print_phase_message("Output File Name........................: " + blob.name)
        block_blob_service.get_blob_to_path(outputAssetContainer, blob.name,
                                            "output/" + blob.name)
mit
petewarden/tensorflow
tensorflow/python/ops/sdca_ops.py
42
1261
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed unde_sdca_opsr the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A Dual Coordinate Ascent optimizer library for training fast linear models. """ # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_sdca_ops import * # pylint: enable=wildcard-import ops.NotDifferentiable("SdcaFprint") ops.NotDifferentiable("SdcaOptimizer") ops.NotDifferentiable("SdcaOptimizerV2") ops.NotDifferentiable("SdcaShrinkL1")
apache-2.0
endlessm/chromium-browser
third_party/tlslite/tlslite/integration/httptlsconnection.py
115
4314
# Authors:
#   Trevor Perrin
#   Kees Bos - Added ignoreAbruptClose parameter
#   Dimitris Moraitis - Anon ciphersuites
#   Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.

"""TLS Lite + httplib."""

import socket
try:
    # Python 2
    import httplib
except ImportError:
    # Python 3
    from http import client as httplib
from tlslite.tlsconnection import TLSConnection
from tlslite.integration.clienthelper import ClientHelper


class HTTPTLSConnection(httplib.HTTPConnection, ClientHelper):
    """This class extends L{httplib.HTTPConnection} to support TLS."""

    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 source_address=None,
                 username=None, password=None,
                 certChain=None, privateKey=None,
                 checker=None,
                 settings=None,
                 ignoreAbruptClose=False,
                 anon=False):
        """Create a new HTTPTLSConnection.

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or you can do
        certificate-based server authentication with one of these
        argument combinations:
         - x509Fingerprint

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.

        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later.  The handshake is
        performed only when this class needs to connect with the
        server.  Thus you should be prepared to handle TLS-specific
        exceptions when calling methods inherited from
        L{httplib.HTTPConnection} such as request(), connect(), and
        send().  See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type host: str
        @param host: Server to connect to.

        @type port: int
        @param port: Port to connect to.

        @type username: str
        @param username: SRP username.  Requires the 'password' argument.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.x509certchain.X509CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument.  Excludes the SRP arguments.

        @type privateKey: L{tlslite.utils.rsakey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument.  Excludes the SRP arguments.

        @type checker: L{tlslite.checker.Checker}
        @param checker: Callable object called after handshaking to
        evaluate the connection and raise an Exception if necessary.

        @type settings: L{tlslite.handshakesettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.

        @type ignoreAbruptClose: bool
        @param ignoreAbruptClose: ignore the TLSAbruptCloseError on
        unexpected hangup.

        @type anon: bool
        @param anon: whether to use anonymous (unauthenticated)
        ciphersuites; passed through to L{ClientHelper}.
        """
        # NOTE(review): the two complementary `if` statements below act as an
        # if/else; kept byte-identical here to avoid any behavioral change.
        if source_address:
            httplib.HTTPConnection.__init__(self, host, port, strict,
                                            timeout, source_address)
        if not source_address:
            httplib.HTTPConnection.__init__(self, host, port, strict,
                                            timeout)
        self.ignoreAbruptClose = ignoreAbruptClose
        ClientHelper.__init__(self,
                              username, password,
                              certChain, privateKey,
                              checker,
                              settings,
                              anon)

    def connect(self):
        """Open the TCP connection, wrap the socket in a TLSConnection,
        and perform the TLS handshake via ClientHelper."""
        httplib.HTTPConnection.connect(self)
        self.sock = TLSConnection(self.sock)
        self.sock.ignoreAbruptClose = self.ignoreAbruptClose
        ClientHelper._handshake(self, self.sock)
bsd-3-clause
ptchankue/youtube-dl
youtube_dl/extractor/appleconnect.py
139
1848
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    str_to_int,
    ExtractorError
)


class AppleConnectIE(InfoExtractor):
    """Extractor for video posts on Apple Music Connect (itunes.apple.com)."""

    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
    _TEST = {
        'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
        'md5': '10d0f2799111df4cb1c924520ca78f98',
        'info_dict': {
            'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
            'ext': 'm4v',
            'title': 'Energy',
            'uploader': 'Drake',
            'thumbnail': 'http://is5.mzstatic.com/image/thumb/Video5/v4/78/61/c5/7861c5fa-ad6d-294b-1464-cf7605b911d6/source/1920x1080sr.jpg',
            'upload_date': '20150710',
            'timestamp': 1436545535,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The video metadata is embedded in the page as a JSON blob; if it
        # is absent, this post is not a video post.
        try:
            raw_json = self._html_search_regex(
                r'class="auc-video-data">(\{.*?\})', webpage, 'json')
        except ExtractorError:
            raise ExtractorError('This post doesn\'t contain a video', expected=True)
        video_data = self._parse_json(raw_json, video_id)

        # Upload time and like count live outside the JSON blob.
        timestamp = str_to_int(self._html_search_regex(
            r'data-timestamp="(\d+)"', webpage, 'timestamp'))
        like_count = str_to_int(self._html_search_regex(
            r'(\d+) Loves', webpage, 'like count'))

        meta = dict(
            id=video_id,
            url=video_data['sslSrc'],
            title=video_data['title'],
            description=video_data['description'],
            uploader=video_data['artistName'],
            thumbnail=video_data['artworkUrl'],
            timestamp=timestamp,
            like_count=like_count,
        )
        return meta
unlicense
jgcaaprom/android_external_chromium_org
third_party/cython/src/Cython/Compiler/ExprNodes.py
86
440084
# # Parse tree nodes for expressions # import cython cython.declare(error=object, warning=object, warn_once=object, InternalError=object, CompileError=object, UtilityCode=object, TempitaUtilityCode=object, StringEncoding=object, operator=object, Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object, list_type=object, tuple_type=object, set_type=object, dict_type=object, unicode_type=object, str_type=object, bytes_type=object, type_type=object, Builtin=object, Symtab=object, Utils=object, find_coercion_error=object, debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object, bytearray_type=object, slice_type=object) import sys import copy import operator from Errors import error, warning, warn_once, InternalError, CompileError from Errors import hold_errors, release_errors, held_errors, report_error from Code import UtilityCode, TempitaUtilityCode import StringEncoding import Naming import Nodes from Nodes import Node import PyrexTypes from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \ unspecified_type import TypeSlots from Builtin import list_type, tuple_type, set_type, dict_type, type_type, \ unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type import Builtin import Symtab from Cython import Utils from Annotate import AnnotationItem from Cython.Compiler import Future from Cython.Debugging import print_call_chain from DebugFlags import debug_disposal_code, debug_temp_alloc, \ debug_coercion try: from __builtin__ import basestring except ImportError: basestring = str # Python 3 try: from builtins import bytes except ImportError: bytes = str # Python 2 class NotConstant(object): _obj = None def __new__(cls): if NotConstant._obj is None: NotConstant._obj = super(NotConstant, cls).__new__(cls) return NotConstant._obj def __repr__(self): return "<NOT CONSTANT>" not_a_constant = NotConstant() constant_value_not_set = object() # error messages when coercing from key[0] to key[1] 
coercion_error_dict = { # string related errors (Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.", (Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.", (Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.", (Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.", (Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required", (Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.", (Builtin.bytes_type, Builtin.basestring_type) : "Cannot convert 'bytes' object to basestring implicitly. This is not portable to Py3.", (Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.", (Builtin.basestring_type, Builtin.bytes_type) : "Cannot convert 'basestring' object to bytes implicitly. This is not portable.", (Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')", (Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. 
This is not portable.", (Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).", (Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).", (Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).", (PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required", (PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required", } def find_coercion_error(type_tuple, default, env): err = coercion_error_dict.get(type_tuple) if err is None: return default elif ((PyrexTypes.c_char_ptr_type in type_tuple or PyrexTypes.c_uchar_ptr_type in type_tuple) and env.directives['c_string_encoding']): if type_tuple[1].is_pyobject: return default elif env.directives['c_string_encoding'] in ('ascii', 'default'): return default else: return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name else: return err def default_str_type(env): return { 'bytes': bytes_type, 'bytearray': bytearray_type, 'str': str_type, 'unicode': unicode_type }.get(env.directives['c_string_type']) def check_negative_indices(*nodes): """ Raise a warning on nodes that are known to have negative numeric values. Used to find (potential) bugs inside of "wraparound=False" sections. 
""" for node in nodes: if (node is None or not isinstance(node.constant_result, (int, float, long))): continue if node.constant_result < 0: warning(node.pos, "the result of using negative indices inside of " "code sections marked as 'wraparound=False' is " "undefined", level=1) def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None): if not seq_node.is_sequence_constructor: if seq_type is None: seq_type = seq_node.infer_type(env) if seq_type is tuple_type: # tuples are immutable => we can safely follow assignments if seq_node.cf_state and len(seq_node.cf_state) == 1: try: seq_node = seq_node.cf_state[0].rhs except AttributeError: pass if seq_node is not None and seq_node.is_sequence_constructor: if index_node is not None and index_node.has_constant_result(): try: item = seq_node.args[index_node.constant_result] except (ValueError, TypeError, IndexError): pass else: return item.infer_type(env) # if we're lucky, all items have the same type item_types = set([item.infer_type(env) for item in seq_node.args]) if len(item_types) == 1: return item_types.pop() return None class ExprNode(Node): # subexprs [string] Class var holding names of subexpr node attrs # type PyrexType Type of the result # result_code string Code fragment # result_ctype string C type of result_code if different from type # is_temp boolean Result is in a temporary variable # is_sequence_constructor # boolean Is a list or tuple constructor expression # is_starred boolean Is a starred expression (e.g. '*a') # saved_subexpr_nodes # [ExprNode or [ExprNode or None] or None] # Cached result of subexpr_nodes() # use_managed_ref boolean use ref-counted temps/assignments/etc. # result_is_used boolean indicates that the result will be dropped and the # result_code/temp_result can safely be set to None result_ctype = None type = None temp_code = None old_temp = None # error checker for multiple frees etc. 
use_managed_ref = True # can be set by optimisation transforms result_is_used = True # The Analyse Expressions phase for expressions is split # into two sub-phases: # # Analyse Types # Determines the result type of the expression based # on the types of its sub-expressions, and inserts # coercion nodes into the expression tree where needed. # Marks nodes which will need to have temporary variables # allocated. # # Allocate Temps # Allocates temporary variables where needed, and fills # in the result_code field of each node. # # ExprNode provides some convenience routines which # perform both of the above phases. These should only # be called from statement nodes, and only when no # coercion nodes need to be added around the expression # being analysed. In that case, the above two phases # should be invoked separately. # # Framework code in ExprNode provides much of the common # processing for the various phases. It makes use of the # 'subexprs' class attribute of ExprNodes, which should # contain a list of the names of attributes which can # hold sub-nodes or sequences of sub-nodes. # # The framework makes use of a number of abstract methods. # Their responsibilities are as follows. # # Declaration Analysis phase # # analyse_target_declaration # Called during the Analyse Declarations phase to analyse # the LHS of an assignment or argument of a del statement. # Nodes which cannot be the LHS of an assignment need not # implement it. # # Expression Analysis phase # # analyse_types # - Call analyse_types on all sub-expressions. # - Check operand types, and wrap coercion nodes around # sub-expressions where needed. # - Set the type of this node. # - If a temporary variable will be required for the # result, set the is_temp flag of this node. # # analyse_target_types # Called during the Analyse Types phase to analyse # the LHS of an assignment or argument of a del # statement. Similar responsibilities to analyse_types. 
# # target_code # Called by the default implementation of allocate_target_temps. # Should return a C lvalue for assigning to the node. The default # implementation calls calculate_result_code. # # check_const # - Check that this node and its subnodes form a # legal constant expression. If so, do nothing, # otherwise call not_const. # # The default implementation of check_const # assumes that the expression is not constant. # # check_const_addr # - Same as check_const, except check that the # expression is a C lvalue whose address is # constant. Otherwise, call addr_not_const. # # The default implementation of calc_const_addr # assumes that the expression is not a constant # lvalue. # # Code Generation phase # # generate_evaluation_code # - Call generate_evaluation_code for sub-expressions. # - Perform the functions of generate_result_code # (see below). # - If result is temporary, call generate_disposal_code # on all sub-expressions. # # A default implementation of generate_evaluation_code # is provided which uses the following abstract methods: # # generate_result_code # - Generate any C statements necessary to calculate # the result of this node from the results of its # sub-expressions. # # calculate_result_code # - Should return a C code fragment evaluating to the # result. This is only called when the result is not # a temporary. # # generate_assignment_code # Called on the LHS of an assignment. # - Call generate_evaluation_code for sub-expressions. # - Generate code to perform the assignment. # - If the assignment absorbed a reference, call # generate_post_assignment_code on the RHS, # otherwise call generate_disposal_code on it. # # generate_deletion_code # Called on an argument of a del statement. # - Call generate_evaluation_code for sub-expressions. # - Generate code to perform the deletion. # - Call generate_disposal_code on all sub-expressions. 
# # is_sequence_constructor = 0 is_string_literal = 0 is_attribute = 0 is_subscript = 0 saved_subexpr_nodes = None is_temp = 0 is_target = 0 is_starred = 0 constant_result = constant_value_not_set # whether this node with a memoryview type should be broadcast memslice_broadcast = False child_attrs = property(fget=operator.attrgetter('subexprs')) def not_implemented(self, method_name): print_call_chain(method_name, "not implemented") ### raise InternalError( "%s.%s not implemented" % (self.__class__.__name__, method_name)) def is_lvalue(self): return 0 def is_addressable(self): return self.is_lvalue() and not self.type.is_memoryviewslice def is_ephemeral(self): # An ephemeral node is one whose result is in # a Python temporary and we suspect there are no # other references to it. Certain operations are # disallowed on such values, since they are # likely to result in a dangling pointer. return self.type.is_pyobject and self.is_temp def subexpr_nodes(self): # Extract a list of subexpression nodes based # on the contents of the subexprs class attribute. nodes = [] for name in self.subexprs: item = getattr(self, name) if item is not None: if type(item) is list: nodes.extend(item) else: nodes.append(item) return nodes def result(self): if self.is_temp: return self.temp_code else: return self.calculate_result_code() def result_as(self, type = None): # Return the result code cast to the specified C type. if (self.is_temp and self.type.is_pyobject and type != py_object_type): # Allocated temporaries are always PyObject *, which may not # reflect the actual type (e.g. an extension type) return typecast(type, py_object_type, self.result()) return typecast(type, self.ctype(), self.result()) def py_result(self): # Return the result code cast to PyObject *. return self.result_as(py_object_type) def ctype(self): # Return the native C type of the result (i.e. the # C type of the result_code expression). 
return self.result_ctype or self.type def get_constant_c_result_code(self): # Return the constant value of this node as a result code # string, or None if the node is not constant. This method # can be called when the constant result code is required # before the code generation phase. # # The return value is a string that can represent a simple C # value, a constant C name or a constant C expression. If the # node type depends on Python code, this must return None. return None def calculate_constant_result(self): # Calculate the constant compile time result value of this # expression and store it in ``self.constant_result``. Does # nothing by default, thus leaving ``self.constant_result`` # unknown. If valid, the result can be an arbitrary Python # value. # # This must only be called when it is assured that all # sub-expressions have a valid constant_result value. The # ConstantFolding transform will do this. pass def has_constant_result(self): return self.constant_result is not constant_value_not_set and \ self.constant_result is not not_a_constant def compile_time_value(self, denv): # Return value of compile-time expression, or report error. error(self.pos, "Invalid compile-time expression") def compile_time_value_error(self, e): error(self.pos, "Error in compile-time expression: %s: %s" % ( e.__class__.__name__, e)) # ------------- Declaration Analysis ---------------- def analyse_target_declaration(self, env): error(self.pos, "Cannot assign to or delete this") # ------------- Expression Analysis ---------------- def analyse_const_expression(self, env): # Called during the analyse_declarations phase of a # constant expression. Analyses the expression's type, # checks whether it is a legal const expression, # and determines its value. node = self.analyse_types(env) node.check_const() return node def analyse_expressions(self, env): # Convenience routine performing both the Type # Analysis and Temp Allocation phases for a whole # expression. 
        return self.analyse_types(env)

    def analyse_target_expression(self, env, rhs):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for the LHS of
        # an assignment.
        return self.analyse_target_types(env)

    def analyse_boolean_expression(self, env):
        #  Analyse expression and coerce to a boolean.
        node = self.analyse_types(env)
        bool = node.coerce_to_boolean(env)
        return bool

    def analyse_temp_boolean_expression(self, env):
        #  Analyse boolean expression and coerce result into
        #  a temporary. This is used when a branch is to be
        #  performed on the result and we won't have an
        #  opportunity to ensure disposal code is executed
        #  afterwards. By forcing the result into a temporary,
        #  we ensure that all disposal has been done by the
        #  time we get the result.
        node = self.analyse_types(env)
        return node.coerce_to_boolean(env).coerce_to_simple(env)

    # --------------- Type Inference -----------------

    def type_dependencies(self, env):
        # Returns the list of entries whose types must be determined
        # before the type of self can be inferred.
        if hasattr(self, 'type') and self.type is not None:
            # type already known => no outstanding dependencies
            return ()
        # collect dependencies from all sub-expressions (flattened tuple)
        return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())

    def infer_type(self, env):
        # Attempt to deduce the type of self.
        # Differs from analyse_types as it avoids unnecessary
        # analysis of subexpressions, but can assume everything
        # in self.type_dependencies() has been resolved.
        if hasattr(self, 'type') and self.type is not None:
            return self.type
        elif hasattr(self, 'entry') and self.entry is not None:
            return self.entry.type
        else:
            self.not_implemented("infer_type")

    def nonlocally_immutable(self):
        # Returns whether this variable is a safe reference, i.e.
        # can't be modified as part of globals or closures.
        return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction

    # --------------- Type Analysis ------------------

    def analyse_as_module(self, env):
        # If this node can be interpreted as a reference to a
        # cimported module, return its scope, else None.
        return None

    def analyse_as_type(self, env):
        # If this node can be interpreted as a reference to a
        # type, return that type, else None.
        return None

    def analyse_as_extension_type(self, env):
        # If this node can be interpreted as a reference to an
        # extension type or builtin type, return its type, else None.
        return None

    def analyse_types(self, env):
        self.not_implemented("analyse_types")

    def analyse_target_types(self, env):
        return self.analyse_types(env)

    def nogil_check(self, env):
        # By default, any expression based on Python objects is
        # prevented in nogil environments. Subtypes must override
        # this if they can work without the GIL.
        if self.type and self.type.is_pyobject:
            self.gil_error()

    def gil_assignment_check(self, env):
        if env.nogil and self.type.is_pyobject:
            error(self.pos, "Assignment of Python object not allowed without gil")

    def check_const(self):
        self.not_const()
        return False

    def not_const(self):
        error(self.pos, "Not allowed in a constant expression")

    def check_const_addr(self):
        self.addr_not_const()
        return False

    def addr_not_const(self):
        error(self.pos, "Address is not constant")

    # ----------------- Result Allocation -----------------

    def result_in_temp(self):
        #  Return true if result is in a temporary owned by
        #  this node or one of its subexpressions. Overridden
        #  by certain nodes which can share the result of
        #  a subnode.
        return self.is_temp

    def target_code(self):
        #  Return code fragment for use as LHS of a C assignment.
        return self.calculate_result_code()

    def calculate_result_code(self):
        self.not_implemented("calculate_result_code")

#    def release_target_temp(self, env):
#        #  Release temporaries used by LHS of an assignment.
#        self.release_subexpr_temps(env)

    def allocate_temp_result(self, code):
        #  Allocate a temporary C variable for this node's result.
        #  Must not be called twice for the same node.
        if self.temp_code:
            raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
        type = self.type
        if not type.is_void:
            if type.is_pyobject:
                # all Python objects share the generic PyObject* temp type
                type = PyrexTypes.py_object_type
            self.temp_code = code.funcstate.allocate_temp(
                type, manage_ref=self.use_managed_ref)
        else:
            self.temp_code = None

    def release_temp_result(self, code):
        #  Return this node's result temp to the function's temp pool.
        #  Double release and release-without-allocate are hard errors.
        if not self.temp_code:
            if not self.result_is_used:
                # not used anyway, so ignore if not set up
                return
            if self.old_temp:
                raise RuntimeError("temp %s released multiple times in %s" % (
                    self.old_temp, self.__class__.__name__))
            else:
                raise RuntimeError("no temp, but release requested in %s" % (
                    self.__class__.__name__))
        code.funcstate.release_temp(self.temp_code)
        self.old_temp = self.temp_code
        self.temp_code = None

    # ---------------- Code Generation -----------------

    def make_owned_reference(self, code):
        """
        If result is a pyobject, make sure we own a reference to it.
        If the result is in a temp, it is already a new reference.
        """
        if self.type.is_pyobject and not self.result_in_temp():
            code.put_incref(self.result(), self.ctype())

    def make_owned_memoryviewslice(self, code):
        """
        Make sure we own the reference to this memoryview slice.
        """
        if not self.result_in_temp():
            code.put_incref_memoryviewslice(self.result(),
                                            have_gil=self.in_nogil_context)

    def generate_evaluation_code(self, code):
        #  Generate code to evaluate this node and
        #  its sub-expressions, and dispose of any
        #  temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)

        code.mark_pos(self.pos)
        if self.is_temp:
            self.allocate_temp_result(code)

        self.generate_result_code(code)
        if self.is_temp:
            # If we are temp we do not need to wait until this node is disposed
            # before disposing children.
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)

    def generate_subexpr_evaluation_code(self, code):
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)

    def generate_result_code(self, code):
        self.not_implemented("generate_result_code")

    def generate_disposal_code(self, code):
        #  Generate code that drops this node's owned reference(s).
        if self.is_temp:
            if self.result():
                if self.type.is_pyobject:
                    code.put_decref_clear(self.result(), self.ctype())
                elif self.type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=not self.in_nogil_context)
        else:
            # Already done if self.is_temp
            self.generate_subexpr_disposal_code(code)

    def generate_subexpr_disposal_code(self, code):
        #  Generate code to dispose of temporary results
        #  of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)

    def generate_post_assignment_code(self, code):
        #  After the result has been assigned away, null out our
        #  reference so disposal code does not double-free it.
        if self.is_temp:
            if self.type.is_pyobject:
                code.putln("%s = 0;" % self.result())
            elif self.type.is_memoryviewslice:
                code.putln("%s.memview = NULL;" % self.result())
                code.putln("%s.data = NULL;" % self.result())
        else:
            self.generate_subexpr_disposal_code(code)

    def generate_assignment_code(self, rhs, code):
        #  Stub method for nodes which are not legal as
        #  the LHS of an assignment. An error will have
        #  been reported earlier.
        pass

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        #  Stub method for nodes that are not legal as
        #  the argument of a del statement. An error
        #  will have been reported earlier.
        pass

    def free_temps(self, code):
        #  Release this node's result temp (if any), otherwise
        #  recurse into sub-expressions.
        if self.is_temp:
            if not self.type.is_void:
                self.release_temp_result(code)
        else:
            self.free_subexpr_temps(code)

    def free_subexpr_temps(self, code):
        for sub in self.subexpr_nodes():
            sub.free_temps(code)

    def generate_function_definitions(self, env, code):
        pass

    # ---------------- Annotation ---------------------

    def annotate(self, code):
        for node in self.subexpr_nodes():
            node.annotate(code)

    # ----------------- Coercion ----------------------

    def coerce_to(self, dst_type, env):
        #   Coerce the result so that it can be assigned to
        #   something of type dst_type. If processing is necessary,
        #   wraps this node in a coercion node and returns that.
        #   Otherwise, returns this node unchanged.
        #
        #   This method is called during the analyse_expressions
        #   phase of the src_node's processing.
        #
        #   Note that subclasses that override this (especially
        #   ConstNodes) must not (re-)set their own .type attribute
        #   here. Since expression nodes may turn up in different
        #   places in the tree (e.g. inside of CloneNodes in cascaded
        #   assignments), this method must return a new node instance
        #   if it changes the type.
        #
        src = self
        src_type = self.type

        if self.check_for_coercion_error(dst_type, env):
            return self

        if dst_type.is_reference and not src_type.is_reference:
            dst_type = dst_type.ref_base_type

        if src_type.is_const:
            src_type = src_type.const_base_type

        if src_type.is_fused or dst_type.is_fused:
            # See if we are coercing a fused function to a pointer to a
            # specialized function
            if (src_type.is_cfunction and not dst_type.is_fused and
                    dst_type.is_ptr and dst_type.base_type.is_cfunction):
                dst_type = dst_type.base_type
                for signature in src_type.get_all_specialized_function_types():
                    if signature.same_as(dst_type):
                        src.type = signature
                        src.entry = src.type.entry
                        src.entry.used = True
                        return self

            if src_type.is_fused:
                error(self.pos, "Type is not specialized")
            else:
                error(self.pos, "Cannot coerce to a type that is not specialized")

            self.type = error_type
            return self

        if self.coercion_type is not None:
            # This is purely for error checking purposes!
            node = NameNode(self.pos, name='', type=self.coercion_type)
            node.coerce_to(dst_type, env)

        if dst_type.is_memoryviewslice:
            import MemoryView
            if not src.type.is_memoryviewslice:
                if src.type.is_pyobject:
                    src = CoerceToMemViewSliceNode(src, dst_type, env)
                elif src.type.is_array:
                    src = CythonArrayNode.from_carray(src, env).coerce_to(
                        dst_type, env)
                elif not src_type.is_error:
                    error(self.pos,
                          "Cannot convert '%s' to memoryviewslice" % (src_type,))
            elif not MemoryView.src_conforms_to_dst(
                    src.type, dst_type, broadcast=self.memslice_broadcast):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype

                error(self.pos, msg % tup)

        elif dst_type.is_pyobject:
            if not src.type.is_pyobject:
                if dst_type is bytes_type and src.type.is_int:
                    src = CoerceIntToBytesNode(src, env)
                else:
                    src = CoerceToPyTypeNode(src, env, type=dst_type)
            if not src.type.subtype_of(dst_type):
                if src.constant_result is not None:
                    src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            src = CoerceFromPyTypeNode(dst_type, src, env)
        elif (dst_type.is_complex
              and src_type != dst_type
              and dst_type.assignable_from(src_type)):
            src = CoerceToComplexNode(src, dst_type, env)
        else: # neither src nor dst are py types
            # Added the string comparison, since for c types that
            # is enough, but Cython gets confused when the types are
            # in different pxi files.
            if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
                self.fail_assignment(dst_type)
        return src

    def fail_assignment(self, dst_type):
        error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))

    def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
        #  Report a registered coercion error message for this
        #  (src, dst) type pair, if any.  Returns True if an error
        #  was reported.
        if fail and not default:
            default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
        message = find_coercion_error((self.type, dst_type), default, env)
        if message is not None:
            error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
            return True
        if fail:
            self.fail_assignment(dst_type)
            return True
        return False

    def coerce_to_pyobject(self, env):
        return self.coerce_to(PyrexTypes.py_object_type, env)

    def coerce_to_boolean(self, env):
        #  Coerce result to something acceptable as
        #  a boolean value.

        # if it's constant, calculate the result now
        if self.has_constant_result():
            bool_value = bool(self.constant_result)
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)

        type = self.type
        if type.is_enum or type.is_error:
            return self
        elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
            return CoerceToBooleanNode(self, env)
        else:
            error(self.pos, "Type '%s' not acceptable as a boolean" % type)
            return self

    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.type.is_int:
            return self
        else:
            return self.coerce_to(PyrexTypes.c_long_type, env)

    def coerce_to_temp(self, env):
        #  Ensure that the result is in a temporary.
        if self.result_in_temp():
            return self
        else:
            return CoerceToTempNode(self, env)

    def coerce_to_simple(self, env):
        #  Ensure that the result is simple (see is_simple).
        if self.is_simple():
            return self
        else:
            return self.coerce_to_temp(env)

    def is_simple(self):
        #  A node is simple if its result is something that can
        #  be referred to without performing any operations, e.g.
        #  a constant, local var, C global var, struct member
        #  reference, or temporary.
        return self.result_in_temp()

    def may_be_none(self):
        #  Only Python objects and memoryview slices can hold None.
        if self.type and not (self.type.is_pyobject or
                              self.type.is_memoryviewslice):
            return False
        if self.has_constant_result():
            return self.constant_result is not None
        return True

    def as_cython_attribute(self):
        return None

    def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
        # Wraps the node in a NoneCheckNode if it is not known to be
        # not-None (e.g. because it is a Python literal).
        if self.may_be_none():
            return NoneCheckNode(self, error, message, format_args)
        else:
            return self

    @classmethod
    def from_node(cls, node, **kwargs):
        """Instantiate this node class from another node, properly
        copying over all attributes that one would forget otherwise.
""" attributes = "cf_state cf_maybe_null cf_is_null constant_result".split() for attr_name in attributes: if attr_name in kwargs: continue try: value = getattr(node, attr_name) except AttributeError: pass else: kwargs[attr_name] = value return cls(node.pos, **kwargs) class AtomicExprNode(ExprNode): # Abstract base class for expression nodes which have # no sub-expressions. subexprs = [] # Override to optimize -- we know we have no children def generate_subexpr_evaluation_code(self, code): pass def generate_subexpr_disposal_code(self, code): pass class PyConstNode(AtomicExprNode): # Abstract base class for constant Python values. is_literal = 1 type = py_object_type def is_simple(self): return 1 def may_be_none(self): return False def analyse_types(self, env): return self def calculate_result_code(self): return self.value def generate_result_code(self, code): pass class NoneNode(PyConstNode): # The constant value None is_none = 1 value = "Py_None" constant_result = None nogil_check = None def compile_time_value(self, denv): return None def may_be_none(self): return True class EllipsisNode(PyConstNode): # '...' in a subscript list. value = "Py_Ellipsis" constant_result = Ellipsis def compile_time_value(self, denv): return Ellipsis class ConstNode(AtomicExprNode): # Abstract base type for literal constant nodes. 
# # value string C code fragment is_literal = 1 nogil_check = None def is_simple(self): return 1 def nonlocally_immutable(self): return 1 def may_be_none(self): return False def analyse_types(self, env): return self # Types are held in class variables def check_const(self): return True def get_constant_c_result_code(self): return self.calculate_result_code() def calculate_result_code(self): return str(self.value) def generate_result_code(self, code): pass class BoolNode(ConstNode): type = PyrexTypes.c_bint_type # The constant value True or False def calculate_constant_result(self): self.constant_result = self.value def compile_time_value(self, denv): return self.value def calculate_result_code(self): if self.type.is_pyobject: return self.value and 'Py_True' or 'Py_False' else: return str(int(self.value)) def coerce_to(self, dst_type, env): if dst_type.is_pyobject and self.type.is_int: return BoolNode( self.pos, value=self.value, constant_result=self.constant_result, type=Builtin.bool_type) if dst_type.is_int and self.type.is_pyobject: return BoolNode( self.pos, value=self.value, constant_result=self.constant_result, type=PyrexTypes.c_bint_type) return ConstNode.coerce_to(self, dst_type, env) class NullNode(ConstNode): type = PyrexTypes.c_null_ptr_type value = "NULL" constant_result = 0 def get_constant_c_result_code(self): return self.value class CharNode(ConstNode): type = PyrexTypes.c_char_type def calculate_constant_result(self): self.constant_result = ord(self.value) def compile_time_value(self, denv): return ord(self.value) def calculate_result_code(self): return "'%s'" % StringEncoding.escape_char(self.value) class IntNode(ConstNode): # unsigned "" or "U" # longness "" or "L" or "LL" # is_c_literal True/False/None creator considers this a C integer literal unsigned = "" longness = "" is_c_literal = None # unknown def __init__(self, pos, **kwds): ExprNode.__init__(self, pos, **kwds) if 'type' not in kwds: self.type = self.find_suitable_type_for_value() def 
find_suitable_type_for_value(self): if self.constant_result is constant_value_not_set: try: self.calculate_constant_result() except ValueError: pass # we ignore 'is_c_literal = True' and instead map signed 32bit # integers as C long values if self.is_c_literal or \ self.constant_result in (constant_value_not_set, not_a_constant) or \ self.unsigned or self.longness == 'LL': # clearly a C literal rank = (self.longness == 'LL') and 2 or 1 suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"] if self.type: suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type) else: # C literal or Python literal - split at 32bit boundary if -2**31 <= self.constant_result < 2**31: if self.type and self.type.is_int: suitable_type = self.type else: suitable_type = PyrexTypes.c_long_type else: suitable_type = PyrexTypes.py_object_type return suitable_type def coerce_to(self, dst_type, env): if self.type is dst_type: return self elif dst_type.is_float: if self.has_constant_result(): return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type, constant_result=float(self.constant_result)) else: return FloatNode(self.pos, value=self.value, type=dst_type, constant_result=not_a_constant) if dst_type.is_numeric and not dst_type.is_complex: node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, type = dst_type, is_c_literal = True, unsigned=self.unsigned, longness=self.longness) return node elif dst_type.is_pyobject: node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, type = PyrexTypes.py_object_type, is_c_literal = False, unsigned=self.unsigned, longness=self.longness) else: # FIXME: not setting the type here to keep it working with # complex numbers. Should they be special cased? 
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, unsigned=self.unsigned, longness=self.longness) # We still need to perform normal coerce_to processing on the # result, because we might be coercing to an extension type, # in which case a type test node will be needed. return ConstNode.coerce_to(node, dst_type, env) def coerce_to_boolean(self, env): return IntNode( self.pos, value=self.value, constant_result=self.constant_result, type=PyrexTypes.c_bint_type, unsigned=self.unsigned, longness=self.longness) def generate_evaluation_code(self, code): if self.type.is_pyobject: # pre-allocate a Python version of the number plain_integer_string = str(Utils.str_to_number(self.value)) self.result_code = code.get_py_int(plain_integer_string, self.longness) else: self.result_code = self.get_constant_c_result_code() def get_constant_c_result_code(self): return self.value_as_c_integer_string() + self.unsigned + self.longness def value_as_c_integer_string(self): value = self.value if len(value) > 2: # convert C-incompatible Py3 oct/bin notations if value[1] in 'oO': value = value[0] + value[2:] # '0o123' => '0123' elif value[1] in 'bB': value = int(value[2:], 2) return str(value) def calculate_result_code(self): return self.result_code def calculate_constant_result(self): self.constant_result = Utils.str_to_number(self.value) def compile_time_value(self, denv): return Utils.str_to_number(self.value) class FloatNode(ConstNode): type = PyrexTypes.c_double_type def calculate_constant_result(self): self.constant_result = float(self.value) def compile_time_value(self, denv): return float(self.value) def coerce_to(self, dst_type, env): if dst_type.is_pyobject and self.type.is_float: return FloatNode( self.pos, value=self.value, constant_result=self.constant_result, type=Builtin.float_type) if dst_type.is_float and self.type.is_pyobject: return FloatNode( self.pos, value=self.value, constant_result=self.constant_result, type=dst_type) return 
ConstNode.coerce_to(self, dst_type, env) def calculate_result_code(self): return self.result_code def get_constant_c_result_code(self): strval = self.value assert isinstance(strval, (str, unicode)) cmpval = repr(float(strval)) if cmpval == 'nan': return "(Py_HUGE_VAL * 0)" elif cmpval == 'inf': return "Py_HUGE_VAL" elif cmpval == '-inf': return "(-Py_HUGE_VAL)" else: return strval def generate_evaluation_code(self, code): c_value = self.get_constant_c_result_code() if self.type.is_pyobject: self.result_code = code.get_py_float(self.value, c_value) else: self.result_code = c_value class BytesNode(ConstNode): # A char* or bytes literal # # value BytesLiteral is_string_literal = True # start off as Python 'bytes' to support len() in O(1) type = bytes_type def calculate_constant_result(self): self.constant_result = self.value def as_sliced_node(self, start, stop, step=None): value = StringEncoding.BytesLiteral(self.value[start:stop:step]) value.encoding = self.value.encoding return BytesNode( self.pos, value=value, constant_result=value) def compile_time_value(self, denv): return self.value def analyse_as_type(self, env): type = PyrexTypes.parse_basic_type(self.value) if type is not None: return type from TreeFragment import TreeFragment pos = (self.pos[0], self.pos[1], self.pos[2]-7) declaration = TreeFragment(u"sizeof(%s)" % self.value, name=pos[0].filename, initial_pos=pos) sizeof_node = declaration.root.stats[0].expr sizeof_node = sizeof_node.analyse_types(env) if isinstance(sizeof_node, SizeofTypeNode): return sizeof_node.arg_type def can_coerce_to_char_literal(self): return len(self.value) == 1 def coerce_to_boolean(self, env): # This is special because testing a C char* for truth directly # would yield the wrong result. 
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)

    def coerce_to(self, dst_type, env):
        if self.type == dst_type:
            return self

        if dst_type.is_int:
            # only a single byte can become a C integer (char) literal
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value,
                            constant_result=ord(self.value))

        node = BytesNode(self.pos, value=self.value,
                         constant_result=self.constant_result)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
                return node
        elif dst_type == PyrexTypes.c_char_ptr_type:
            node.type = dst_type
            return node
        elif dst_type == PyrexTypes.c_uchar_ptr_type:
            node.type = PyrexTypes.c_char_ptr_type
            return CastNode(node, PyrexTypes.c_uchar_ptr_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node

        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_string_const(self.value)

    def get_constant_c_result_code(self):
        return None # FIXME

    def calculate_result_code(self):
        return self.result_code


class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral    the literal parsed as bytes string
    #                              ('-3' unicode literals only)

    is_string_literal = True
    bytes_value = None
    type = unicode_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def as_sliced_node(self, start, stop, step=None):
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results
            # in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            bytes_value = StringEncoding.BytesLiteral(
                self.bytes_value[start:stop:step])
            bytes_value.encoding = self.bytes_value.encoding
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)

    def coerce_to(self, dst_type, env):
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos,
                      "Only single-character Unicode string literals or "
                      "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value),
                           constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a
                # C char* context
                return BytesNode(self.pos, value=self.bytes_value
                    ).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                node = UnicodeNode(self.pos, value=self.value)
                node.type = dst_type
                return node
            error(self.pos,
                  "Unicode literals do not support coercion to C types other "
                  "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
                  "(for strings).")
        elif dst_type not in (py_object_type, Builtin.basestring_type):
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))

    def coerce_to_boolean(self, env):
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)

    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            if self.contains_surrogates():
                # surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                self.result_code = code.get_py_const(py_object_type, 'ustring')
                data_cname = code.get_pyunicode_ptr_const(self.value)
                code = code.get_cached_constants_writer()
                code.mark_pos(self.pos)
                code.putln(
                    "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        code.error_goto_if_null(self.result_code, self.pos)))
                code.putln("#if CYTHON_PEP393_ENABLED")
                code.put_error_if_neg(
                    self.pos, "PyUnicode_READY(%s)" % self.result_code)
                code.putln("#endif")
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value


class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean

    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None

    def calculate_constant_result(self):
        if self.unicode_value is not None:
            # only the Unicode value is portable across Py2/3
            self.constant_result = self.unicode_value

    def as_sliced_node(self, start, stop, step=None):
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)

    def coerce_to(self, dst_type, env):
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
#            if dst_type is Builtin.bytes_type:
#                # special case: bytes = 'str literal'
#                return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            if dst_type is not Builtin.basestring_type:
                self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        return not self.is_identifier and len(self.value) == 1

    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)

    def get_constant_c_result_code(self):
        return None

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value


class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in
    # Py3).
    is_identifier = True


class ImagNode(AtomicExprNode):
    #  Imaginary number literal
    #
    #  value   float    imaginary part

    type = PyrexTypes.c_double_complex_type

    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)

    def compile_time_value(self, denv):
        return complex(0.0, self.value)

    def analyse_types(self, env):
        self.type.create_declaration_utility_code(env)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.is_temp = 1
            node.type = PyrexTypes.py_object_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)

    gil_message = "Constructing complex number"

    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))

    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())


class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass              node                 c++ class to create

    type = None

    def infer_type(self, env):
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # implicitly declare a no-argument constructor
            func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
            type.scope.declare_cfunction(u'<init>', func_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        pass

    def calculate_result_code(self):
        return "new " + self.class_type.declaration_code("")


class NameNode(AtomicExprNode):
    #  Reference to a local or global variable name.
    #
    #  name            string    Python name of the variable
    #  entry           Entry     Symbol table entry
    #  type_entry      Entry     For extension type names, the original type entry
    #  cf_is_null      boolean   Is uninitialized before this node
    #  cf_maybe_null   boolean   Maybe uninitialized before this node
    #  allow_null      boolean   Don't raise UnboundLocalError
    #  nogil           boolean   Whether it is used in a nogil context

    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    inferred_type = None

    def as_cython_attribute(self):
        return self.cython_attribute

    def type_dependencies(self, env):
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is not None and self.entry.type.is_unspecified:
            return (self,)
        else:
            return ()

    def infer_type(self, env):
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is None or self.entry.type is unspecified_type:
            if self.inferred_type is not None:
                return self.inferred_type
            return py_object_type
        elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
                self.name == self.entry.type.name:
            # Unfortunately the type attribute of type objects
            # is used for the pointer to the type they represent.
            return type_type
        elif self.entry.type.is_cfunction:
            if self.entry.scope.is_builtin_scope:
                # special case: optimised builtin functions must be treated as Python objects
                return py_object_type
            else:
                # special case: referring to a C function must return its pointer
                return PyrexTypes.CPtrType(self.entry.type)
        else:
            # If entry is inferred as pyobject it's safe to use local
            # NameNode's inferred_type.
            if self.entry.type.is_pyobject and self.inferred_type:
                # Overflow may happen if integer
                if not (self.inferred_type.is_int and self.entry.might_overflow):
                    return self.inferred_type
            return self.entry.type

    def compile_time_value(self, denv):
        try:
            return denv.lookup(self.name)
        except KeyError:
            error(self.pos, "Compile-time name '%s' not defined" % self.name)

    def get_constant_c_result_code(self):
        if not self.entry or self.entry.type.is_pyobject:
            return None
        return self.entry.cname

    def coerce_to(self, dst_type, env):
        #  If coercing to a generic pyobject and this is a builtin
        #  C function with a Python equivalent, manufacture a NameNode
        #  referring to the Python builtin.
        #print "NameNode.coerce_to:", self.name, dst_type ###
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction:
                var_entry = entry.as_variable
                if var_entry:
                    if var_entry.is_builtin and var_entry.is_const:
                        var_entry = env.declare_builtin(var_entry.name, self.pos)
                    node = NameNode(self.pos, name = self.name)
                    node.entry = var_entry
                    node.analyse_rvalue_entry(env)
                    return node
        return super(NameNode, self).coerce_to(dst_type, env)

    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module.
        # Returns the module scope, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.as_module:
            return entry.as_module
        return None

    def analyse_as_type(self, env):
        if self.cython_attribute:
            type = PyrexTypes.parse_basic_type(self.cython_attribute)
        else:
            type = PyrexTypes.parse_basic_type(self.name)
        if type:
            return type
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            return entry.type
        else:
            return None

    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type.
        # Returns the extension type, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            if entry.type.is_extension_type or entry.type.is_builtin_type:
                return entry.type
        return None

    def analyse_target_declaration(self, env):
        if not self.entry:
            self.entry = env.lookup_here(self.name)
        if not self.entry:
            # first assignment to an undeclared name - implicitly declare it
            if env.directives['warn.undeclared']:
                warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
            if env.directives['infer_types'] != False:
                type = unspecified_type
            else:
                type = py_object_type
            self.entry = env.declare_var(self.name, type, self.pos)
        if self.entry.is_declared_generic:
            self.result_ctype = py_object_type

    def analyse_types(self, env):
        self.initialized_check = env.directives['initializedcheck']
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if not self.entry:
            self.entry = env.declare_builtin(self.name, self.pos)
        if not self.entry:
            self.type = PyrexTypes.error_type
            return self
        entry = self.entry
        if entry:
            entry.used = 1
            if entry.type.is_buffer:
                import Buffer
                Buffer.used_buffer_aux_vars(entry)
        self.analyse_rvalue_entry(env)
        return self

    def analyse_target_types(self, env):
        self.analyse_entry(env, is_target=True)

        if (not self.is_lvalue() and self.entry.is_cfunction and
                self.entry.fused_cfunction and self.entry.as_variable):
            # We need this for the fused 'def' TreeFragment
            self.entry = self.entry.as_variable
            self.type = self.entry.type

        if self.type.is_const:
            error(self.pos, "Assignment to const '%s'" % self.name)
        if self.type.is_reference:
            error(self.pos, "Assignment to reference '%s'" % self.name)
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
            self.type = PyrexTypes.error_type
        self.entry.used = 1
        if self.entry.type.is_buffer:
            import Buffer
            Buffer.used_buffer_aux_vars(self.entry)
        return self

    def analyse_rvalue_entry(self, env):
        #print "NameNode.analyse_rvalue_entry:", self.name ###
        #print "Entry:", self.entry.__dict__ ###
        self.analyse_entry(env)
        entry = self.entry

        if entry.is_declared_generic:
            self.result_ctype = py_object_type

        if entry.is_pyglobal or entry.is_builtin:
            if entry.is_builtin and entry.is_const:
                self.is_temp = 0
            else:
                self.is_temp = 1
            self.is_used_as_rvalue = 1
        elif entry.type.is_memoryviewslice:
            self.is_temp = False
            self.is_used_as_rvalue = True
            self.use_managed_ref = True
        return self

    def nogil_check(self, env):
        self.nogil = True
        if self.is_used_as_rvalue:
            entry = self.entry
            if entry.is_builtin:
                if not entry.is_const: # cached builtins are ok
                    self.gil_error()
            elif entry.is_pyglobal:
                self.gil_error()
            elif self.entry.type.is_memoryviewslice:
                if self.cf_is_null or self.cf_maybe_null:
                    import MemoryView
                    MemoryView.err_if_nogil_initialized_check(self.pos, env)

    gil_message = "Accessing Python global or builtin"

    def analyse_entry(self, env, is_target=False):
        #print "NameNode.analyse_entry:", self.name ###
        self.check_identifier_kind()
        entry = self.entry
        type = entry.type
        if (not is_target and type.is_pyobject and self.inferred_type and
                self.inferred_type.is_builtin_type):
            # assume that type inference is smarter than the static entry
            type = self.inferred_type
        self.type = type

    def check_identifier_kind(self):
        # Check that this is an appropriate kind of name for use in an
        # expression. Also finds the variable entry associated with
        # an extension type.
        entry = self.entry
        if entry.is_type and entry.type.is_extension_type:
            self.type_entry = entry
        if not (entry.is_const or entry.is_variable
                or entry.is_builtin or entry.is_cfunction
                or entry.is_cpp_class):
            if self.entry.as_variable:
                self.entry = self.entry.as_variable
            else:
                error(self.pos,
                      "'%s' is not a constant, variable or function identifier" % self.name)

    def is_simple(self):
        #  If it's not a C variable, it'll be in a temp.
        return 1

    def may_be_none(self):
        if self.cf_state and self.type and (self.type.is_pyobject or
                                            self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
            if getattr(self, '_none_checking', False):
                # self-dependency - either this node receives a None
                # value from *another* node, or it can not reference
                # None at this point => safe to assume "not None"
                return False
            self._none_checking = True
            # evaluate control flow state to see if there were any
            # potential None values assigned to the node so far
            may_be_none = False
            for assignment in self.cf_state:
                if assignment.rhs.may_be_none():
                    may_be_none = True
                    break
            del self._none_checking
            return may_be_none
        return super(NameNode, self).may_be_none()

    def nonlocally_immutable(self):
        if ExprNode.nonlocally_immutable(self):
            return True
        entry = self.entry
        if not entry or entry.in_closure:
            return False
        return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly

    def calculate_target_results(self, env):
        pass

    def check_const(self):
        entry = self.entry
        if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
            self.not_const()
            return False
        return True

    def check_const_addr(self):
        entry = self.entry
        if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
            self.addr_not_const()
            return False
        return True

    def is_lvalue(self):
        return self.entry.is_variable and \
            not self.entry.type.is_array and \
            not self.entry.is_readonly

    def is_addressable(self):
        return self.entry.is_variable and not self.type.is_memoryviewslice

    def is_ephemeral(self):
        #
Name nodes are never ephemeral, even if the # result is in a temporary. return 0 def calculate_result_code(self): entry = self.entry if not entry: return "<error>" # There was an error earlier return entry.cname def generate_result_code(self, code): assert hasattr(self, 'entry') entry = self.entry if entry is None: return # There was an error earlier if entry.is_builtin and entry.is_const: return # Lookup already cached elif entry.is_pyclass_attr: assert entry.type.is_pyobject, "Python global or builtin not a Python object" interned_cname = code.intern_identifier(self.entry.name) if entry.is_builtin: namespace = Naming.builtins_cname else: # entry.is_pyglobal namespace = entry.scope.namespace_cname if not self.cf_is_null: code.putln( '%s = PyObject_GetItem(%s, %s);' % ( self.result(), namespace, interned_cname)) code.putln('if (unlikely(!%s)) {' % self.result()) code.putln('PyErr_Clear();') code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) code.putln( '%s = __Pyx_GetModuleGlobalName(%s);' % ( self.result(), interned_cname)) if not self.cf_is_null: code.putln("}") code.putln(code.error_goto_if_null(self.result(), self.pos)) code.put_gotref(self.py_result()) elif entry.is_builtin: assert entry.type.is_pyobject, "Python global or builtin not a Python object" interned_cname = code.intern_identifier(self.entry.name) code.globalstate.use_utility_code( UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c")) code.putln( '%s = __Pyx_GetBuiltinName(%s); %s' % ( self.result(), interned_cname, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) elif entry.is_pyglobal: assert entry.type.is_pyobject, "Python global or builtin not a Python object" interned_cname = code.intern_identifier(self.entry.name) if entry.scope.is_module_scope: code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) code.putln( '%s = __Pyx_GetModuleGlobalName(%s); %s' 
% ( self.result(), interned_cname, code.error_goto_if_null(self.result(), self.pos))) else: # FIXME: is_pyglobal is also used for class namespace code.globalstate.use_utility_code( UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c")) code.putln( '%s = __Pyx_GetNameInClass(%s, %s); %s' % ( self.result(), entry.scope.namespace_cname, interned_cname, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice: # Raise UnboundLocalError for objects and memoryviewslices raise_unbound = ( (self.cf_maybe_null or self.cf_is_null) and not self.allow_null) null_code = entry.type.check_for_null_code(entry.cname) memslice_check = entry.type.is_memoryviewslice and self.initialized_check if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check): code.put_error_if_unbound(self.pos, entry, self.in_nogil_context) def generate_assignment_code(self, rhs, code): #print "NameNode.generate_assignment_code:", self.name ### entry = self.entry if entry is None: return # There was an error earlier if (self.entry.type.is_ptr and isinstance(rhs, ListNode) and not self.lhs_of_first_assignment and not rhs.in_module_scope): error(self.pos, "Literal list must be assigned to pointer at time of declaration") # is_pyglobal seems to be True for module level-globals only. # We use this to access class->tp_dict if necessary. 
if entry.is_pyglobal: assert entry.type.is_pyobject, "Python global or builtin not a Python object" interned_cname = code.intern_identifier(self.entry.name) namespace = self.entry.scope.namespace_cname if entry.is_member: # if the entry is a member we have to cheat: SetAttr does not work # on types, so we create a descriptor which is then added to tp_dict setter = 'PyDict_SetItem' namespace = '%s->tp_dict' % namespace elif entry.scope.is_module_scope: setter = 'PyDict_SetItem' namespace = Naming.moddict_cname elif entry.is_pyclass_attr: setter = 'PyObject_SetItem' else: assert False, repr(entry) code.put_error_if_neg( self.pos, '%s(%s, %s, %s)' % ( setter, namespace, interned_cname, rhs.py_result())) if debug_disposal_code: print("NameNode.generate_assignment_code:") print("...generating disposal code for %s" % rhs) rhs.generate_disposal_code(code) rhs.free_temps(code) if entry.is_member: # in Py2.6+, we need to invalidate the method cache code.putln("PyType_Modified(%s);" % entry.scope.parent_type.typeptr_cname) else: if self.type.is_memoryviewslice: self.generate_acquire_memoryviewslice(rhs, code) elif self.type.is_buffer: # Generate code for doing the buffer release/acquisition. # This might raise an exception in which case the assignment (done # below) will not happen. # # The reason this is not in a typetest-like node is because the # variables that the acquired buffer info is stored to is allocated # per entry and coupled with it. 
self.generate_acquire_buffer(rhs, code) assigned = False if self.type.is_pyobject: #print "NameNode.generate_assignment_code: to", self.name ### #print "...from", rhs ### #print "...LHS type", self.type, "ctype", self.ctype() ### #print "...RHS type", rhs.type, "ctype", rhs.ctype() ### if self.use_managed_ref: rhs.make_owned_reference(code) is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure if is_external_ref: if not self.cf_is_null: if self.cf_maybe_null: code.put_xgotref(self.py_result()) else: code.put_gotref(self.py_result()) assigned = True if entry.is_cglobal: code.put_decref_set( self.result(), rhs.result_as(self.ctype())) else: if not self.cf_is_null: if self.cf_maybe_null: code.put_xdecref_set( self.result(), rhs.result_as(self.ctype())) else: code.put_decref_set( self.result(), rhs.result_as(self.ctype())) else: assigned = False if is_external_ref: code.put_giveref(rhs.py_result()) if not self.type.is_memoryviewslice: if not assigned: code.putln('%s = %s;' % ( self.result(), rhs.result_as(self.ctype()))) if debug_disposal_code: print("NameNode.generate_assignment_code:") print("...generating post-assignment code for %s" % rhs) rhs.generate_post_assignment_code(code) elif rhs.result_in_temp(): rhs.generate_post_assignment_code(code) rhs.free_temps(code) def generate_acquire_memoryviewslice(self, rhs, code): """ Slices, coercions from objects, return values etc are new references. We have a borrowed reference in case of dst = src """ import MemoryView MemoryView.put_acquire_memoryviewslice( lhs_cname=self.result(), lhs_type=self.type, lhs_pos=self.pos, rhs=rhs, code=code, have_gil=not self.in_nogil_context, first_assignment=self.cf_is_null) def generate_acquire_buffer(self, rhs, code): # rhstmp is only used in case the rhs is a complicated expression leading to # the object, to avoid repeating the same C expression for every reference # to the rhs. It does NOT hold a reference. 
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp if pretty_rhs: rhstmp = rhs.result_as(self.ctype()) else: rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False) code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype()))) import Buffer Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry, is_initialized=not self.lhs_of_first_assignment, pos=self.pos, code=code) if not pretty_rhs: code.putln("%s = 0;" % rhstmp) code.funcstate.release_temp(rhstmp) def generate_deletion_code(self, code, ignore_nonexisting=False): if self.entry is None: return # There was an error earlier elif self.entry.is_pyclass_attr: namespace = self.entry.scope.namespace_cname interned_cname = code.intern_identifier(self.entry.name) if ignore_nonexisting: key_error_code = 'PyErr_Clear(); else' else: # minor hack: fake a NameError on KeyError key_error_code = ( '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' % self.entry.name) code.putln( 'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {' ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s' ' %s ' '}' % (namespace, interned_cname, key_error_code, code.error_goto(self.pos))) elif self.entry.is_pyglobal: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) interned_cname = code.intern_identifier(self.entry.name) del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % ( Naming.module_cname, interned_cname) if ignore_nonexisting: code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % ( del_code, code.error_goto(self.pos))) else: code.put_error_if_neg(self.pos, del_code) elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice: if not self.cf_is_null: if self.cf_maybe_null and not ignore_nonexisting: code.put_error_if_unbound(self.pos, self.entry) if self.entry.type.is_pyobject: if self.entry.in_closure: # generator if ignore_nonexisting and 
self.cf_maybe_null: code.put_xgotref(self.result()) else: code.put_gotref(self.result()) if ignore_nonexisting and self.cf_maybe_null: code.put_xdecref(self.result(), self.ctype()) else: code.put_decref(self.result(), self.ctype()) code.putln('%s = NULL;' % self.result()) else: code.put_xdecref_memoryviewslice(self.entry.cname, have_gil=not self.nogil) else: error(self.pos, "Deletion of C names not supported") def annotate(self, code): if hasattr(self, 'is_called') and self.is_called: pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1) if self.type.is_pyobject: style, text = 'py_call', 'python function (%s)' else: style, text = 'c_call', 'c function (%s)' code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name))) class BackquoteNode(ExprNode): # `expr` # # arg ExprNode type = py_object_type subexprs = ['arg'] def analyse_types(self, env): self.arg = self.arg.analyse_types(env) self.arg = self.arg.coerce_to_pyobject(env) self.is_temp = 1 return self gil_message = "Backquote expression" def calculate_constant_result(self): self.constant_result = repr(self.arg.constant_result) def generate_result_code(self, code): code.putln( "%s = PyObject_Repr(%s); %s" % ( self.result(), self.arg.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class ImportNode(ExprNode): # Used as part of import statement implementation. # Implements result = # __import__(module_name, globals(), None, name_list, level) # # module_name StringNode dotted name of module. Empty module # name means importing the parent package according # to level # name_list ListNode or None list of names to be imported # level int relative import level: # -1: attempt both relative import and absolute import; # 0: absolute import; # >0: the number of parent directories to search # relative to the current module. 
# None: decide the level according to language level and # directives type = py_object_type subexprs = ['module_name', 'name_list'] def analyse_types(self, env): if self.level is None: if (env.directives['py2_import'] or Future.absolute_import not in env.global_scope().context.future_directives): self.level = -1 else: self.level = 0 module_name = self.module_name.analyse_types(env) self.module_name = module_name.coerce_to_pyobject(env) if self.name_list: name_list = self.name_list.analyse_types(env) self.name_list = name_list.coerce_to_pyobject(env) self.is_temp = 1 env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c")) return self gil_message = "Python import" def generate_result_code(self, code): if self.name_list: name_list_code = self.name_list.py_result() else: name_list_code = "0" code.putln( "%s = __Pyx_Import(%s, %s, %d); %s" % ( self.result(), self.module_name.py_result(), name_list_code, self.level, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class IteratorNode(ExprNode): # Used as part of for statement implementation. 
# # Implements result = iter(sequence) # # sequence ExprNode type = py_object_type iter_func_ptr = None counter_cname = None cpp_iterator_cname = None reversed = False # currently only used for list/tuple types (see Optimize.py) subexprs = ['sequence'] def analyse_types(self, env): self.sequence = self.sequence.analyse_types(env) if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \ not self.sequence.type.is_string: # C array iteration will be transformed later on self.type = self.sequence.type elif self.sequence.type.is_cpp_class: self.analyse_cpp_types(env) else: self.sequence = self.sequence.coerce_to_pyobject(env) if self.sequence.type is list_type or \ self.sequence.type is tuple_type: self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable") self.is_temp = 1 return self gil_message = "Iterating over Python object" _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None), ])) def type_dependencies(self, env): return self.sequence.type_dependencies(env) def infer_type(self, env): sequence_type = self.sequence.infer_type(env) if sequence_type.is_array or sequence_type.is_ptr: return sequence_type elif sequence_type.is_cpp_class: begin = sequence_type.scope.lookup("begin") if begin is not None: return begin.type.return_type elif sequence_type.is_pyobject: return sequence_type return py_object_type def analyse_cpp_types(self, env): sequence_type = self.sequence.type if sequence_type.is_ptr: sequence_type = sequence_type.base_type begin = sequence_type.scope.lookup("begin") end = sequence_type.scope.lookup("end") if (begin is None or not begin.type.is_cfunction or begin.type.args): error(self.pos, "missing begin() on %s" % self.sequence.type) self.type = error_type return if (end is None or not end.type.is_cfunction or end.type.args): error(self.pos, "missing end() on %s" % self.sequence.type) self.type = error_type return 
iter_type = begin.type.return_type if iter_type.is_cpp_class: if env.lookup_operator_for_types( self.pos, "!=", [iter_type, end.type.return_type]) is None: error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type) self.type = error_type return if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None: error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type) self.type = error_type return if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None: error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type) self.type = error_type return self.type = iter_type elif iter_type.is_ptr: if not (iter_type == end.type.return_type): error(self.pos, "incompatible types for begin() and end()") self.type = iter_type else: error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type) self.type = error_type return def generate_result_code(self, code): sequence_type = self.sequence.type if sequence_type.is_cpp_class: if self.sequence.is_name: # safe: C++ won't allow you to reassign to class references begin_func = "%s.begin" % self.sequence.result() else: sequence_type = PyrexTypes.c_ptr_type(sequence_type) self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False) code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result())) begin_func = "%s->begin" % self.cpp_iterator_cname # TODO: Limit scope. 
code.putln("%s = %s();" % (self.result(), begin_func)) return if sequence_type.is_array or sequence_type.is_ptr: raise InternalError("for in carray slice not transformed") is_builtin_sequence = sequence_type is list_type or \ sequence_type is tuple_type if not is_builtin_sequence: # reversed() not currently optimised (see Optimize.py) assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects" self.may_be_a_sequence = not sequence_type.is_builtin_type if self.may_be_a_sequence: code.putln( "if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % ( self.sequence.py_result(), self.sequence.py_result())) if is_builtin_sequence or self.may_be_a_sequence: self.counter_cname = code.funcstate.allocate_temp( PyrexTypes.c_py_ssize_t_type, manage_ref=False) if self.reversed: if sequence_type is list_type: init_value = 'PyList_GET_SIZE(%s) - 1' % self.result() else: init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result() else: init_value = '0' code.putln( "%s = %s; __Pyx_INCREF(%s); %s = %s;" % ( self.result(), self.sequence.py_result(), self.result(), self.counter_cname, init_value )) if not is_builtin_sequence: self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False) if self.may_be_a_sequence: code.putln("%s = NULL;" % self.iter_func_ptr) code.putln("} else {") code.put("%s = -1; " % self.counter_cname) code.putln("%s = PyObject_GetIter(%s); %s" % ( self.result(), self.sequence.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (self.iter_func_ptr, self.py_result())) if self.may_be_a_sequence: code.putln("}") def generate_next_sequence_item(self, test_name, result_name, code): assert self.counter_cname, "internal error: counter_cname temp not prepared" final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result()) if self.sequence.is_sequence_constructor: item_count = len(self.sequence.args) if 
self.sequence.mult_factor is None: final_size = item_count elif isinstance(self.sequence.mult_factor.constant_result, (int, long)): final_size = item_count * self.sequence.mult_factor.constant_result code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size)) if self.reversed: inc_dec = '--' else: inc_dec = '++' code.putln("#if CYTHON_COMPILING_IN_CPYTHON") code.putln( "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % ( result_name, test_name, self.py_result(), self.counter_cname, result_name, self.counter_cname, inc_dec, # use the error label to avoid C compiler warnings if we only use it below code.error_goto_if_neg('0', self.pos) )) code.putln("#else") code.putln( "%s = PySequence_ITEM(%s, %s); %s%s; %s" % ( result_name, self.py_result(), self.counter_cname, self.counter_cname, inc_dec, code.error_goto_if_null(result_name, self.pos))) code.putln("#endif") def generate_iter_next_result_code(self, result_name, code): sequence_type = self.sequence.type if self.reversed: code.putln("if (%s < 0) break;" % self.counter_cname) if sequence_type.is_cpp_class: if self.cpp_iterator_cname: end_func = "%s->end" % self.cpp_iterator_cname else: end_func = "%s.end" % self.sequence.result() # TODO: Cache end() call? 
code.putln("if (!(%s != %s())) break;" % ( self.result(), end_func)) code.putln("%s = *%s;" % ( result_name, self.result())) code.putln("++%s;" % self.result()) return elif sequence_type is list_type: self.generate_next_sequence_item('List', result_name, code) return elif sequence_type is tuple_type: self.generate_next_sequence_item('Tuple', result_name, code) return if self.may_be_a_sequence: for test_name in ('List', 'Tuple'): code.putln("if (!%s && Py%s_CheckExact(%s)) {" % ( self.iter_func_ptr, test_name, self.py_result())) self.generate_next_sequence_item(test_name, result_name, code) code.put("} else ") code.putln("{") code.putln( "%s = %s(%s);" % ( result_name, self.iter_func_ptr, self.py_result())) code.putln("if (unlikely(!%s)) {" % result_name) code.putln("PyObject* exc_type = PyErr_Occurred();") code.putln("if (exc_type) {") code.putln("if (likely(exc_type == PyExc_StopIteration ||" " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();") code.putln("else %s" % code.error_goto(self.pos)) code.putln("}") code.putln("break;") code.putln("}") code.put_gotref(result_name) code.putln("}") def free_temps(self, code): if self.counter_cname: code.funcstate.release_temp(self.counter_cname) if self.iter_func_ptr: code.funcstate.release_temp(self.iter_func_ptr) self.iter_func_ptr = None if self.cpp_iterator_cname: code.funcstate.release_temp(self.cpp_iterator_cname) ExprNode.free_temps(self, code) class NextNode(AtomicExprNode): # Used as part of for statement implementation. # Implements result = iterator.next() # Created during analyse_types phase. # The iterator is not owned by this node. 
# # iterator IteratorNode def __init__(self, iterator): AtomicExprNode.__init__(self, iterator.pos) self.iterator = iterator def type_dependencies(self, env): return self.iterator.type_dependencies(env) def infer_type(self, env, iterator_type = None): if iterator_type is None: iterator_type = self.iterator.infer_type(env) if iterator_type.is_ptr or iterator_type.is_array: return iterator_type.base_type elif iterator_type.is_cpp_class: item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type if item_type.is_reference: item_type = item_type.ref_base_type if item_type.is_const: item_type = item_type.const_base_type return item_type else: # Avoid duplication of complicated logic. fake_index_node = IndexNode( self.pos, base=self.iterator.sequence, index=IntNode(self.pos, value='PY_SSIZE_T_MAX', type=PyrexTypes.c_py_ssize_t_type)) return fake_index_node.infer_type(env) def analyse_types(self, env): self.type = self.infer_type(env, self.iterator.type) self.is_temp = 1 return self def generate_result_code(self, code): self.iterator.generate_iter_next_result_code(self.result(), code) class WithExitCallNode(ExprNode): # The __exit__() call of a 'with' statement. Used in both the # except and finally clauses. 
# with_stat WithStatNode the surrounding 'with' statement # args TupleNode or ResultStatNode the exception info tuple subexprs = ['args'] test_if_run = True def analyse_types(self, env): self.args = self.args.analyse_types(env) self.type = PyrexTypes.c_bint_type self.is_temp = True return self def generate_evaluation_code(self, code): if self.test_if_run: # call only if it was not already called (and decref-cleared) code.putln("if (%s) {" % self.with_stat.exit_var) self.args.generate_evaluation_code(code) result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False) code.mark_pos(self.pos) code.globalstate.use_utility_code(UtilityCode.load_cached( "PyObjectCall", "ObjectHandling.c")) code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % ( result_var, self.with_stat.exit_var, self.args.result())) code.put_decref_clear(self.with_stat.exit_var, type=py_object_type) self.args.generate_disposal_code(code) self.args.free_temps(code) code.putln(code.error_goto_if_null(result_var, self.pos)) code.put_gotref(result_var) if self.result_is_used: self.allocate_temp_result(code) code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var)) code.put_decref_clear(result_var, type=py_object_type) if self.result_is_used: code.put_error_if_neg(self.pos, self.result()) code.funcstate.release_temp(result_var) if self.test_if_run: code.putln("}") class ExcValueNode(AtomicExprNode): # Node created during analyse_types phase # of an ExceptClauseNode to fetch the current # exception value. type = py_object_type def __init__(self, pos): ExprNode.__init__(self, pos) def set_var(self, var): self.var = var def calculate_result_code(self): return self.var def generate_result_code(self, code): pass def analyse_types(self, env): return self class TempNode(ExprNode): # Node created during analyse_types phase # of some nodes to hold a temporary value. # # Note: One must call "allocate" and "release" on # the node during code generation to get/release the temp. 
# This is because the temp result is often used outside of # the regular cycle. subexprs = [] def __init__(self, pos, type, env=None): ExprNode.__init__(self, pos) self.type = type if type.is_pyobject: self.result_ctype = py_object_type self.is_temp = 1 def analyse_types(self, env): return self def analyse_target_declaration(self, env): pass def generate_result_code(self, code): pass def allocate(self, code): self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True) def release(self, code): code.funcstate.release_temp(self.temp_cname) self.temp_cname = None def result(self): try: return self.temp_cname except: assert False, "Remember to call allocate/release on TempNode" raise # Do not participate in normal temp alloc/dealloc: def allocate_temp_result(self, code): pass def release_temp_result(self, code): pass class PyTempNode(TempNode): # TempNode holding a Python value. def __init__(self, pos, env): TempNode.__init__(self, pos, PyrexTypes.py_object_type, env) class RawCNameExprNode(ExprNode): subexprs = [] def __init__(self, pos, type=None, cname=None): ExprNode.__init__(self, pos, type=type) if cname is not None: self.cname = cname def analyse_types(self, env): return self def set_cname(self, cname): self.cname = cname def result(self): return self.cname def generate_result_code(self, code): pass #------------------------------------------------------------------- # # Parallel nodes (cython.parallel.thread(savailable|id)) # #------------------------------------------------------------------- class ParallelThreadsAvailableNode(AtomicExprNode): """ Note: this is disabled and not a valid directive at this moment Implements cython.parallel.threadsavailable(). 
If we are called from the sequential part of the application, we need to call omp_get_max_threads(), and in the parallel part we can just call omp_get_num_threads() """ type = PyrexTypes.c_int_type def analyse_types(self, env): self.is_temp = True # env.add_include_file("omp.h") return self def generate_result_code(self, code): code.putln("#ifdef _OPENMP") code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" % self.temp_code) code.putln("else %s = omp_get_num_threads();" % self.temp_code) code.putln("#else") code.putln("%s = 1;" % self.temp_code) code.putln("#endif") def result(self): return self.temp_code class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode): """ Implements cython.parallel.threadid() """ type = PyrexTypes.c_int_type def analyse_types(self, env): self.is_temp = True # env.add_include_file("omp.h") return self def generate_result_code(self, code): code.putln("#ifdef _OPENMP") code.putln("%s = omp_get_thread_num();" % self.temp_code) code.putln("#else") code.putln("%s = 0;" % self.temp_code) code.putln("#endif") def result(self): return self.temp_code #------------------------------------------------------------------- # # Trailer nodes # #------------------------------------------------------------------- class IndexNode(ExprNode): # Sequence indexing. # # base ExprNode # index ExprNode # indices [ExprNode] # type_indices [PyrexType] # is_buffer_access boolean Whether this is a buffer access. # # indices is used on buffer access, index on non-buffer access. # The former contains a clean list of index parameters, the # latter whatever Python object is needed for index access. 
# # is_fused_index boolean Whether the index is used to specialize a # c(p)def function subexprs = ['base', 'index', 'indices'] indices = None type_indices = None is_subscript = True is_fused_index = False # Whether we're assigning to a buffer (in that case it needs to be # writable) writable_needed = False # Whether we are indexing or slicing a memoryviewslice memslice_index = False memslice_slice = False is_memslice_copy = False memslice_ellipsis_noop = False warned_untyped_idx = False # set by SingleAssignmentNode after analyse_types() is_memslice_scalar_assignment = False def __init__(self, pos, index, **kw): ExprNode.__init__(self, pos, index=index, **kw) self._index = index def calculate_constant_result(self): self.constant_result = \ self.base.constant_result[self.index.constant_result] def compile_time_value(self, denv): base = self.base.compile_time_value(denv) index = self.index.compile_time_value(denv) try: return base[index] except Exception, e: self.compile_time_value_error(e) def is_ephemeral(self): return self.base.is_ephemeral() def is_simple(self): if self.is_buffer_access or self.memslice_index: return False elif self.memslice_slice: return True base = self.base return (base.is_simple() and self.index.is_simple() and base.type and (base.type.is_ptr or base.type.is_array)) def may_be_none(self): base_type = self.base.type if base_type: if base_type.is_string: return False if isinstance(self.index, SliceNode): # slicing! 
                # Slicing these builtin sequence types returns the same
                # (non-None) type, so the result cannot be None.
                if base_type in (bytes_type, str_type, unicode_type,
                                 basestring_type, list_type, tuple_type):
                    return False
        return ExprNode.may_be_none(self)

    def analyse_target_declaration(self, env):
        # Nothing to declare for an indexed assignment target.
        pass

    def analyse_as_type(self, env):
        # Try to interpret this subscript as a type:
        # either C++ template instantiation (cls[T1, T2]) or a
        # fixed-size C array declaration (ctype[N]).
        base_type = self.base.analyse_as_type(env)
        if base_type and not base_type.is_pyobject:
            if base_type.is_cpp_class:
                # template parameters may be a tuple or a single value
                if isinstance(self.index, TupleNode):
                    template_values = self.index.args
                else:
                    template_values = [self.index]
                import Nodes
                type_node = Nodes.TemplatedTypeNode(
                    pos = self.pos,
                    positional_args = template_values,
                    keyword_args = None)
                return type_node.analyse(env, base_type = base_type)
            else:
                # C array: the size must fold to a compile-time constant.
                index = self.index.compile_time_value(env)
                if index is not None:
                    return PyrexTypes.CArrayType(base_type, int(index))
                error(self.pos, "Array size must be a compile time constant")
        return None

    def type_dependencies(self, env):
        return self.base.type_dependencies(env) + self.index.type_dependencies(env)

    def infer_type(self, env):
        base_type = self.base.infer_type(env)
        if isinstance(self.index, SliceNode):
            # slicing!
            if base_type.is_string:
                # sliced C strings must coerce to Python
                return bytes_type
            elif base_type.is_pyunicode_ptr:
                # sliced Py_UNICODE* strings must coerce to Python
                return unicode_type
            elif base_type in (unicode_type, bytes_type, str_type,
                               bytearray_type, list_type, tuple_type):
                # slicing these returns the same type
                return base_type
            else:
                # TODO: Handle buffers (hopefully without too much redundancy).
                return py_object_type

        index_type = self.index.infer_type(env)
        if index_type and index_type.is_int or isinstance(self.index, IntNode):
            # indexing!
            if base_type is unicode_type:
                # Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is safe. We only infer Py_UCS4
                # when the index is a C integer type. Otherwise, we may
                # need to use normal Python item access, in which case
                # it's faster to return the one-char unicode string than
                # to receive it, throw it away, and potentially rebuild it
                # on a subsequent PyObject coercion.
                return PyrexTypes.c_py_ucs4_type
            elif base_type is str_type:
                # always returns str - Py2: bytes, Py3: unicode
                return base_type
            elif base_type is bytearray_type:
                return PyrexTypes.c_uchar_type
            elif isinstance(self.base, BytesNode):
                #if env.global_scope().context.language_level >= 3:
                #    # inferring 'char' can be made to work in Python 3 mode
                #    return PyrexTypes.c_char_type
                # Py2/3 return different types on indexing bytes objects
                return py_object_type
            elif base_type in (tuple_type, list_type):
                # if base is a literal, take a look at its values
                item_type = infer_sequence_item_type(
                    env, self.base, self.index, seq_type=base_type)
                if item_type is not None:
                    return item_type
            elif base_type.is_ptr or base_type.is_array:
                return base_type.base_type

        if base_type.is_cpp_class:
            # Ask the environment which operator[] overload would be
            # selected; fake minimal operand nodes carrying only the
            # attributes (pos, type) that lookup_operator needs.
            class FakeOperand:
                def __init__(self, **kwds):
                    self.__dict__.update(kwds)
            operands = [
                FakeOperand(pos=self.pos, type=base_type),
                FakeOperand(pos=self.pos, type=index_type),
            ]
            index_func = env.lookup_operator('[]', operands)
            if index_func is not None:
                return index_func.type.return_type

        # may be slicing or indexing, we don't know
        if base_type in (unicode_type, str_type):
            # these types always returns their own type on Python indexing/slicing
            return base_type
        else:
            # TODO: Handle buffers (hopefully without too much redundancy).
            return py_object_type

    def analyse_types(self, env):
        # rvalue analysis is shared with lvalue analysis below
        return self.analyse_base_and_index_types(env, getting=True)

    def analyse_target_types(self, env):
        # lvalue analysis: additionally reject const and non-lvalue targets
        node = self.analyse_base_and_index_types(env, setting=True)
        if node.type.is_const:
            error(self.pos, "Assignment to const dereference")
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
        return node

    def analyse_base_and_index_types(self, env, getting=False, setting=False,
                                     analyse_base=True):
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only construct the tuple if
        # needed.

        # Note that this function must leave IndexNode in a cloneable state.
# For buffers, self.index is packed out on the initial analysis, and # when cloning self.indices is copied. self.is_buffer_access = False # a[...] = b self.is_memslice_copy = False # incomplete indexing, Ellipsis indexing or slicing self.memslice_slice = False # integer indexing self.memslice_index = False if analyse_base: self.base = self.base.analyse_types(env) if self.base.type.is_error: # Do not visit child tree if base is undeclared to avoid confusing # error messages self.type = PyrexTypes.error_type return self is_slice = isinstance(self.index, SliceNode) if not env.directives['wraparound']: if is_slice: check_negative_indices(self.index.start, self.index.stop) else: check_negative_indices(self.index) # Potentially overflowing index value. if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value): self.index = self.index.coerce_to_pyobject(env) is_memslice = self.base.type.is_memoryviewslice # Handle the case where base is a literal char* (and we expect a string, not an int) if not is_memslice and (isinstance(self.base, BytesNode) or is_slice): if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array): self.base = self.base.coerce_to_pyobject(env) skip_child_analysis = False buffer_access = False if self.indices: indices = self.indices elif isinstance(self.index, TupleNode): indices = self.index.args else: indices = [self.index] if (is_memslice and not self.indices and isinstance(self.index, EllipsisNode)): # Memoryviewslice copying self.is_memslice_copy = True elif is_memslice: # memoryviewslice indexing or slicing import MemoryView skip_child_analysis = True newaxes = [newaxis for newaxis in indices if newaxis.is_none] have_slices, indices = MemoryView.unellipsify(indices, newaxes, self.base.type.ndim) self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim) axes = [] index_type = PyrexTypes.c_py_ssize_t_type new_indices = [] if len(indices) - len(newaxes) > 
self.base.type.ndim: self.type = error_type error(indices[self.base.type.ndim].pos, "Too many indices specified for type %s" % self.base.type) return self axis_idx = 0 for i, index in enumerate(indices[:]): index = index.analyse_types(env) if not index.is_none: access, packing = self.base.type.axes[axis_idx] axis_idx += 1 if isinstance(index, SliceNode): self.memslice_slice = True if index.step.is_none: axes.append((access, packing)) else: axes.append((access, 'strided')) # Coerce start, stop and step to temps of the right type for attr in ('start', 'stop', 'step'): value = getattr(index, attr) if not value.is_none: value = value.coerce_to(index_type, env) #value = value.coerce_to_temp(env) setattr(index, attr, value) new_indices.append(value) elif index.is_none: self.memslice_slice = True new_indices.append(index) axes.append(('direct', 'strided')) elif index.type.is_int or index.type.is_pyobject: if index.type.is_pyobject and not self.warned_untyped_idx: warning(index.pos, "Index should be typed for more " "efficient access", level=2) IndexNode.warned_untyped_idx = True self.memslice_index = True index = index.coerce_to(index_type, env) indices[i] = index new_indices.append(index) else: self.type = error_type error(index.pos, "Invalid index for memoryview specified") return self self.memslice_index = self.memslice_index and not self.memslice_slice self.original_indices = indices # All indices with all start/stop/step for slices. # We need to keep this around self.indices = new_indices self.env = env elif self.base.type.is_buffer: # Buffer indexing if len(indices) == self.base.type.ndim: buffer_access = True skip_child_analysis = True for x in indices: x = x.analyse_types(env) if not x.type.is_int: buffer_access = False if buffer_access and not self.base.type.is_memoryviewslice: assert hasattr(self.base, "entry") # Must be a NameNode-like node # On cloning, indices is cloned. 
Otherwise, unpack index into indices assert not (buffer_access and isinstance(self.index, CloneNode)) self.nogil = env.nogil if buffer_access or self.memslice_index: #if self.base.type.is_memoryviewslice and not self.base.is_name: # self.base = self.base.coerce_to_temp(env) self.base = self.base.coerce_to_simple(env) self.indices = indices self.index = None self.type = self.base.type.dtype self.is_buffer_access = True self.buffer_type = self.base.type #self.base.entry.type if getting and self.type.is_pyobject: self.is_temp = True if setting and self.base.type.is_memoryviewslice: self.base.type.writable_needed = True elif setting: if not self.base.entry.type.writable: error(self.pos, "Writing to readonly buffer") else: self.writable_needed = True if self.base.type.is_buffer: self.base.entry.buffer_aux.writable_needed = True elif self.is_memslice_copy: self.type = self.base.type if getting: self.memslice_ellipsis_noop = True else: self.memslice_broadcast = True elif self.memslice_slice: self.index = None self.is_temp = True self.use_managed_ref = True if not MemoryView.validate_axes(self.pos, axes): self.type = error_type return self self.type = PyrexTypes.MemoryViewSliceType( self.base.type.dtype, axes) if (self.base.type.is_memoryviewslice and not self.base.is_name and not self.base.result_in_temp()): self.base = self.base.coerce_to_temp(env) if setting: self.memslice_broadcast = True else: base_type = self.base.type if not base_type.is_cfunction: if isinstance(self.index, TupleNode): self.index = self.index.analyse_types( env, skip_children=skip_child_analysis) elif not skip_child_analysis: self.index = self.index.analyse_types(env) self.original_index_type = self.index.type if base_type.is_unicode_char: # we infer Py_UNICODE/Py_UCS4 for unicode strings in some # cases, but indexing must still work for them if setting: warning(self.pos, "cannot assign to Unicode string index", level=1) elif self.index.constant_result in (0, -1): # uchar[0] => uchar return 
self.base self.base = self.base.coerce_to_pyobject(env) base_type = self.base.type if base_type.is_pyobject: if self.index.type.is_int and base_type is not dict_type: if (getting and (base_type in (list_type, tuple_type, bytearray_type)) and (not self.index.type.signed or not env.directives['wraparound'] or (isinstance(self.index, IntNode) and self.index.has_constant_result() and self.index.constant_result >= 0)) and not env.directives['boundscheck']): self.is_temp = 0 else: self.is_temp = 1 self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env) self.original_index_type.create_to_py_utility_code(env) else: self.index = self.index.coerce_to_pyobject(env) self.is_temp = 1 if self.index.type.is_int and base_type is unicode_type: # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string # if required, so this is fast and safe self.type = PyrexTypes.c_py_ucs4_type elif self.index.type.is_int and base_type is bytearray_type: if setting: self.type = PyrexTypes.c_uchar_type else: # not using 'uchar' to enable fast and safe error reporting as '-1' self.type = PyrexTypes.c_int_type elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type): self.type = base_type else: item_type = None if base_type in (list_type, tuple_type) and self.index.type.is_int: item_type = infer_sequence_item_type( env, self.base, self.index, seq_type=base_type) if item_type is None: item_type = py_object_type self.type = item_type if base_type in (list_type, tuple_type, dict_type): # do the None check explicitly (not in a helper) to allow optimising it away self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") else: if base_type.is_ptr or base_type.is_array: self.type = base_type.base_type if is_slice: self.type = base_type elif self.index.type.is_pyobject: self.index = self.index.coerce_to( PyrexTypes.c_py_ssize_t_type, env) elif not self.index.type.is_int: error(self.pos, "Invalid index type 
'%s'" % self.index.type) elif base_type.is_cpp_class: function = env.lookup_operator("[]", [self.base, self.index]) if function is None: error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type)) self.type = PyrexTypes.error_type self.result_code = "<error>" return self func_type = function.type if func_type.is_ptr: func_type = func_type.base_type self.index = self.index.coerce_to(func_type.args[0].type, env) self.type = func_type.return_type if setting and not func_type.return_type.is_reference: error(self.pos, "Can't set non-reference result '%s'" % self.type) elif base_type.is_cfunction: if base_type.is_fused: self.parse_indexed_fused_cdef(env) else: self.type_indices = self.parse_index_as_types(env) if base_type.templates is None: error(self.pos, "Can only parameterize template functions.") elif len(base_type.templates) != len(self.type_indices): error(self.pos, "Wrong number of template arguments: expected %s, got %s" % ( (len(base_type.templates), len(self.type_indices)))) self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices))) else: error(self.pos, "Attempting to index non-array type '%s'" % base_type) self.type = PyrexTypes.error_type self.wrap_in_nonecheck_node(env, getting) return self def wrap_in_nonecheck_node(self, env, getting): if not env.directives['nonecheck'] or not self.base.may_be_none(): return if self.base.type.is_memoryviewslice: if self.is_memslice_copy and not getting: msg = "Cannot assign to None memoryview slice" elif self.memslice_slice: msg = "Cannot slice None memoryview slice" else: msg = "Cannot index None memoryview slice" else: msg = "'NoneType' object is not subscriptable" self.base = self.base.as_none_safe_node(msg) def parse_index_as_types(self, env, required=True): if isinstance(self.index, TupleNode): indices = self.index.args else: indices = [self.index] type_indices = [] for index in indices: type_indices.append(index.analyse_as_type(env)) if type_indices[-1] 
is None: if required: error(index.pos, "not parsable as a type") return None return type_indices def parse_indexed_fused_cdef(self, env): """ Interpret fused_cdef_func[specific_type1, ...] Note that if this method is called, we are an indexed cdef function with fused argument types, and this IndexNode will be replaced by the NameNode with specific entry just after analysis of expressions by AnalyseExpressionsTransform. """ self.type = PyrexTypes.error_type self.is_fused_index = True base_type = self.base.type specific_types = [] positions = [] if self.index.is_name or self.index.is_attribute: positions.append(self.index.pos) elif isinstance(self.index, TupleNode): for arg in self.index.args: positions.append(arg.pos) specific_types = self.parse_index_as_types(env, required=False) if specific_types is None: self.index = self.index.analyse_types(env) if not self.base.entry.as_variable: error(self.pos, "Can only index fused functions with types") else: # A cpdef function indexed with Python objects self.base.entry = self.entry = self.base.entry.as_variable self.base.type = self.type = self.entry.type self.base.is_temp = True self.is_temp = True self.entry.used = True self.is_fused_index = False return for i, type in enumerate(specific_types): specific_types[i] = type.specialize_fused(env) fused_types = base_type.get_fused_types() if len(specific_types) > len(fused_types): return error(self.pos, "Too many types specified") elif len(specific_types) < len(fused_types): t = fused_types[len(specific_types)] return error(self.pos, "Not enough types specified to specialize " "the function, %s is still fused" % t) # See if our index types form valid specializations for pos, specific_type, fused_type in zip(positions, specific_types, fused_types): if not Utils.any([specific_type.same_as(t) for t in fused_type.types]): return error(pos, "Type not in fused type") if specific_type is None or specific_type.is_error: return fused_to_specific = dict(zip(fused_types, specific_types)) 
type = base_type.specialize(fused_to_specific) if type.is_fused: # Only partially specific, this is invalid error(self.pos, "Index operation makes function only partially specific") else: # Fully specific, find the signature with the specialized entry for signature in self.base.type.get_all_specialized_function_types(): if type.same_as(signature): self.type = signature if self.base.is_attribute: # Pretend to be a normal attribute, for cdef extension # methods self.entry = signature.entry self.is_attribute = True self.obj = self.base.obj self.type.entry.used = True self.base.type = signature self.base.entry = signature.entry break else: # This is a bug raise InternalError("Couldn't find the right signature") gil_message = "Indexing Python object" def nogil_check(self, env): if self.is_buffer_access or self.memslice_index or self.memslice_slice: if not self.memslice_slice and env.directives['boundscheck']: # error(self.pos, "Cannot check buffer index bounds without gil; " # "use boundscheck(False) directive") warning(self.pos, "Use boundscheck(False) for faster access", level=1) if self.type.is_pyobject: error(self.pos, "Cannot access buffer with object dtype without gil") return super(IndexNode, self).nogil_check(env) def check_const_addr(self): return self.base.check_const_addr() and self.index.check_const() def is_lvalue(self): # NOTE: references currently have both is_reference and is_ptr # set. Since pointers and references have different lvalue # rules, we must be careful to separate the two. if self.type.is_reference: if self.type.ref_base_type.is_array: # fixed-sized arrays aren't l-values return False elif self.type.is_ptr: # non-const pointers can always be reassigned return True elif self.type.is_array: # fixed-sized arrays aren't l-values return False # Just about everything else returned by the index operator # can be an lvalue. 
        return True

    def calculate_result_code(self):
        # Produce the C expression for this subscript, specialised per
        # base type (buffer deref, memslice copy, list/tuple/bytearray
        # macros, C++/fused template call syntax, or plain C indexing).
        if self.is_buffer_access:
            return "(*%s)" % self.buffer_ptr_code
        elif self.is_memslice_copy:
            return self.base.result()
        elif self.base.type in (list_type, tuple_type, bytearray_type):
            # direct-access CPython macros; bounds/wraparound handling is
            # the caller's responsibility (see analyse_base_and_index_types)
            if self.base.type is list_type:
                index_code = "PyList_GET_ITEM(%s, %s)"
            elif self.base.type is tuple_type:
                index_code = "PyTuple_GET_ITEM(%s, %s)"
            elif self.base.type is bytearray_type:
                index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
            else:
                assert False, "unexpected base type in indexing: %s" % self.base.type
        elif self.base.type.is_cfunction:
            # indexed fused/template cdef function: emit name<T1,T2,...>
            return "%s<%s>" % (
                self.base.result(),
                ",".join([param.declaration_code("") for param in self.type_indices]))
        else:
            if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
                error(self.pos, "Invalid use of pointer slice")
                return
            index_code = "(%s[%s])"
        return index_code % (self.base.result(), self.index.result())

    def extra_index_params(self, code):
        # Extra C arguments appended to the __Pyx_{Get,Set,Del}ItemInt
        # utility calls: index C type, signedness, to-py converter, and
        # the is_list / wraparound / boundscheck flags.  Empty for
        # non-integer (object) indices.
        if self.index.type.is_int:
            is_list = self.base.type is list_type
            # wraparound can be skipped for provably non-negative constants
            wraparound = (
                bool(code.globalstate.directives['wraparound']) and
                self.original_index_type.signed and
                not (isinstance(self.index.constant_result, (int, long))
                     and self.index.constant_result >= 0))
            boundscheck = bool(code.globalstate.directives['boundscheck'])
            return ", %s, %d, %s, %d, %d, %d" % (
                self.original_index_type.declaration_code(""),
                self.original_index_type.signed and 1 or 0,
                self.original_index_type.to_py_function,
                is_list, wraparound, boundscheck)
        else:
            return ""

    def generate_subexpr_evaluation_code(self, code):
        # Evaluate base, then either the single index node or the
        # unpacked index list (buffer/memslice access); type indices
        # are compile-time only and generate no code.
        self.base.generate_evaluation_code(code)
        if self.type_indices is not None:
            pass
        elif self.indices is None:
            self.index.generate_evaluation_code(code)
        else:
            for i in self.indices:
                i.generate_evaluation_code(code)

    def generate_subexpr_disposal_code(self, code):
        # Mirror of generate_subexpr_evaluation_code for disposal.
        self.base.generate_disposal_code(code)
        if self.type_indices is not None:
            pass
        elif self.indices is None:
            self.index.generate_disposal_code(code)
        else:
            for i in self.indices:
i.generate_disposal_code(code) def free_subexpr_temps(self, code): self.base.free_temps(code) if self.indices is None: self.index.free_temps(code) else: for i in self.indices: i.free_temps(code) def generate_result_code(self, code): if self.is_buffer_access or self.memslice_index: buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code) if self.type.is_pyobject: # is_temp is True, so must pull out value and incref it. # NOTE: object temporary results for nodes are declared # as PyObject *, so we need a cast code.putln("%s = (PyObject *) *%s;" % (self.temp_code, self.buffer_ptr_code)) code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code) elif self.memslice_slice: self.put_memoryviewslice_slice_code(code) elif self.is_temp: if self.type.is_pyobject: error_value = 'NULL' if self.index.type.is_int: if self.base.type is list_type: function = "__Pyx_GetItemInt_List" elif self.base.type is tuple_type: function = "__Pyx_GetItemInt_Tuple" else: function = "__Pyx_GetItemInt" code.globalstate.use_utility_code( TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")) else: if self.base.type is dict_type: function = "__Pyx_PyDict_GetItem" code.globalstate.use_utility_code( UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")) else: function = "PyObject_GetItem" elif self.type.is_unicode_char and self.base.type is unicode_type: assert self.index.type.is_int function = "__Pyx_GetItemInt_Unicode" error_value = '(Py_UCS4)-1' code.globalstate.use_utility_code( UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")) elif self.base.type is bytearray_type: assert self.index.type.is_int assert self.type.is_int function = "__Pyx_GetItemInt_ByteArray" error_value = '-1' code.globalstate.use_utility_code( UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")) else: assert False, "unexpected type %s and base type %s for indexing" % ( self.type, self.base.type) if self.index.type.is_int: index_code = self.index.result() else: index_code = 
self.index.py_result() code.putln( "%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % ( self.result(), function, self.base.py_result(), index_code, self.extra_index_params(code), self.result(), error_value, code.error_goto(self.pos))) if self.type.is_pyobject: code.put_gotref(self.py_result()) def generate_setitem_code(self, value_code, code): if self.index.type.is_int: if self.base.type is bytearray_type: code.globalstate.use_utility_code( UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c")) function = "__Pyx_SetItemInt_ByteArray" else: code.globalstate.use_utility_code( UtilityCode.load_cached("SetItemInt", "ObjectHandling.c")) function = "__Pyx_SetItemInt" index_code = self.index.result() else: index_code = self.index.py_result() if self.base.type is dict_type: function = "PyDict_SetItem" # It would seem that we could specialized lists/tuples, but that # shouldn't happen here. # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as # index instead of an object, and bad conversion here would give # the wrong exception. Also, tuples are supposed to be immutable, # and raise a TypeError when trying to set their entries # (PyTuple_SetItem() is for creating new tuples from scratch). else: function = "PyObject_SetItem" code.putln( "if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % ( function, self.base.py_result(), index_code, value_code, self.extra_index_params(code), code.error_goto(self.pos))) def generate_buffer_setitem_code(self, rhs, code, op=""): # Used from generate_assignment_code and InPlaceAssignmentNode buffer_entry, ptrexpr = self.buffer_lookup_code(code) if self.buffer_type.dtype.is_pyobject: # Must manage refcounts. Decref what is already there # and incref what we put in. 
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type, manage_ref=False) rhs_code = rhs.result() code.putln("%s = %s;" % (ptr, ptrexpr)) code.put_gotref("*%s" % ptr) code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % ( rhs_code, ptr)) code.putln("*%s %s= %s;" % (ptr, op, rhs_code)) code.put_giveref("*%s" % ptr) code.funcstate.release_temp(ptr) else: # Simple case code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result())) def generate_assignment_code(self, rhs, code): generate_evaluation_code = (self.is_memslice_scalar_assignment or self.memslice_slice) if generate_evaluation_code: self.generate_evaluation_code(code) else: self.generate_subexpr_evaluation_code(code) if self.is_buffer_access or self.memslice_index: self.generate_buffer_setitem_code(rhs, code) elif self.is_memslice_scalar_assignment: self.generate_memoryviewslice_assign_scalar_code(rhs, code) elif self.memslice_slice or self.is_memslice_copy: self.generate_memoryviewslice_setslice_code(rhs, code) elif self.type.is_pyobject: self.generate_setitem_code(rhs.py_result(), code) elif self.base.type is bytearray_type: value_code = self._check_byte_value(code, rhs) self.generate_setitem_code(value_code, code) else: code.putln( "%s = %s;" % ( self.result(), rhs.result())) if generate_evaluation_code: self.generate_disposal_code(code) else: self.generate_subexpr_disposal_code(code) self.free_subexpr_temps(code) rhs.generate_disposal_code(code) rhs.free_temps(code) def _check_byte_value(self, code, rhs): # TODO: should we do this generally on downcasts, or just here? 
assert rhs.type.is_int, repr(rhs.type) value_code = rhs.result() if rhs.has_constant_result(): if 0 <= rhs.constant_result < 256: return value_code needs_cast = True # make at least the C compiler happy warning(rhs.pos, "value outside of range(0, 256)" " when assigning to byte: %s" % rhs.constant_result, level=1) else: needs_cast = rhs.type != PyrexTypes.c_uchar_type if not self.nogil: conditions = [] if rhs.is_literal or rhs.type.signed: conditions.append('%s < 0' % value_code) if (rhs.is_literal or not (rhs.is_temp and rhs.type in ( PyrexTypes.c_uchar_type, PyrexTypes.c_char_type, PyrexTypes.c_schar_type))): conditions.append('%s > 255' % value_code) if conditions: code.putln("if (unlikely(%s)) {" % ' || '.join(conditions)) code.putln( 'PyErr_SetString(PyExc_ValueError,' ' "byte must be in range(0, 256)"); %s' % code.error_goto(self.pos)) code.putln("}") if needs_cast: value_code = '((unsigned char)%s)' % value_code return value_code def generate_deletion_code(self, code, ignore_nonexisting=False): self.generate_subexpr_evaluation_code(code) #if self.type.is_pyobject: if self.index.type.is_int: function = "__Pyx_DelItemInt" index_code = self.index.result() code.globalstate.use_utility_code( UtilityCode.load_cached("DelItemInt", "ObjectHandling.c")) else: index_code = self.index.py_result() if self.base.type is dict_type: function = "PyDict_DelItem" else: function = "PyObject_DelItem" code.putln( "if (%s(%s, %s%s) < 0) %s" % ( function, self.base.py_result(), index_code, self.extra_index_params(code), code.error_goto(self.pos))) self.generate_subexpr_disposal_code(code) self.free_subexpr_temps(code) def buffer_entry(self): import Buffer, MemoryView base = self.base if self.base.is_nonecheck: base = base.arg if base.is_name: entry = base.entry else: # SimpleCallNode is_simple is not consistent with coerce_to_simple assert base.is_simple() or base.is_temp cname = base.result() entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos) if entry.type.is_buffer: 
buffer_entry = Buffer.BufferEntry(entry) else: buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry) return buffer_entry def buffer_lookup_code(self, code): "ndarray[1, 2, 3] and memslice[1, 2, 3]" # Assign indices to temps index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False) for i in self.indices] for temp, index in zip(index_temps, self.indices): code.putln("%s = %s;" % (temp, index.result())) # Generate buffer access code using these temps import Buffer buffer_entry = self.buffer_entry() if buffer_entry.type.is_buffer: negative_indices = buffer_entry.type.negative_indices else: negative_indices = Buffer.buffer_defaults['negative_indices'] return buffer_entry, Buffer.put_buffer_lookup_code( entry=buffer_entry, index_signeds=[i.type.signed for i in self.indices], index_cnames=index_temps, directives=code.globalstate.directives, pos=self.pos, code=code, negative_indices=negative_indices, in_nogil_context=self.in_nogil_context) def put_memoryviewslice_slice_code(self, code): "memslice[:]" buffer_entry = self.buffer_entry() have_gil = not self.in_nogil_context if sys.version_info < (3,): def next_(it): return it.next() else: next_ = next have_slices = False it = iter(self.indices) for index in self.original_indices: is_slice = isinstance(index, SliceNode) have_slices = have_slices or is_slice if is_slice: if not index.start.is_none: index.start = next_(it) if not index.stop.is_none: index.stop = next_(it) if not index.step.is_none: index.step = next_(it) else: next_(it) assert not list(it) buffer_entry.generate_buffer_slice_code(code, self.original_indices, self.result(), have_gil=have_gil, have_slices=have_slices, directives=code.globalstate.directives) def generate_memoryviewslice_setslice_code(self, rhs, code): "memslice1[...] = memslice2 or memslice1[:] = memslice2" import MemoryView MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code) def generate_memoryviewslice_assign_scalar_code(self, rhs, code): "memslice1[...] 
= 0.0 or memslice1[:] = 0.0"
        import MemoryView
        MemoryView.assign_scalar(self, rhs, code)


class SliceIndexNode(ExprNode):
    #  2-element slice indexing, e.g. base[start:stop]
    #
    #  base      ExprNode
    #  start     ExprNode or None
    #  stop      ExprNode or None
    #  slice     ExprNode or None   constant slice object

    subexprs = ['base', 'start', 'stop', 'slice']

    slice = None

    def infer_type(self, env):
        # Infer the result type from the base expression's type:
        # C strings slice to bytes, known builtins slice to themselves,
        # pointers/arrays slice to a C array; anything else is a generic
        # Python object.
        base_type = self.base.infer_type(env)
        if base_type.is_string or base_type.is_cpp_class:
            return bytes_type
        elif base_type.is_pyunicode_ptr:
            return unicode_type
        elif base_type in (bytes_type, str_type, unicode_type,
                           basestring_type, list_type, tuple_type):
            return base_type
        elif base_type.is_ptr or base_type.is_array:
            return PyrexTypes.c_array_type(base_type.base_type, None)
        return py_object_type

    def may_be_none(self):
        # Slicing a C string or a known builtin sequence never yields None.
        base_type = self.base.type
        if base_type:
            if base_type.is_string:
                return False
            if base_type in (bytes_type, str_type, unicode_type,
                             basestring_type, list_type, tuple_type):
                return False
        return ExprNode.may_be_none(self)

    def calculate_constant_result(self):
        if self.start is None:
            start = None
        else:
            start = self.start.constant_result
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.constant_result
        self.constant_result = self.base.constant_result[start:stop]

    def compile_time_value(self, denv):
        base = self.base.compile_time_value(denv)
        if self.start is None:
            start = 0
        else:
            start = self.start.compile_time_value(denv)
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.compile_time_value(denv)
        try:
            return base[start:stop]
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_target_declaration(self, env):
        pass

    def analyse_target_types(self, env):
        node = self.analyse_types(env, getting=False)
        # when assigning, we must accept any Python type
        if node.type.is_pyobject:
            node.type = py_object_type
        return node

    def analyse_types(self, env, getting=True):
        self.base = self.base.analyse_types(env)

        if self.base.type.is_memoryviewslice:
            # delegate memoryview slicing to IndexNode with a SliceNode index
            none_node = NoneNode(self.pos)
            index = SliceNode(self.pos,
                              start=self.start or none_node,
                              stop=self.stop or none_node,
                              step=none_node)
            index_node = IndexNode(self.pos, index, base=self.base)
            return index_node.analyse_base_and_index_types(
                env, getting=getting, setting=not getting,
                analyse_base=False)

        if self.start:
            self.start = self.start.analyse_types(env)
        if self.stop:
            self.stop = self.stop.analyse_types(env)

        if not env.directives['wraparound']:
            check_negative_indices(self.start, self.stop)

        base_type = self.base.type
        if base_type.is_string or base_type.is_cpp_string:
            self.type = default_str_type(env)
        elif base_type.is_pyunicode_ptr:
            self.type = unicode_type
        elif base_type.is_ptr:
            self.type = base_type
        elif base_type.is_array:
            # we need a ptr type here instead of an array type, as
            # array types can result in invalid type casts in the C
            # code
            self.type = PyrexTypes.CPtrType(base_type.base_type)
        else:
            self.base = self.base.coerce_to_pyobject(env)
            self.type = py_object_type
        if base_type.is_builtin_type:
            # slicing builtin types returns something of the same type
            self.type = base_type
            self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")

        if self.type is py_object_type:
            if (not self.start or self.start.is_literal) and \
                    (not self.stop or self.stop.is_literal):
                # cache the constant slice object, in case we need it
                none_node = NoneNode(self.pos)
                self.slice = SliceNode(
                    self.pos,
                    start=copy.deepcopy(self.start or none_node),
                    stop=copy.deepcopy(self.stop or none_node),
                    step=none_node
                ).analyse_types(env)
        else:
            # C-level slicing: indices become Py_ssize_t
            c_int = PyrexTypes.c_py_ssize_t_type
            if self.start:
                self.start = self.start.coerce_to(c_int, env)
            if self.stop:
                self.stop = self.stop.coerce_to(c_int, env)
        self.is_temp = 1
        return self

    nogil_check = Node.gil_error
    gil_message = "Slicing Python object"

    get_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Get'})

    set_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Set'})

    def coerce_to(self, dst_type, env):
        if ((self.base.type.is_string or self.base.type.is_cpp_string)
                and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
            if (dst_type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(self.pos,
                    "default encoding required for conversion from '%s' to '%s'" %
                    (self.base.type, dst_type))
            self.type = dst_type
        return super(SliceIndexNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        # Emit C code for a "get slice" operation, dispatching on the
        # base type (C string, Py_UNICODE*, unicode, generic object,
        # list/tuple fast paths).
        if not self.type.is_pyobject:
            error(self.pos,
                "Slicing is not currently supported for '%s'." % self.type)
            return

        base_result = self.base.result()
        result = self.result()
        start_code = self.start_code()
        stop_code = self.stop_code()
        if self.base.type.is_string:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_char_ptr_type:
                base_result = '((const char*)%s)' % base_result
            if self.type is bytearray_type:
                type_name = 'ByteArray'
            else:
                type_name = self.type.name.title()
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type.is_pyunicode_ptr:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
                base_result = '((const Py_UNICODE*)%s)' % base_result
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type is unicode_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
            code.putln(
                "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
                    result,
                    base_result,
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        elif self.type is py_object_type:
            code.globalstate.use_utility_code(self.get_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.putln(
                "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
                    result,
                    self.base.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound']),
                    code.error_goto_if_null(result, self.pos)))
        else:
            if self.base.type is list_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyList_GetSlice'
            elif self.base.type is tuple_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyTuple_GetSlice'
            else:
                cfunc = '__Pyx_PySequence_GetSlice'
            code.putln(
                "%s = %s(%s, %s, %s); %s" % (
                    result,
                    cfunc,
                    self.base.py_result(),
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())

    def generate_assignment_code(self, rhs, code):
        # Emit C code for "base[start:stop] = rhs": Python-level slice
        # assignment for objects, element-wise copy for C arrays.
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            code.globalstate.use_utility_code(self.set_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.put_error_if_neg(self.pos,
                "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                    self.base.py_result(),
                    rhs.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound'])))
        else:
            start_offset = ''
            if self.start:
                start_offset = self.start_code()
                if start_offset == '0':
                    start_offset = ''
                else:
                    start_offset += '+'
            if rhs.type.is_array:
                array_length = rhs.type.size
                self.generate_slice_guard_code(code, array_length)
            else:
                error(self.pos,
                    "Slice assignments from pointers are not yet supported.")
                # FIXME: fix the array size according to start/stop
                array_length = self.base.type.size
            for i in range(array_length):
                code.putln("%s[%s%s] = %s[%d];" % (
                        self.base.result(), start_offset, i,
                        rhs.result(), i))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Emit C code for "del base[start:stop]" (Python objects only).
        if not self.base.type.is_pyobject:
            error(self.pos,
                "Deleting slices is only supported for Python types, not '%s'." % self.type)
            return
        self.generate_subexpr_evaluation_code(code)
        code.globalstate.use_utility_code(self.set_slice_utility_code)
        (has_c_start, has_c_stop, c_start, c_stop,
         py_start, py_stop, py_slice) = self.get_slice_config()
        code.put_error_if_neg(self.pos,
            "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                self.base.py_result(),
                c_start, c_stop,
                py_start, py_stop, py_slice,
                has_c_start, has_c_stop,
                bool(code.globalstate.directives['wraparound'])))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)

    def get_slice_config(self):
        # Return the 7-tuple of flags and C expressions that the
        # __Pyx_PyObject_{Get,Set,Del}Slice helpers expect: whether
        # start/stop are C integers, their C values, and the Python
        # fallback object pointers (or 'NULL').
        has_c_start, c_start, py_start = False, '0', 'NULL'
        if self.start:
            has_c_start = not self.start.type.is_pyobject
            if has_c_start:
                c_start = self.start.result()
            else:
                py_start = '&%s' % self.start.py_result()
        has_c_stop, c_stop, py_stop = False, '0', 'NULL'
        if self.stop:
            has_c_stop = not self.stop.type.is_pyobject
            if has_c_stop:
                c_stop = self.stop.result()
            else:
                py_stop = '&%s' % self.stop.py_result()
        py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
        return (has_c_start, has_c_stop, c_start, c_stop,
                py_start, py_stop, py_slice)

    def generate_slice_guard_code(self, code, target_size):
        # For assignments into a C array slice, emit a compile-time or
        # runtime check that the slice length matches target_size.
        if not self.base.type.is_array:
            return
        slice_size = self.base.type.size
        start = stop = None
        if self.stop:
            stop = self.stop.result()
            try:
                stop = int(stop)
                if stop < 0:
                    slice_size = self.base.type.size + stop
                else:
                    slice_size = stop
                stop = None
            except ValueError:
                pass
        if self.start:
            start = self.start.result()
            try:
                start = int(start)
                if start < 0:
                    start = self.base.type.size + start
                slice_size -= start
                start = None
            except ValueError:
                pass
        check = None
        if slice_size < 0:
            if target_size > 0:
                error(self.pos, "Assignment to empty slice.")
        elif start is None and stop is None:
            # we know the exact slice length
            if target_size != slice_size:
                error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
                        slice_size, target_size))
        elif start is not None:
            if stop is None:
                stop = slice_size
            check = "(%s)-(%s)" % (stop, start)
        else:  # stop is not None:
            check = stop
        if check:
            # runtime length check
            code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
            code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
                        target_size, check))
            code.putln(code.error_goto(self.pos))
            code.putln("}")

    def start_code(self):
        if self.start:
            return self.start.result()
        else:
            return "0"

    def stop_code(self):
        if self.stop:
            return self.stop.result()
        elif self.base.type.is_array:
            return self.base.type.size
        else:
            return "PY_SSIZE_T_MAX"

    def calculate_result_code(self):
        # self.result() is not used, but this method must exist
        return "<unused>"


class SliceNode(ExprNode):
    #  start:stop:step in subscript list
    #
    #  start     ExprNode
    #  stop      ExprNode
    #  step      ExprNode

    subexprs = ['start', 'stop', 'step']

    type = slice_type
    is_temp = 1

    def calculate_constant_result(self):
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)

    def compile_time_value(self, denv):
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception, e:
            self.compile_time_value_error(e)

    def may_be_none(self):
        return False

    def analyse_types(self, env):
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            # all-literal slices become cached module-level constants
            self.is_literal = True
            self.is_temp = False
        return self

    gil_message = "Constructing Python slice object"

    def calculate_result_code(self):
        return self.result_code

    def generate_result_code(self, code):
        if self.is_literal:
            # emit into the cached-constants section instead of inline
            self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)

        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            code.put_giveref(self.py_result())

    def __deepcopy__(self, memo):
        """
        There is a copy bug in python 2.4 for slice objects.
        """
        return SliceNode(
            self.pos,
            start=copy.deepcopy(self.start, memo),
            stop=copy.deepcopy(self.stop, memo),
            step=copy.deepcopy(self.step, memo),
            is_temp=self.is_temp,
            is_literal=self.is_literal,
            constant_result=self.constant_result)


class CallNode(ExprNode):
    # Base class for function call nodes.

    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None

    def infer_type(self, env):
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(function, NewExprNode):
            # note: needs call to infer_type() above
            return PyrexTypes.CPtrType(function.class_type)
        if func_type is py_object_type:
            # function might have lied for safety => try to find better type
            entry = getattr(function, 'entry', None)
            if entry is not None:
                func_type = entry.type or func_type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type

    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # the case of function overloading.
        return self.function.type_dependencies(env)

    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False

    def may_be_none(self):
        if self.may_return_none is not None:
            return self.may_return_none
        func_type = self.function.type
        if func_type is type_type and self.function.is_name:
            entry = self.function.entry
            if entry.type.is_extension_type:
                return False
            if (entry.type.is_builtin_type and
                    entry.name in Builtin.types_that_construct_their_instance):
                return False
        return ExprNode.may_be_none(self)

    def analyse_as_type_constructor(self, env):
        # Handle calls that are really struct/union or C++ class
        # construction; returns True if the node was rewritten.
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            args, kwds = self.explicit_args_kwds()
            items = []
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos,
                                          key=StringNode(pos=arg.pos, value=member.name),
                                          value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            self.__class__ = DictNode
            self.analyse_types(env)    # FIXME
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            constructor = type.scope.lookup("<init>")
            self.function = RawCNameExprNode(self.function.pos, constructor.type)
            self.function.entry = constructor
            self.function.set_cname(type.declaration_code(""))
            self.analyse_c_function_call(env)
            self.type = type
            return True

    def is_lvalue(self):
        return self.type.is_reference

    def nogil_check(self, env):
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not getattr(func_type, 'nogil', False):
            self.gil_error()

    gil_message = "Calling gil-requiring function"


class SimpleCallNode(CallNode):
    #  Function call without keyword, * or ** args.
    #
    #  function       ExprNode
    #  args           [ExprNode]
    #  arg_tuple      ExprNode or None     used internally
    #  self           ExprNode or None     used internally
    #  coerced_self   ExprNode or None     used internally
    #  wrapper_call   bool                 used internally
    #  has_optional_args   bool            used internally
    #  nogil          bool                 used internally

    subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']

    self = None
    coerced_self = None
    arg_tuple = None
    wrapper_call = False
    has_optional_args = False
    nogil = False
    analysed = False

    def compile_time_value(self, denv):
        function = self.function.compile_time_value(denv)
        args = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return function(*args)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_as_type(self, env):
        # support 'cython.pointer(T)' used in a type position
        attr = self.function.as_cython_attribute()
        if attr == 'pointer':
            if len(self.args) != 1:
                error(self.args.pos, "only one type allowed.")
            else:
                type = self.args[0].analyse_as_type(env)
                if not type:
                    error(self.args[0].pos, "Unknown type")
                else:
                    return PyrexTypes.CPtrType(type)

    def explicit_args_kwds(self):
        return self.args, None

    def analyse_types(self, env):
        if self.analyse_as_type_constructor(env):
            return self
        if self.analysed:
            return self
        self.analysed = True
        self.function.is_called = 1
        self.function = self.function.analyse_types(env)
        function = self.function

        if function.is_attribute and function.entry and function.entry.is_cmethod:
            # Take ownership of the object from which the attribute
            # was obtained, because we need to pass it as 'self'.
            self.self = function.obj
            function.obj = CloneNode(self.self)

        func_type = self.function_type()
        if func_type.is_pyobject:
            # Python call: pack positional args into a tuple
            self.arg_tuple = TupleNode(self.pos, args = self.args)
            self.arg_tuple = self.arg_tuple.analyse_types(env)
            self.args = None
            if func_type is Builtin.type_type and function.is_name and \
                   function.entry and \
                   function.entry.is_builtin and \
                   function.entry.name in Builtin.types_that_construct_their_instance:
                # calling a builtin type that returns a specific object type
                if function.entry.name == 'float':
                    # the following will come true later on in a transform
                    self.type = PyrexTypes.c_double_type
                    self.result_ctype = PyrexTypes.c_double_type
                else:
                    self.type = Builtin.builtin_types[function.entry.name]
                    self.result_ctype = py_object_type
                self.may_return_none = False
            elif function.is_name and function.type_entry:
                # We are calling an extension type constructor.  As
                # long as we do not support __new__(), the result type
                # is clear
                self.type = function.type_entry.type
                self.result_ctype = py_object_type
                self.may_return_none = False
            else:
                self.type = py_object_type
            self.is_temp = 1
        else:
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            self.analyse_c_function_call(env)
        return self

    def function_type(self):
        # Return the type of the function being called, coercing a function
        # pointer to a function if necessary.  If the function has fused
        # arguments, return the specific type.
        func_type = self.function.type

        if func_type.is_ptr:
            func_type = func_type.base_type

        return func_type

    def analyse_c_function_call(self, env):
        # Resolve overloads/fused specialisations, validate the 'self'
        # argument, and coerce all arguments for a C-level call.
        if self.function.type is error_type:
            self.type = error_type
            return

        if self.self:
            args = [self.self] + self.args
        else:
            args = self.args

        if self.function.type.is_cpp_class:
            overloaded_entry = self.function.type.scope.lookup("operator()")
            if overloaded_entry is None:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        elif hasattr(self.function, 'entry'):
            overloaded_entry = self.function.entry
        elif (isinstance(self.function, IndexNode) and
              self.function.is_fused_index):
            overloaded_entry = self.function.type.entry
        else:
            overloaded_entry = None

        if overloaded_entry:
            if self.function.type.is_fused:
                functypes = self.function.type.get_all_specialized_function_types()
                alternatives = [f.entry for f in functypes]
            else:
                alternatives = overloaded_entry.all_alternatives()

            entry = PyrexTypes.best_match(args, alternatives, self.pos, env)

            if not entry:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return

            entry.used = True
            self.function.entry = entry
            self.function.type = entry.type
            func_type = self.function_type()
        else:
            entry = None
            func_type = self.function_type()
            if not func_type.is_cfunction:
                error(self.pos, "Calling non-function type '%s'" % func_type)
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return

        # Check no. of args
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(args)
        if func_type.optional_arg_count and expected_nargs != actual_nargs:
            self.has_optional_args = 1
            self.is_temp = 1

        # check 'self' argument
        if entry and entry.is_cmethod and func_type.args:
            formal_arg = func_type.args[0]
            arg = args[0]
            if formal_arg.not_none:
                if self.self:
                    self.self = self.self.as_none_safe_node(
                        "'NoneType' object has no attribute '%s'",
                        error='PyExc_AttributeError',
                        format_args=[entry.name])
                else:
                    # unbound method
                    arg = arg.as_none_safe_node(
                        "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                        format_args=[entry.name, formal_arg.type.name])
            if self.self:
                if formal_arg.accept_builtin_subtypes:
                    arg = CMethodSelfCloneNode(self.self)
                else:
                    arg = CloneNode(self.self)
                arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
            elif formal_arg.type.is_builtin_type:
                # special case: unbound methods of builtins accept subtypes
                arg = arg.coerce_to(formal_arg.type, env)
                if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
                    arg.exact_builtin_type = False
            args[0] = arg

        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(min(max_nargs, actual_nargs)):
            formal_arg = func_type.args[i]
            formal_type = formal_arg.type
            arg = args[i].coerce_to(formal_type, env)
            if formal_arg.not_none:
                # C methods must do the None checks at *call* time
                arg = arg.as_none_safe_node(
                    "cannot pass None into a C function argument that is declared 'not None'")
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if i == 0 and self.self is not None:
                    # a method's cloned "self" argument is ok
                    pass
                elif arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            args[i] = arg

        # handle additional varargs parameters
        for i in xrange(max_nargs, actual_nargs):
            arg = args[i]
            if arg.type.is_pyobject:
                arg_ctype = arg.type.default_coerced_ctype()
                if arg_ctype is None:
                    error(self.args[i].pos,
                          "Python object cannot be passed as a varargs parameter")
                else:
                    args[i] = arg = arg.coerce_to(arg_ctype, env)
            if arg.is_temp and i > 0:
                some_args_in_temps = True

        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                if i == 0 and self.self is not None:
                    continue # self is ok
                arg = args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0 or i == 1 and self.self is not None: # skip first arg
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break

        self.args[:] = args

        # Calc result type and code fragment
        if isinstance(self.function, NewExprNode):
            self.type = PyrexTypes.CPtrType(self.function.class_type)
        else:
            self.type = func_type.return_type

        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                self.is_temp = 1 # currently doesn't work for self.calculate_result_code()

        if self.type.is_pyobject:
            self.result_ctype = py_object_type
            self.is_temp = 1
        elif func_type.exception_value is not None \
                 or func_type.exception_check:
            self.is_temp = 1
        elif self.type.is_memoryviewslice:
            self.is_temp = 1
            # func_type.exception_check = True

        # Called in 'nogil' context?
        self.nogil = env.nogil
        if (self.nogil and
            func_type.exception_check and
            func_type.exception_check != '+'):
            env.use_utility_code(pyerr_occurred_withgil_utility_code)
        # C++ exception handler
        if func_type.exception_check == '+':
            if func_type.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))

    def calculate_result_code(self):
        return self.c_call_code()

    def c_call_code(self):
        # Build the C expression text for the call itself.
        func_type = self.function_type()
        if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
            return "<error>"
        formal_args = func_type.args
        arg_list_code = []
        args = list(zip(formal_args, self.args))
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        for formal_arg, actual_arg in args[:expected_nargs]:
                arg_code = actual_arg.result_as(formal_arg.type)
                arg_list_code.append(arg_code)

        if func_type.is_overridable:
            arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))

        if func_type.optional_arg_count:
            # pass the optional-args struct (or NULL if none given)
            if expected_nargs == actual_nargs:
                optional_args = 'NULL'
            else:
                optional_args = "&%s" % self.opt_arg_struct
            arg_list_code.append(optional_args)

        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result())

        result = "%s(%s)" % (self.function.result(),
            ', '.join(arg_list_code))
        return result

    def generate_result_code(self, code):
        # Emit the call, including optional-argument struct setup,
        # error checks and C++ exception translation.
        func_type = self.function_type()
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                code.globalstate.use_utility_code(self.function.entry.utility_code)
        if func_type.is_pyobject:
            arg_code = self.arg_tuple.py_result()
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                "PyObjectCall", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                    self.result(),
                    self.function.py_result(),
                    arg_code,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif func_type.is_cfunction:
            if self.has_optional_args:
                actual_nargs = len(self.args)
                expected_nargs = len(func_type.args) - func_type.optional_arg_count
                self.opt_arg_struct = code.funcstate.allocate_temp(
                    func_type.op_arg_struct.base_type, manage_ref=True)
                code.putln("%s.%s = %s;" % (
                        self.opt_arg_struct,
                        Naming.pyrex_prefix + "n",
                        len(self.args) - expected_nargs))
                args = list(zip(func_type.args, self.args))
                for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                    code.putln("%s.%s = %s;" % (
                            self.opt_arg_struct,
                            func_type.opt_arg_cname(formal_arg.name),
                            actual_arg.result_as(formal_arg.type)))
            exc_checks = []
            if self.type.is_pyobject and self.is_temp:
                exc_checks.append("!%s" % self.result())
            elif self.type.is_memoryviewslice:
                assert self.is_temp
                exc_checks.append(self.type.error_condition(self.result()))
            else:
                exc_val = func_type.exception_value
                exc_check = func_type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result(), exc_val))
                if exc_check:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
            if self.is_temp or exc_checks:
                rhs = self.c_call_code()
                if self.result():
                    lhs = "%s = " % self.result()
                    if self.is_temp and self.type.is_pyobject:
                        #return_type = self.type # func_type.return_type
                        #print "SimpleCallNode.generate_result_code: casting", rhs, \
                        #    "from", return_type, "to pyobject" ###
                        rhs = typecast(py_object_type, self.type, rhs)
                else:
                    lhs = ""
                if func_type.exception_check == '+':
                    # wrap the call in a C++ try/catch that converts the
                    # exception into a Python error
                    if func_type.exception_value is None:
                        raise_py_exception = "__Pyx_CppExn2PyErr();"
                    elif func_type.exception_value.type.is_pyobject:
                        raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
                            func_type.exception_value.entry.cname,
                            func_type.exception_value.entry.cname)
                    else:
                        raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
                    code.putln("try {")
                    code.putln("%s%s;" % (lhs, rhs))
                    code.putln("} catch(...) {")
                    if self.nogil:
                        code.put_ensure_gil(declare_gilstate=True)
                    code.putln(raise_py_exception)
                    if self.nogil:
                        code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
                else:
                    if exc_checks:
                        goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                    else:
                        goto_error = ""
                    code.putln("%s%s; %s" % (lhs, rhs, goto_error))
                if self.type.is_pyobject and self.result():
                    code.put_gotref(self.py_result())
            if self.has_optional_args:
                code.funcstate.release_temp(self.opt_arg_struct)


class InlinedDefNodeCallNode(CallNode):
    #  Inline call to defnode
    #
    #  function       PyCFunctionNode
    #  function_name  NameNode
    #  args           [ExprNode]

    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None

    def can_be_inlined(self):
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        return True

    def analyse_types(self, env):
        self.function_name = self.function_name.analyse_types(env)

        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)

        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg

        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self

    def generate_result_code(self, code):
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())


class PythonCapiFunctionNode(ExprNode):
    # Reference to a C-API function by its cname (no evaluation needed).
    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        ExprNode.__init__(self, pos, name=py_name, cname=cname,
                          type=func_type, utility_code=utility_code)

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        if self.utility_code:
            code.globalstate.use_utility_code(self.utility_code)

    def calculate_result_code(self):
        return self.cname
PythonCapiCallNode(SimpleCallNode): # Python C-API Function call (only created in transforms) # By default, we assume that the call never returns None, as this # is true for most C-API functions in CPython. If this does not # apply to a call, set the following to True (or None to inherit # the default behaviour). may_return_none = False def __init__(self, pos, function_name, func_type, utility_code = None, py_name=None, **kwargs): self.type = func_type.return_type self.result_ctype = self.type self.function = PythonCapiFunctionNode( pos, py_name, function_name, func_type, utility_code = utility_code) # call this last so that we can override the constructed # attributes above with explicit keyword arguments if required SimpleCallNode.__init__(self, pos, **kwargs) class GeneralCallNode(CallNode): # General Python function call, including keyword, # * and ** arguments. # # function ExprNode # positional_args ExprNode Tuple of positional arguments # keyword_args ExprNode or None Dict of keyword arguments type = py_object_type subexprs = ['function', 'positional_args', 'keyword_args'] nogil_check = Node.gil_error def compile_time_value(self, denv): function = self.function.compile_time_value(denv) positional_args = self.positional_args.compile_time_value(denv) keyword_args = self.keyword_args.compile_time_value(denv) try: return function(*positional_args, **keyword_args) except Exception, e: self.compile_time_value_error(e) def explicit_args_kwds(self): if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or not isinstance(self.positional_args, TupleNode)): raise CompileError(self.pos, 'Compile-time keyword arguments must be explicit.') return self.positional_args.args, self.keyword_args def analyse_types(self, env): if self.analyse_as_type_constructor(env): return self self.function = self.function.analyse_types(env) if not self.function.type.is_pyobject: if self.function.type.is_error: self.type = error_type return self if hasattr(self.function, 
'entry'): node = self.map_to_simple_call_node() if node is not None and node is not self: return node.analyse_types(env) elif self.function.entry.as_variable: self.function = self.function.coerce_to_pyobject(env) elif node is self: error(self.pos, "Non-trivial keyword arguments and starred " "arguments not allowed in cdef functions.") else: # error was already reported pass else: self.function = self.function.coerce_to_pyobject(env) if self.keyword_args: self.keyword_args = self.keyword_args.analyse_types(env) self.positional_args = self.positional_args.analyse_types(env) self.positional_args = \ self.positional_args.coerce_to_pyobject(env) function = self.function if function.is_name and function.type_entry: # We are calling an extension type constructor. As long # as we do not support __new__(), the result type is clear self.type = function.type_entry.type self.result_ctype = py_object_type self.may_return_none = False else: self.type = py_object_type self.is_temp = 1 return self def map_to_simple_call_node(self): """ Tries to map keyword arguments to declared positional arguments. Returns self to try a Python call, None to report an error or a SimpleCallNode if the mapping succeeds. 
""" if not isinstance(self.positional_args, TupleNode): # has starred argument return self if not isinstance(self.keyword_args, DictNode): # keywords come from arbitrary expression => nothing to do here return self function = self.function entry = getattr(function, 'entry', None) if not entry: return self function_type = entry.type if function_type.is_ptr: function_type = function_type.base_type if not function_type.is_cfunction: return self pos_args = self.positional_args.args kwargs = self.keyword_args declared_args = function_type.args if entry.is_cmethod: declared_args = declared_args[1:] # skip 'self' if len(pos_args) > len(declared_args): error(self.pos, "function call got too many positional arguments, " "expected %d, got %s" % (len(declared_args), len(pos_args))) return None matched_args = set([ arg.name for arg in declared_args[:len(pos_args)] if arg.name ]) unmatched_args = declared_args[len(pos_args):] matched_kwargs_count = 0 args = list(pos_args) # check for duplicate keywords seen = set(matched_args) has_errors = False for arg in kwargs.key_value_pairs: name = arg.key.value if name in seen: error(arg.pos, "argument '%s' passed twice" % name) has_errors = True # continue to report more errors if there are any seen.add(name) # match keywords that are passed in order for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs): name = arg.key.value if decl_arg.name == name: matched_args.add(name) matched_kwargs_count += 1 args.append(arg.value) else: break # match keyword arguments that are passed out-of-order, but keep # the evaluation of non-simple arguments in order by moving them # into temps from Cython.Compiler.UtilNodes import EvalWithTempExprNode, LetRefNode temps = [] if len(kwargs.key_value_pairs) > matched_kwargs_count: unmatched_args = declared_args[len(args):] keywords = dict([ (arg.key.value, (i+len(pos_args), arg)) for i, arg in enumerate(kwargs.key_value_pairs) ]) first_missing_keyword = None for decl_arg in unmatched_args: name = 
decl_arg.name if name not in keywords: # missing keyword argument => either done or error if not first_missing_keyword: first_missing_keyword = name continue elif first_missing_keyword: if entry.as_variable: # we might be able to convert the function to a Python # object, which then allows full calling semantics # with default values in gaps - currently, we only # support optional arguments at the end return self # wasn't the last keyword => gaps are not supported error(self.pos, "C function call is missing " "argument '%s'" % first_missing_keyword) return None pos, arg = keywords[name] matched_args.add(name) matched_kwargs_count += 1 if arg.value.is_simple(): args.append(arg.value) else: temp = LetRefNode(arg.value) assert temp.is_simple() args.append(temp) temps.append((pos, temp)) if temps: # may have to move preceding non-simple args into temps final_args = [] new_temps = [] first_temp_arg = temps[0][-1] for arg_value in args: if arg_value is first_temp_arg: break # done if arg_value.is_simple(): final_args.append(arg_value) else: temp = LetRefNode(arg_value) new_temps.append(temp) final_args.append(temp) if new_temps: args = final_args temps = new_temps + [ arg for i,arg in sorted(temps) ] # check for unexpected keywords for arg in kwargs.key_value_pairs: name = arg.key.value if name not in matched_args: has_errors = True error(arg.pos, "C function got unexpected keyword argument '%s'" % name) if has_errors: # error was reported already return None # all keywords mapped to positional arguments # if we are missing arguments, SimpleCallNode will figure it out node = SimpleCallNode(self.pos, function=function, args=args) for temp in temps[::-1]: node = EvalWithTempExprNode(temp, node) return node def generate_result_code(self, code): if self.type.is_error: return if self.keyword_args: kwargs = self.keyword_args.py_result() else: kwargs = 'NULL' code.globalstate.use_utility_code(UtilityCode.load_cached( "PyObjectCall", "ObjectHandling.c")) code.putln( "%s = 
__Pyx_PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        # The call returned a new reference; record ownership for refnanny.
        code.put_gotref(self.py_result())


class AsTupleNode(ExprNode):
    #  Convert argument to tuple. Used for normalising
    #  the * argument of a function call.
    #
    #  arg    ExprNode

    subexprs = ['arg']

    def calculate_constant_result(self):
        """Constant-fold: a constant argument folds to a constant tuple."""
        self.constant_result = tuple(self.arg.constant_result)

    def compile_time_value(self, denv):
        """Evaluate the tuple conversion at compile time, reporting any
        conversion failure as a compile-time error."""
        arg = self.arg.compile_time_value(denv)
        try:
            return tuple(arg)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_types(self, env):
        """Type analysis: coerce the argument to a Python object; the
        result is always a Python tuple held in a temporary."""
        self.arg = self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.type = tuple_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        # A successful PySequence_Tuple() call never yields None.
        return False

    nogil_check = Node.gil_error
    gil_message = "Constructing Python tuple"

    def generate_result_code(self, code):
        # Generates: result = PySequence_Tuple(arg); with NULL check.
        code.putln(
            "%s = PySequence_Tuple(%s); %s" % (
                self.result(),
                self.arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())


class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj          ExprNode
    #  attribute    string
    #  needs_none_check boolean        Used if obj is an extension type.
    #                                  If set to True, it is known that the type is not None.
# # Used internally: # # is_py_attr boolean Is a Python getattr operation # member string C name of struct member # is_called boolean Function call is being done on result # entry Entry Symbol table entry of attribute is_attribute = 1 subexprs = ['obj'] type = PyrexTypes.error_type entry = None is_called = 0 needs_none_check = True is_memslice_transpose = False is_special_lookup = False def as_cython_attribute(self): if (isinstance(self.obj, NameNode) and self.obj.is_cython_module and not self.attribute == u"parallel"): return self.attribute cy = self.obj.as_cython_attribute() if cy: return "%s.%s" % (cy, self.attribute) return None def coerce_to(self, dst_type, env): # If coercing to a generic pyobject and this is a cpdef function # we can create the corresponding attribute if dst_type is py_object_type: entry = self.entry if entry and entry.is_cfunction and entry.as_variable: # must be a cpdef function self.is_temp = 1 self.entry = entry.as_variable self.analyse_as_python_attribute(env) return self return ExprNode.coerce_to(self, dst_type, env) def calculate_constant_result(self): attr = self.attribute if attr.startswith("__") and attr.endswith("__"): return self.constant_result = getattr(self.obj.constant_result, attr) def compile_time_value(self, denv): attr = self.attribute if attr.startswith("__") and attr.endswith("__"): error(self.pos, "Invalid attribute name '%s' in compile-time expression" % attr) return None obj = self.obj.compile_time_value(denv) try: return getattr(obj, attr) except Exception, e: self.compile_time_value_error(e) def type_dependencies(self, env): return self.obj.type_dependencies(env) def infer_type(self, env): # FIXME: this is way too redundant with analyse_types() node = self.analyse_as_cimported_attribute_node(env, target=False) if node is not None: return node.entry.type node = self.analyse_as_unbound_cmethod_node(env) if node is not None: return node.entry.type obj_type = self.obj.infer_type(env) self.analyse_attribute(env, 
                               obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        return self.type

    def analyse_target_declaration(self, env):
        # Nothing to declare for an attribute used as an assignment target.
        pass

    def analyse_target_types(self, env):
        """Type analysis when this attribute is an assignment target;
        reports assignments to const attributes and non-lvalues."""
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node

    def analyse_types(self, env, target = 0):
        """Main type-analysis entry point.

        Tries the specialised interpretations first (cimported C name,
        unbound C method) and falls back to an ordinary attribute lookup.
        May return a node other than self (e.g. a NameNode).
        """
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_unbound_cmethod_node(env)
        if node is None:
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if node.entry:
            node.entry.used = True
        if node.is_attribute:
            node.wrap_obj_in_nonecheck(env)
        return node

    def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, mutates
        # this node into a NameNode and returns 1, otherwise
        # returns 0.
        # NOTE(review): despite the comment above, this actually returns
        # the NameNode created by as_name_node(), or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                    entry.is_cglobal or entry.is_cfunction
                    or entry.is_type or entry.is_const):
                return self.as_name_node(env, entry, target)
        return None

    def analyse_as_unbound_cmethod_node(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type or builtin type.  If successful,
        # creates a corresponding NameNode and returns it, otherwise
        # returns None.
        type = self.obj.analyse_as_extension_type(env)
        if type:
            entry = type.scope.lookup_here(self.attribute)
            if entry and entry.is_cmethod:
                if type.is_builtin_type:
                    if not self.is_called:
                        # must handle this as Python object
                        return None
                    ubcm_entry = entry
                else:
                    # Create a temporary entry describing the C method
                    # as an ordinary function.
                    ubcm_entry = Symtab.Entry(entry.name,
                                              "%s->%s" % (type.vtabptr_cname, entry.cname),
                                              entry.type)
                    ubcm_entry.is_cfunction = 1
                    ubcm_entry.func_cname = entry.func_cname
                    ubcm_entry.is_unbound_cmethod = 1
                return self.as_name_node(env, ubcm_entry, target=False)
        return None

    def analyse_as_type(self, env):
        """Try to interpret this attribute as naming a type, either from a
        cimported module's scope or nested in another type's scope.
        Returns the type or None."""
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            return module_scope.lookup_type(self.attribute)
        if not self.obj.is_string_literal:
            base_type = self.obj.analyse_as_type(env)
            if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
                return base_type.scope.lookup_type(self.attribute)
        return None

    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type:
                if entry.type.is_extension_type or entry.type.is_builtin_type:
                    return entry.type
        return None

    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None

    def as_name_node(self, env, entry, target):
        # Create a corresponding NameNode from this node and complete the
        # analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry) if target: node = node.analyse_target_types(env) else: node = node.analyse_rvalue_entry(env) node.entry.used = 1 return node def analyse_as_ordinary_attribute_node(self, env, target): self.obj = self.obj.analyse_types(env) self.analyse_attribute(env) if self.entry and self.entry.is_cmethod and not self.is_called: # error(self.pos, "C method can only be called") pass ## Reference to C array turns into pointer to first element. #while self.type.is_array: # self.type = self.type.element_ptr_type() if self.is_py_attr: if not target: self.is_temp = 1 self.result_ctype = py_object_type elif target and self.obj.type.is_builtin_type: error(self.pos, "Assignment to an immutable object field") #elif self.type.is_memoryviewslice and not target: # self.is_temp = True return self def analyse_attribute(self, env, obj_type = None): # Look up attribute and set self.type and self.member. immutable_obj = obj_type is not None # used during type inference self.is_py_attr = 0 self.member = self.attribute if obj_type is None: if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr: self.obj = self.obj.coerce_to_pyobject(env) obj_type = self.obj.type else: if obj_type.is_string or obj_type.is_pyunicode_ptr: obj_type = py_object_type if obj_type.is_ptr or obj_type.is_array: obj_type = obj_type.base_type self.op = "->" elif obj_type.is_extension_type or obj_type.is_builtin_type: self.op = "->" else: self.op = "." 
if obj_type.has_attributes: if obj_type.attributes_known(): if (obj_type.is_memoryviewslice and not obj_type.scope.lookup_here(self.attribute)): if self.attribute == 'T': self.is_memslice_transpose = True self.is_temp = True self.use_managed_ref = True self.type = self.obj.type return else: obj_type.declare_attribute(self.attribute, env, self.pos) entry = obj_type.scope.lookup_here(self.attribute) if entry and entry.is_member: entry = None else: error(self.pos, "Cannot select attribute of incomplete type '%s'" % obj_type) self.type = PyrexTypes.error_type return self.entry = entry if entry: if obj_type.is_extension_type and entry.name == "__weakref__": error(self.pos, "Illegal use of special attribute __weakref__") # def methods need the normal attribute lookup # because they do not have struct entries # fused function go through assignment synthesis # (foo = pycfunction(foo_func_obj)) and need to go through # regular Python lookup as well if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod: self.type = entry.type self.member = entry.cname return else: # If it's not a variable or C method, it must be a Python # method of an extension type, so we treat it like a Python # attribute. pass # If we get here, the base object is not a struct/union/extension # type, or it is an extension type and the attribute is either not # declared or is declared as a Python method. Treat it as a Python # attribute reference. 
self.analyse_as_python_attribute(env, obj_type, immutable_obj) def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False): if obj_type is None: obj_type = self.obj.type # mangle private '__*' Python attributes used inside of a class self.attribute = env.mangle_class_private_name(self.attribute) self.member = self.attribute self.type = py_object_type self.is_py_attr = 1 if not obj_type.is_pyobject and not obj_type.is_error: if obj_type.can_coerce_to_pyobject(env): if not immutable_obj: self.obj = self.obj.coerce_to_pyobject(env) elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute) and self.obj.entry.as_variable and self.obj.entry.as_variable.type.is_pyobject): # might be an optimised builtin function => unpack it if not immutable_obj: self.obj = self.obj.coerce_to_pyobject(env) else: error(self.pos, "Object of type '%s' has no attribute '%s'" % (obj_type, self.attribute)) def wrap_obj_in_nonecheck(self, env): if not env.directives['nonecheck']: return msg = None format_args = () if (self.obj.type.is_extension_type and self.needs_none_check and not self.is_py_attr): msg = "'NoneType' object has no attribute '%s'" format_args = (self.attribute,) elif self.obj.type.is_memoryviewslice: if self.is_memslice_transpose: msg = "Cannot transpose None memoryview slice" else: entry = self.obj.type.scope.lookup_here(self.attribute) if entry: # copy/is_c_contig/shape/strides etc msg = "Cannot access '%s' attribute of None memoryview slice" format_args = (entry.name,) if msg: self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError', format_args=format_args) def nogil_check(self, env): if self.is_py_attr: self.gil_error() elif self.type.is_memoryviewslice: import MemoryView MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute') gil_message = "Accessing Python attribute" def is_simple(self): if self.obj: return self.result_in_temp() or self.obj.is_simple() else: return NameNode.is_simple(self) def is_lvalue(self): if 
self.obj: return not self.type.is_array else: return NameNode.is_lvalue(self) def is_ephemeral(self): if self.obj: return self.obj.is_ephemeral() else: return NameNode.is_ephemeral(self) def calculate_result_code(self): #print "AttributeNode.calculate_result_code:", self.member ### #print "...obj node =", self.obj, "code", self.obj.result() ### #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ### obj = self.obj obj_code = obj.result_as(obj.type) #print "...obj_code =", obj_code ### if self.entry and self.entry.is_cmethod: if obj.type.is_extension_type and not self.entry.is_builtin_cmethod: if self.entry.final_func_cname: return self.entry.final_func_cname if self.type.from_fused: # If the attribute was specialized through indexing, make # sure to get the right fused name, as our entry was # replaced by our parent index node # (AnalyseExpressionsTransform) self.member = self.entry.cname return "((struct %s *)%s%s%s)->%s" % ( obj.type.vtabstruct_cname, obj_code, self.op, obj.type.vtabslot_cname, self.member) elif self.result_is_used: return self.member # Generating no code at all for unused access to optimised builtin # methods fixes the problem that some optimisations only exist as # macros, i.e. there is no function pointer to them, so we would # generate invalid C code here. 
return elif obj.type.is_complex: return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code) else: if obj.type.is_builtin_type and self.entry and self.entry.is_variable: # accessing a field of a builtin type, need to cast better than result_as() does obj_code = obj.type.cast_code(obj.result(), to_object_struct = True) return "%s%s%s" % (obj_code, self.op, self.member) def generate_result_code(self, code): if self.is_py_attr: if self.is_special_lookup: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c")) lookup_func_name = '__Pyx_PyObject_LookupSpecial' else: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c")) lookup_func_name = '__Pyx_PyObject_GetAttrStr' code.putln( '%s = %s(%s, %s); %s' % ( self.result(), lookup_func_name, self.obj.py_result(), code.intern_identifier(self.attribute), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) elif self.type.is_memoryviewslice: if self.is_memslice_transpose: # transpose the slice for access, packing in self.type.axes: if access == 'ptr': error(self.pos, "Transposing not supported for slices " "with indirect dimensions") return code.putln("%s = %s;" % (self.result(), self.obj.result())) if self.obj.is_name or (self.obj.is_attribute and self.obj.is_memslice_transpose): code.put_incref_memoryviewslice(self.result(), have_gil=True) T = "__pyx_memslice_transpose(&%s) == 0" code.putln(code.error_goto_if(T % self.result(), self.pos)) elif self.initialized_check: code.putln( 'if (unlikely(!%s.memview)) {' 'PyErr_SetString(PyExc_AttributeError,' '"Memoryview is not initialized");' '%s' '}' % (self.result(), code.error_goto(self.pos))) else: # result_code contains what is needed, but we may need to insert # a check and raise an exception if self.obj.type.is_extension_type: pass elif self.entry and self.entry.is_cmethod and self.entry.utility_code: # C method implemented as function call with utility 
code code.globalstate.use_utility_code(self.entry.utility_code) def generate_disposal_code(self, code): if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose: # mirror condition for putting the memview incref here: if self.obj.is_name or (self.obj.is_attribute and self.obj.is_memslice_transpose): code.put_xdecref_memoryviewslice( self.result(), have_gil=True) else: ExprNode.generate_disposal_code(self, code) def generate_assignment_code(self, rhs, code): self.obj.generate_evaluation_code(code) if self.is_py_attr: code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) code.put_error_if_neg(self.pos, '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % ( self.obj.py_result(), code.intern_identifier(self.attribute), rhs.py_result())) rhs.generate_disposal_code(code) rhs.free_temps(code) elif self.obj.type.is_complex: code.putln("__Pyx_SET_C%s(%s, %s);" % ( self.member.upper(), self.obj.result_as(self.obj.type), rhs.result_as(self.ctype()))) else: select_code = self.result() if self.type.is_pyobject and self.use_managed_ref: rhs.make_owned_reference(code) code.put_giveref(rhs.py_result()) code.put_gotref(select_code) code.put_decref(select_code, self.ctype()) elif self.type.is_memoryviewslice: import MemoryView MemoryView.put_assign_to_memviewslice( select_code, rhs, rhs.result(), self.type, code) if not self.type.is_memoryviewslice: code.putln( "%s = %s;" % ( select_code, rhs.result_as(self.ctype()))) #rhs.result())) rhs.generate_post_assignment_code(code) rhs.free_temps(code) self.obj.generate_disposal_code(code) self.obj.free_temps(code) def generate_deletion_code(self, code, ignore_nonexisting=False): self.obj.generate_evaluation_code(code) if self.is_py_attr or (self.entry.scope.is_property_scope and u'__del__' in self.entry.scope.entries): code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) code.put_error_if_neg(self.pos, '__Pyx_PyObject_DelAttrStr(%s, 
%s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def annotate(self, code):
        """Emit annotation metadata distinguishing Python attribute access
        from C struct/vtab member access."""
        if self.is_py_attr:
            style, text = 'py_attr', 'python attribute (%s)'
        else:
            style, text = 'c_attr', 'c attribute (%s)'
        code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))

#-------------------------------------------------------------------
#
#  Constructor nodes
#
#-------------------------------------------------------------------

class StarredTargetNode(ExprNode):
    #  A starred expression like "*a"
    #
    #  This is only allowed in sequence assignment targets such as
    #
    #      a, *b = (1,2,3,4)    =>     a = 1 ; b = [2,3,4]
    #
    #  and will be removed during type analysis (or generate an error
    #  if it's found at unexpected places).
    #
    #  target          ExprNode

    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1

    def __init__(self, pos, target):
        ExprNode.__init__(self, pos)
        self.target = target

    def analyse_declarations(self, env):
        # Reached only when the star appears outside an assignment target.
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_declarations(env)

    def analyse_types(self, env):
        # Also an error path: analysis continues on the child so further
        # problems are still reported.
        error(self.pos, "can use starred expression only as assignment target")
        self.target = self.target.analyse_types(env)
        self.type = self.target.type
        return self

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        self.target = self.target.analyse_target_types(env)
        self.type = self.target.type
        return self

    def calculate_result_code(self):
        # This node never survives type analysis, so it has no result code.
        return ""

    def generate_result_code(self, code):
        pass


class SequenceNode(ExprNode):
    #  Base class for list and tuple constructor nodes.
    #  Contains common code for performing sequence unpacking.
# # args [ExprNode] # unpacked_items [ExprNode] or None # coerced_unpacked_items [ExprNode] or None # mult_factor ExprNode the integer number of content repetitions ([1,2]*3) subexprs = ['args', 'mult_factor'] is_sequence_constructor = 1 unpacked_items = None mult_factor = None slow = False # trade speed for code size (e.g. use PyTuple_Pack()) def compile_time_value_list(self, denv): return [arg.compile_time_value(denv) for arg in self.args] def replace_starred_target_node(self): # replace a starred node in the targets by the contained expression self.starred_assignment = False args = [] for arg in self.args: if arg.is_starred: if self.starred_assignment: error(arg.pos, "more than 1 starred expression in assignment") self.starred_assignment = True arg = arg.target arg.is_starred = True args.append(arg) self.args = args def analyse_target_declaration(self, env): self.replace_starred_target_node() for arg in self.args: arg.analyse_target_declaration(env) def analyse_types(self, env, skip_children=False): for i in range(len(self.args)): arg = self.args[i] if not skip_children: arg = arg.analyse_types(env) self.args[i] = arg.coerce_to_pyobject(env) if self.mult_factor: self.mult_factor = self.mult_factor.analyse_types(env) if not self.mult_factor.type.is_int: self.mult_factor = self.mult_factor.coerce_to_pyobject(env) self.is_temp = 1 # not setting self.type here, subtypes do this return self def may_be_none(self): return False def analyse_target_types(self, env): if self.mult_factor: error(self.pos, "can't assign to multiplied sequence") self.unpacked_items = [] self.coerced_unpacked_items = [] self.any_coerced_items = False for i, arg in enumerate(self.args): arg = self.args[i] = arg.analyse_target_types(env) if arg.is_starred: if not arg.type.assignable_from(Builtin.list_type): error(arg.pos, "starred target must have Python object (list) type") if arg.type is py_object_type: arg.type = Builtin.list_type unpacked_item = PyTempNode(self.pos, env) 
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env) if unpacked_item is not coerced_unpacked_item: self.any_coerced_items = True self.unpacked_items.append(unpacked_item) self.coerced_unpacked_items.append(coerced_unpacked_item) self.type = py_object_type return self def generate_result_code(self, code): self.generate_operation_code(code) def generate_sequence_packing_code(self, code, target=None, plain=False): if target is None: target = self.result() size_factor = c_mult = '' mult_factor = None if self.mult_factor and not plain: mult_factor = self.mult_factor if mult_factor.type.is_int: c_mult = mult_factor.result() if isinstance(mult_factor.constant_result, (int,long)) \ and mult_factor.constant_result > 0: size_factor = ' * %s' % mult_factor.constant_result else: size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult) if self.type is Builtin.tuple_type and (self.is_literal or self.slow) and not c_mult: # use PyTuple_Pack() to avoid generating huge amounts of one-time code code.putln('%s = PyTuple_Pack(%d, %s); %s' % ( target, len(self.args), ', '.join([ arg.py_result() for arg in self.args ]), code.error_goto_if_null(target, self.pos))) code.put_gotref(target) else: # build the tuple/list step by step, potentially multiplying it as we go if self.type is Builtin.list_type: create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM' elif self.type is Builtin.tuple_type: create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM' else: raise InternalError("sequence packing for unexpected type %s" % self.type) arg_count = len(self.args) code.putln("%s = %s(%s%s); %s" % ( target, create_func, arg_count, size_factor, code.error_goto_if_null(target, self.pos))) code.put_gotref(target) if c_mult: # FIXME: can't use a temp variable here as the code may # end up in the constant building function. Temps # currently don't work there. 
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False) counter = Naming.quick_temp_cname code.putln('{ Py_ssize_t %s;' % counter) if arg_count == 1: offset = counter else: offset = '%s * %s' % (counter, arg_count) code.putln('for (%s=0; %s < %s; %s++) {' % ( counter, counter, c_mult, counter )) else: offset = '' for i in xrange(arg_count): arg = self.args[i] if c_mult or not arg.result_in_temp(): code.put_incref(arg.result(), arg.ctype()) code.putln("%s(%s, %s, %s);" % ( set_item_func, target, (offset and i) and ('%s + %s' % (offset, i)) or (offset or i), arg.py_result())) code.put_giveref(arg.py_result()) if c_mult: code.putln('}') #code.funcstate.release_temp(counter) code.putln('}') if mult_factor is not None and mult_factor.type.is_pyobject: code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % ( Naming.quick_temp_cname, target, mult_factor.py_result(), code.error_goto_if_null(Naming.quick_temp_cname, self.pos) )) code.put_gotref(Naming.quick_temp_cname) code.put_decref(target, py_object_type) code.putln('%s = %s;' % (target, Naming.quick_temp_cname)) code.putln('}') def generate_subexpr_disposal_code(self, code): if self.mult_factor and self.mult_factor.type.is_int: super(SequenceNode, self).generate_subexpr_disposal_code(code) elif self.type is Builtin.tuple_type and (self.is_literal or self.slow): super(SequenceNode, self).generate_subexpr_disposal_code(code) else: # We call generate_post_assignment_code here instead # of generate_disposal_code, because values were stored # in the tuple using a reference-stealing operation. for arg in self.args: arg.generate_post_assignment_code(code) # Should NOT call free_temps -- this is invoked by the default # generate_evaluation_code which will do that. 
if self.mult_factor: self.mult_factor.generate_disposal_code(code) def generate_assignment_code(self, rhs, code): if self.starred_assignment: self.generate_starred_assignment_code(rhs, code) else: self.generate_parallel_assignment_code(rhs, code) for item in self.unpacked_items: item.release(code) rhs.free_temps(code) _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None), ])) def generate_parallel_assignment_code(self, rhs, code): # Need to work around the fact that generate_evaluation_code # allocates the temps in a rather hacky way -- the assignment # is evaluated twice, within each if-block. for item in self.unpacked_items: item.allocate(code) special_unpack = (rhs.type is py_object_type or rhs.type in (tuple_type, list_type) or not rhs.type.is_builtin_type) long_enough_for_a_loop = len(self.unpacked_items) > 3 if special_unpack: self.generate_special_parallel_unpacking_code( code, rhs, use_loop=long_enough_for_a_loop) else: code.putln("{") self.generate_generic_parallel_unpacking_code( code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop) code.putln("}") for value_node in self.coerced_unpacked_items: value_node.generate_evaluation_code(code) for i in range(len(self.args)): self.args[i].generate_assignment_code( self.coerced_unpacked_items[i], code) def generate_special_parallel_unpacking_code(self, code, rhs, use_loop): sequence_type_test = '1' none_check = "likely(%s != Py_None)" % rhs.py_result() if rhs.type is list_type: sequence_types = ['List'] if rhs.may_be_none(): sequence_type_test = none_check elif rhs.type is tuple_type: sequence_types = ['Tuple'] if rhs.may_be_none(): sequence_type_test = none_check else: sequence_types = ['Tuple', 'List'] tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result() list_check = 'PyList_CheckExact(%s)' % rhs.py_result() sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check) code.putln("if (%s) 
{" % sequence_type_test) code.putln("PyObject* sequence = %s;" % rhs.py_result()) # list/tuple => check size code.putln("#if CYTHON_COMPILING_IN_CPYTHON") code.putln("Py_ssize_t size = Py_SIZE(sequence);") code.putln("#else") code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception code.putln("#endif") code.putln("if (unlikely(size != %d)) {" % len(self.args)) code.globalstate.use_utility_code(raise_too_many_values_to_unpack) code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % ( len(self.args), len(self.args))) code.globalstate.use_utility_code(raise_need_more_values_to_unpack) code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);") code.putln(code.error_goto(self.pos)) code.putln("}") code.putln("#if CYTHON_COMPILING_IN_CPYTHON") # unpack items from list/tuple in unrolled loop (can't fail) if len(sequence_types) == 2: code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0]) for i, item in enumerate(self.unpacked_items): code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % ( item.result(), sequence_types[0], i)) if len(sequence_types) == 2: code.putln("} else {") for i, item in enumerate(self.unpacked_items): code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % ( item.result(), sequence_types[1], i)) code.putln("}") for item in self.unpacked_items: code.put_incref(item.result(), item.ctype()) code.putln("#else") # in non-CPython, use the PySequence protocol (which can fail) if not use_loop: for i, item in enumerate(self.unpacked_items): code.putln("%s = PySequence_ITEM(sequence, %d); %s" % ( item.result(), i, code.error_goto_if_null(item.result(), self.pos))) code.put_gotref(item.result()) else: code.putln("{") code.putln("Py_ssize_t i;") code.putln("PyObject** temps[%s] = {%s};" % ( len(self.unpacked_items), ','.join(['&%s' % item.result() for item in self.unpacked_items]))) code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items)) code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % 
( code.error_goto_if_null('item', self.pos))) code.put_gotref('item') code.putln("*(temps[i]) = item;") code.putln("}") code.putln("}") code.putln("#endif") rhs.generate_disposal_code(code) if sequence_type_test == '1': code.putln("}") # all done elif sequence_type_test == none_check: # either tuple/list or None => save some code by generating the error directly code.putln("} else {") code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c")) code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos)) code.putln("}") # all done else: code.putln("} else {") # needs iteration fallback code self.generate_generic_parallel_unpacking_code( code, rhs, self.unpacked_items, use_loop=use_loop) code.putln("}") def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True): code.globalstate.use_utility_code(raise_need_more_values_to_unpack) code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c")) code.putln("Py_ssize_t index = -1;") # must be at the start of a C block! 
        if use_loop:
            # Collect the C addresses of all unpacking targets so the
            # generated C loop can assign "*(temps[index]) = item".
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in unpacked_items])))

        iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln(
            "%s = PyObject_GetIter(%s); %s" % (
                iterator_temp,
                rhs.py_result(),
                code.error_goto_if_null(iterator_temp, self.pos)))
        code.put_gotref(iterator_temp)
        rhs.generate_disposal_code(code)

        # Cache tp_iternext to avoid a generic PyIter_Next() call per item.
        iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
        code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
            iternext_func, iterator_temp))

        unpacking_error_label = code.new_label('unpacking_failed')
        unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
        if use_loop:
            code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
            code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
            code.put_goto(unpacking_error_label)
            code.put_gotref("item")
            code.putln("*(temps[index]) = item;")
            code.putln("}")
        else:
            # Unrolled version: one fetch per target, remembering the index
            # so the error path can report how many values were unpacked.
            for i, item in enumerate(unpacked_items):
                code.put(
                    "index = %d; %s = %s; if (unlikely(!%s)) " % (
                        i,
                        item.result(),
                        unpack_code,
                        item.result()))
                code.put_goto(unpacking_error_label)
                code.put_gotref(item.py_result())

        if terminate:
            # Verify that the iterator is exhausted after the last target.
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
                unpack_code,
                len(unpacked_items)))
            code.putln("%s = NULL;" % iternext_func)
            code.put_decref_clear(iterator_temp, py_object_type)

        unpacking_done_label = code.new_label('unpacking_done')
        code.put_goto(unpacking_done_label)

        # Error path: clean up the iterator, then distinguish "too few
        # values" (StopIteration) from a genuine error raised by the iterator.
        code.put_label(unpacking_error_label)
        code.put_decref_clear(iterator_temp, py_object_type)
        code.putln("%s = NULL;" % iternext_func)
        code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
        code.putln(code.error_goto(self.pos))
        code.put_label(unpacking_done_label)

        code.funcstate.release_temp(iternext_func)
        if terminate:
            code.funcstate.release_temp(iterator_temp)
            iterator_temp = None
        # With terminate=False the (still live) iterator temp is returned so
        # the caller can continue consuming it (used for starred assignment).
        return iterator_temp

    def generate_starred_assignment_code(self, rhs, code):
        # Unpacking with a starred target, e.g. "a, *b, c = rhs":
        # unpack the fixed left-hand items one by one, slurp the rest into
        # a list, then pop the fixed right-hand items off the list's end.
        for i, arg in enumerate(self.args):
            if arg.is_starred:
                starred_target = self.unpacked_items[i]
                unpacked_fixed_items_left = self.unpacked_items[:i]
                unpacked_fixed_items_right = self.unpacked_items[i+1:]
                break
        else:
            assert False

        iterator_temp = None
        if unpacked_fixed_items_left:
            for item in unpacked_fixed_items_left:
                item.allocate(code)
            code.putln('{')
            # terminate=False keeps the iterator alive for the starred part.
            iterator_temp = self.generate_generic_parallel_unpacking_code(
                code, rhs, unpacked_fixed_items_left,
                use_loop=True, terminate=False)
            for i, item in enumerate(unpacked_fixed_items_left):
                value_node = self.coerced_unpacked_items[i]
                value_node.generate_evaluation_code(code)
            code.putln('}')

        starred_target.allocate(code)
        target_list = starred_target.result()
        code.putln("%s = PySequence_List(%s); %s" % (
            target_list,
            iterator_temp or rhs.py_result(),
            code.error_goto_if_null(target_list, self.pos)))
        code.put_gotref(target_list)

        if iterator_temp:
            code.put_decref_clear(iterator_temp, py_object_type)
            code.funcstate.release_temp(iterator_temp)
        else:
            rhs.generate_disposal_code(code)

        if unpacked_fixed_items_right:
            code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
            length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
            code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
            code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
                len(unpacked_fixed_items_left), length_temp,
                code.error_goto(self.pos)))
            code.putln('}')

            for item in unpacked_fixed_items_right[::-1]:
                item.allocate(code)
            # Steal the trailing items off the end of the list, right to left.
            for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                        self.coerced_unpacked_items[::-1])):
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                # resize the list the hard way
                code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
                code.putln('#else')
                code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                code.putln('#endif')
                code.put_gotref(item.py_result())
                coerced_arg.generate_evaluation_code(code)

            # Non-CPython: no direct ob_size tricks, take a real slice instead.
            code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
            sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
                sublist_temp, target_list, length_temp,
                len(unpacked_fixed_items_right),
                code.error_goto_if_null(sublist_temp, self.pos)))
            code.put_gotref(sublist_temp)
            code.funcstate.release_temp(length_temp)
            code.put_decref(target_list, py_object_type)
            code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
            code.putln('#else')
            code.putln('%s = %s;' % (sublist_temp, sublist_temp))  # avoid warning about unused variable
            code.funcstate.release_temp(sublist_temp)
            code.putln('#endif')

        for i, arg in enumerate(self.args):
            arg.generate_assignment_code(self.coerced_unpacked_items[i], code)

    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)
        if self.unpacked_items:
            for arg in self.unpacked_items:
                arg.annotate(code)
            for arg in self.coerced_unpacked_items:
                arg.annotate(code)


class TupleNode(SequenceNode):
    #  Tuple constructor.
    type = tuple_type
    is_partly_literal = False

    gil_message = "Constructing Python tuple"

    def analyse_types(self, env, skip_children=False):
        if len(self.args) == 0:
            # the empty tuple is a singleton constant (Naming.empty_tuple)
            node = self
            node.is_temp = False
            node.is_literal = True
        else:
            node = SequenceNode.analyse_types(self, env, skip_children)
            for child in node.args:
                if not child.is_literal:
                    break
            else:
                # all items are literals => try to build the tuple as a
                # module-level constant
                if not node.mult_factor or node.mult_factor.is_literal and \
                       isinstance(node.mult_factor.constant_result, (int, long)):
                    node.is_temp = False
                    node.is_literal = True
                else:
                    # constant item tuple, but runtime multiplier
                    if not node.mult_factor.type.is_pyobject:
                        node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
                    node.is_temp = True
                    node.is_partly_literal = True
        return node

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
                arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return
        if self.is_partly_literal:
            # underlying tuple is const, but factor is not
            tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
                ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2) code = code.get_cached_constants_writer() code.mark_pos(self.pos) self.generate_sequence_packing_code(code) code.put_giveref(self.py_result()) else: self.generate_sequence_packing_code(code) class ListNode(SequenceNode): # List constructor. # obj_conversion_errors [PyrexError] used internally # orignial_args [ExprNode] used internally obj_conversion_errors = [] type = list_type in_module_scope = False gil_message = "Constructing Python list" def type_dependencies(self, env): return () def infer_type(self, env): # TOOD: Infer non-object list arrays. return list_type def analyse_expressions(self, env): node = SequenceNode.analyse_expressions(self, env) return node.coerce_to_pyobject(env) def analyse_types(self, env): hold_errors() self.original_args = list(self.args) node = SequenceNode.analyse_types(self, env) node.obj_conversion_errors = held_errors() release_errors(ignore=True) if env.is_module_scope: self.in_module_scope = True return node def coerce_to(self, dst_type, env): if dst_type.is_pyobject: for err in self.obj_conversion_errors: report_error(err) self.obj_conversion_errors = [] if not self.type.subtype_of(dst_type): error(self.pos, "Cannot coerce list to type '%s'" % dst_type) elif self.mult_factor: error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type) elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type: base_type = dst_type.base_type self.type = PyrexTypes.CArrayType(base_type, len(self.args)) for i in range(len(self.original_args)): arg = self.args[i] if isinstance(arg, CoerceToPyTypeNode): arg = arg.arg self.args[i] = arg.coerce_to(base_type, env) elif dst_type.is_struct: if len(self.args) > len(dst_type.scope.var_entries): error(self.pos, "Too may members for '%s'" % dst_type) else: if len(self.args) < len(dst_type.scope.var_entries): warning(self.pos, "Too few members for '%s'" % dst_type, 1) for i, (arg, member) in 
enumerate(zip(self.original_args, dst_type.scope.var_entries)): if isinstance(arg, CoerceToPyTypeNode): arg = arg.arg self.args[i] = arg.coerce_to(member.type, env) self.type = dst_type else: self.type = error_type error(self.pos, "Cannot coerce list to type '%s'" % dst_type) return self def as_tuple(self): t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor) if isinstance(self.constant_result, list): t.constant_result = tuple(self.constant_result) return t def allocate_temp_result(self, code): if self.type.is_array and self.in_module_scope: self.temp_code = code.funcstate.allocate_temp( self.type, manage_ref=False, static=True) else: SequenceNode.allocate_temp_result(self, code) def release_temp_result(self, env): if self.type.is_array: # To be valid C++, we must allocate the memory on the stack # manually and be sure not to reuse it for something else. pass else: SequenceNode.release_temp_result(self, env) def calculate_constant_result(self): if self.mult_factor: raise ValueError() # may exceed the compile time memory self.constant_result = [ arg.constant_result for arg in self.args] def compile_time_value(self, denv): l = self.compile_time_value_list(denv) if self.mult_factor: l *= self.mult_factor.compile_time_value(denv) return l def generate_operation_code(self, code): if self.type.is_pyobject: for err in self.obj_conversion_errors: report_error(err) self.generate_sequence_packing_code(code) elif self.type.is_array: for i, arg in enumerate(self.args): code.putln("%s[%s] = %s;" % ( self.result(), i, arg.result())) elif self.type.is_struct: for arg, member in zip(self.args, self.type.scope.var_entries): code.putln("%s.%s = %s;" % ( self.result(), member.cname, arg.result())) else: raise InternalError("List type never specified") class ScopedExprNode(ExprNode): # Abstract base class for ExprNodes that have their own local # scope, such as generator expressions. 
# # expr_scope Scope the inner scope of the expression subexprs = [] expr_scope = None # does this node really have a local scope, e.g. does it leak loop # variables or not? non-leaking Py3 behaviour is default, except # for list comprehensions where the behaviour differs in Py2 and # Py3 (set in Parsing.py based on parser context) has_local_scope = True def init_scope(self, outer_scope, expr_scope=None): if expr_scope is not None: self.expr_scope = expr_scope elif self.has_local_scope: self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope) else: self.expr_scope = None def analyse_declarations(self, env): self.init_scope(env) def analyse_scoped_declarations(self, env): # this is called with the expr_scope as env pass def analyse_types(self, env): # no recursion here, the children will be analysed separately below return self def analyse_scoped_expressions(self, env): # this is called with the expr_scope as env return self def generate_evaluation_code(self, code): # set up local variables and free their references on exit generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code if not self.has_local_scope or not self.expr_scope.var_entries: # no local variables => delegate, done generate_inner_evaluation_code(code) return code.putln('{ /* enter inner scope */') py_entries = [] for entry in self.expr_scope.var_entries: if not entry.in_closure: code.put_var_declaration(entry) if entry.type.is_pyobject and entry.used: py_entries.append(entry) if not py_entries: # no local Python references => no cleanup required generate_inner_evaluation_code(code) code.putln('} /* exit inner scope */') return # must free all local Python references at each exit point old_loop_labels = tuple(code.new_loop_labels()) old_error_label = code.new_error_label() generate_inner_evaluation_code(code) # normal (non-error) exit for entry in py_entries: code.put_var_decref(entry) # error/loop body exit points exit_scope = code.new_label('exit_scope') 
        code.put_goto(exit_scope)
        # redirect every label that was used inside the inner scope through
        # a cleanup stub that decrefs the scope's locals first
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label


class ComprehensionNode(ScopedExprNode):
    # A list/set/dict comprehension

    child_attrs = ["loop"]

    is_temp = True

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        self.append.target = self # this is used in the PyList_Append of the inner loop
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # create the empty result container, then run the loop that fills it
        if self.type is Builtin.list_type:
            create_code = 'PyList_New(0)'
        elif self.type is Builtin.set_type:
            create_code = 'PySet_New(NULL)'
        elif self.type is Builtin.dict_type:
            create_code = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % self.type)
        code.putln('%s = %s; %s' % (
            self.result(), create_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)


class ComprehensionAppendNode(Node):
    # Need to be careful to avoid infinite recursion:
    # target must not be in child_attrs/subexprs

    child_attrs = ['expr']
    target = None

    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        self.expr = self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        # append one evaluated item to the comprehension's target container
        if self.target.type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            function = "__Pyx_ListComp_Append"
        elif self.target.type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result()
            ), self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)


class DictComprehensionAppendNode(ComprehensionAppendNode):
    # Append node for dict comprehensions: sets key_expr/value_expr
    # on the target dict instead of appending a single expression.
    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        self.key_expr = self.key_expr.analyse_expressions(env)
        if not self.key_expr.type.is_pyobject:
            self.key_expr = self.key_expr.coerce_to_pyobject(env)
        self.value_expr = self.value_expr.analyse_expressions(env)
        if not self.value_expr.type.is_pyobject:
            self.value_expr = self.value_expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
            ), self.pos))
        self.key_expr.generate_disposal_code(code)
        self.key_expr.free_temps(code)
        self.value_expr.generate_disposal_code(code)
        self.value_expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)


class InlinedGeneratorExpressionNode(ScopedExprNode):
    # An inlined generator expression for which the result is
    # calculated inside of the loop.  This will only be created by
    # transforms when replacing builtin calls on generator
    # expressions.
    #
    # loop           ForStatNode      the for-loop, not containing any YieldExprNodes
    # result_node    ResultRefNode    the reference to the result value temp
    # orig_func      String           the name of the builtin function this node replaces

    child_attrs = ["loop"]
    loop_analysed = False
    type = py_object_type

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def may_be_none(self):
        return False

    def annotate(self, code):
        self.loop.annotate(code)

    def infer_type(self, env):
        return self.result_node.infer_type(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop_analysed = True
            self.loop = self.loop.analyse_expressions(env)
        self.type = self.result_node.type
        self.is_temp = True
        return self

    def analyse_scoped_expressions(self, env):
        self.loop_analysed = True
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def coerce_to(self, dst_type, env):
        if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
            # We can optimise by dropping the aggregation variable and
            # the add operations into C.  This can only be done safely
            # before analysing the loop body, after that, the result
            # reference type will have infected expressions and
            # assignments.
            self.result_node.type = self.type = dst_type
            return self
        return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)


class SetNode(ExprNode):
    #  Set constructor.
    type = set_type

    subexprs = ['args']

    gil_message = "Constructing Python set"

    def analyse_types(self, env):
        for i in range(len(self.args)):
            arg = self.args[i]
            arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        self.type = set_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return False

    def calculate_constant_result(self):
        self.constant_result = set([
                arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return set(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_evaluation_code(self, code):
        # Custom method: items are evaluated, added and disposed of one at
        # a time instead of being kept alive all at once.
        code.globalstate.use_utility_code(Builtin.py_set_utility_code)
        self.allocate_temp_result(code)
        code.putln(
            "%s = PySet_New(0); %s" % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        for arg in self.args:
            arg.generate_evaluation_code(code)
            code.put_error_if_neg(
                self.pos,
                "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
            arg.generate_disposal_code(code)
            arg.free_temps(code)


class DictNode(ExprNode):
    #  Dictionary constructor.
    #
    #  key_value_pairs      [DictItemNode]
    #  exclude_null_values  [boolean]          Do not add NULL values to dict
    #
    #  obj_conversion_errors    [PyrexError]   used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        return cls(pos, key_value_pairs=[
                DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
                item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                 for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception, e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Errors are held back: if the dict is later coerced to a struct or
        # union, failed Python object conversions do not matter (see
        # coerce_to() below).
        hold_errors()
        self.key_value_pairs = [ item.analyse_types(env)
                                 for item in self.key_value_pairs ]
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # staying a Python dict => report the held conversion errors
            self.release_errors()
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # dict literal used as struct/union initialiser (keyed members)
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value) # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        #  Custom method used here because key-value
        #  pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        if self.type.is_pyobject:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if self.type.is_pyobject:
                if self.exclude_null_values:
                    # only insert the value if it is non-NULL at runtime
                    code.putln('if (%s) {' % item.value.py_result())
                code.put_error_if_neg(self.pos,
                    "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        item.key.py_result(),
                        item.value.py_result()))
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # struct target: assign the member directly
                code.putln("%s.%s = %s;" % (
                        self.result(),
                        item.key.value,
                        item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)


class DictItemNode(ExprNode):
    # Represents a single item in a DictNode
    #
    # key          ExprNode
    # value        ExprNode

    subexprs = ['key', 'value']

    nogil_check = None # Parent DictNode takes care of it

    def calculate_constant_result(self):
        self.constant_result = (
            self.key.constant_result, self.value.constant_result)

    def analyse_types(self, env):
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        self.key.generate_evaluation_code(code)
        self.value.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.key.generate_disposal_code(code)
        self.value.generate_disposal_code(code)

    def free_temps(self, code):
        self.key.free_temps(code)
        self.value.free_temps(code)

    def __iter__(self):
        return iter([self.key, self.value])


class SortedDictKeysNode(ExprNode):
    # build sorted list of dict keys, e.g.
    # for dir()

    subexprs = ['arg']

    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        if arg.type is Builtin.dict_type:
            # known dict => only None needs to be guarded against
            arg = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = arg
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        dict_result = self.arg.py_result()
        if self.arg.type is Builtin.dict_type:
            function = 'PyDict_Keys'
        else:
            function = 'PyMapping_Keys'
        code.putln('%s = %s(%s); %s' % (
            self.result(), function, dict_result,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())


class ModuleNameMixin(object):
    # Mixin providing interned-string constants for the defining module
    # name and the qualified name of class/function creator nodes.
    def get_py_mod_name(self, code):
        return code.get_py_string_const(
            self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        return code.get_py_string_const(
            self.qualname, identifier=True)


class ClassNode(ExprNode, ModuleNameMixin):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs a class object given
    #  a name, tuple of bases and class dictionary.
# # name EncodedString Name of the class # bases ExprNode Base class tuple # dict ExprNode Class dict (not owned by this node) # doc ExprNode or None Doc string # module_name EncodedString Name of defining module subexprs = ['bases', 'doc'] def analyse_types(self, env): self.bases = self.bases.analyse_types(env) if self.doc: self.doc = self.doc.analyse_types(env) self.doc = self.doc.coerce_to_pyobject(env) self.type = py_object_type self.is_temp = 1 env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c")) return self def may_be_none(self): return True gil_message = "Constructing Python class" def generate_result_code(self, code): cname = code.intern_identifier(self.name) if self.doc: code.put_error_if_neg(self.pos, 'PyDict_SetItem(%s, %s, %s)' % ( self.dict.py_result(), code.intern_identifier( StringEncoding.EncodedString("__doc__")), self.doc.py_result())) py_mod_name = self.get_py_mod_name(code) qualname = self.get_py_qualified_name(code) code.putln( '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % ( self.result(), self.bases.py_result(), self.dict.py_result(), cname, qualname, py_mod_name, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class Py3ClassNode(ExprNode): # Helper class used in the implementation of Python3+ # class definitions. Constructs a class object given # a name, tuple of bases and class dictionary. 
# # name EncodedString Name of the class # dict ExprNode Class dict (not owned by this node) # module_name EncodedString Name of defining module # calculate_metaclass bool should call CalculateMetaclass() # allow_py2_metaclass bool should look for Py2 metaclass subexprs = [] def analyse_types(self, env): self.type = py_object_type self.is_temp = 1 return self def may_be_none(self): return True gil_message = "Constructing Python class" def generate_result_code(self, code): code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c")) cname = code.intern_identifier(self.name) if self.mkw: mkw = self.mkw.py_result() else: mkw = 'NULL' if self.metaclass: metaclass = self.metaclass.result() else: metaclass = "((PyObject*)&__Pyx_DefaultClassType)" code.putln( '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % ( self.result(), metaclass, cname, self.bases.py_result(), self.dict.py_result(), mkw, self.calculate_metaclass, self.allow_py2_metaclass, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class KeywordArgsNode(ExprNode): # Helper class for keyword arguments. 
# # starstar_arg DictNode # keyword_args [DictItemNode] subexprs = ['starstar_arg', 'keyword_args'] is_temp = 1 type = dict_type def calculate_constant_result(self): result = dict(self.starstar_arg.constant_result) for item in self.keyword_args: key, value = item.constant_result if key in result: raise ValueError("duplicate keyword argument found: %s" % key) result[key] = value self.constant_result = result def compile_time_value(self, denv): result = self.starstar_arg.compile_time_value(denv) pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv)) for item in self.keyword_args ] try: result = dict(result) for key, value in pairs: if key in result: raise ValueError("duplicate keyword argument found: %s" % key) result[key] = value except Exception, e: self.compile_time_value_error(e) return result def type_dependencies(self, env): return () def infer_type(self, env): return dict_type def analyse_types(self, env): arg = self.starstar_arg.analyse_types(env) arg = arg.coerce_to_pyobject(env) self.starstar_arg = arg.as_none_safe_node( # FIXME: CPython's error message starts with the runtime function name 'argument after ** must be a mapping, not NoneType') self.keyword_args = [ item.analyse_types(env) for item in self.keyword_args ] return self def may_be_none(self): return False gil_message = "Constructing Python dict" def generate_evaluation_code(self, code): code.mark_pos(self.pos) self.allocate_temp_result(code) self.starstar_arg.generate_evaluation_code(code) if self.starstar_arg.type is not Builtin.dict_type: # CPython supports calling functions with non-dicts, so do we code.putln('if (likely(PyDict_Check(%s))) {' % self.starstar_arg.py_result()) if self.keyword_args: code.putln( "%s = PyDict_Copy(%s); %s" % ( self.result(), self.starstar_arg.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) else: code.putln("%s = %s;" % ( self.result(), self.starstar_arg.py_result())) 
            code.put_incref(self.result(), py_object_type)
        if self.starstar_arg.type is not Builtin.dict_type:
            # fallback branch: convert the non-dict mapping via dict(arg)
            code.putln('} else {')
            code.putln(
                "%s = PyObject_CallFunctionObjArgs("
                "(PyObject*)&PyDict_Type, %s, NULL); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln('}')
        self.starstar_arg.generate_disposal_code(code)
        self.starstar_arg.free_temps(code)

        if not self.keyword_args:
            return

        # merge the explicit keyword arguments, rejecting duplicates
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
        for item in self.keyword_args:
            item.generate_evaluation_code(code)
            code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
                    self.result(),
                    item.key.py_result()))
            # FIXME: find out function name at runtime!
            code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                item.key.py_result(),
                code.error_goto(self.pos)))
            code.putln("}")
            code.put_error_if_neg(self.pos,
                "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        self.starstar_arg.annotate(code)
        for item in self.keyword_args:
            item.annotate(code)


class PyClassMetaclassNode(ExprNode):
    # Helper class holds Python3 metaclass object
    #
    #  bases        ExprNode           Base class tuple (not owned by this node)
    #  mkw          ExprNode           Class keyword arguments (not owned by this node)

    subexprs = []

    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = True
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        if self.mkw:
            # a "metaclass" keyword may override the calculated metaclass
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
            call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
                self.bases.result(),
                self.mkw.result())
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
            call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
                self.bases.result())
        code.putln(
            "%s = %s; %s" % (
                self.result(), call,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())


class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All this are not owned by this node
    #  metaclass    ExprNode           Metaclass object
    #  bases        ExprNode           Base class tuple
    #  mkw          ExprNode           Class keyword arguments
    #  doc          ExprNode or None   Doc string (owned)

    subexprs = ['doc']

    def analyse_types(self, env):
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = '(PyObject *) NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            metaclass = "(PyObject *) NULL"
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                metaclass,
                self.bases.result(),
                cname,
                qualname,
                mkw,
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())


class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False

    def analyse_expressions(self, env):
        if self.is_active:
            env.use_utility_code(
                UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        return self

    def generate_evaluation_code(self, code):
        # result is a list collecting the functions that need their
        # class cell filled in once the class object exists
        if self.is_active:
            self.allocate_temp_result(code)
            code.putln(
                '%s = PyList_New(0); %s' % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())

    def generate_injection_code(self, code, classobj_cname):
        if self.is_active:
            code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
                self.result(), classobj_cname))


class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)


class BoundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs an bound method
    #  object from a class and a function.
    #
    #  function      ExprNode   Function object
    #  self_object   ExprNode   self object

    subexprs = ['function']

    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    gil_message = "Constructing a bound method"

    def generate_result_code(self, code):
        code.putln(
            "%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())


class UnboundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs an unbound method
    #  object from a class and a function.
# # function ExprNode Function object type = py_object_type is_temp = 1 subexprs = ['function'] def analyse_types(self, env): self.function = self.function.analyse_types(env) return self def may_be_none(self): return False gil_message = "Constructing an unbound method" def generate_result_code(self, code): class_cname = code.pyclass_stack[-1].classobj.result() code.putln( "%s = PyMethod_New(%s, 0, %s); %s" % ( self.result(), self.function.py_result(), class_cname, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class PyCFunctionNode(ExprNode, ModuleNameMixin): # Helper class used in the implementation of Python # functions. Constructs a PyCFunction object # from a PyMethodDef struct. # # pymethdef_cname string PyMethodDef structure # self_object ExprNode or None # binding bool # def_node DefNode the Python function node # module_name EncodedString Name of defining module # code_object CodeObjectNode the PyCodeObject creator node subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict', 'annotations_dict'] self_object = None code_object = None binding = False def_node = None defaults = None defaults_struct = None defaults_pyobjects = 0 defaults_tuple = None defaults_kwdict = None annotations_dict = None type = py_object_type is_temp = 1 specialized_cpdefs = None is_specialization = False @classmethod def from_defnode(cls, node, binding): return cls(node.pos, def_node=node, pymethdef_cname=node.entry.pymethdef_cname, binding=binding or node.specialized_cpdefs, specialized_cpdefs=node.specialized_cpdefs, code_object=CodeObjectNode(node)) def analyse_types(self, env): if self.binding: self.analyse_default_args(env) return self def analyse_default_args(self, env): """ Handle non-literal function's default arguments. 
""" nonliteral_objects = [] nonliteral_other = [] default_args = [] default_kwargs = [] annotations = [] for arg in self.def_node.args: if arg.default: if not arg.default.is_literal: arg.is_dynamic = True if arg.type.is_pyobject: nonliteral_objects.append(arg) else: nonliteral_other.append(arg) else: arg.default = DefaultLiteralArgNode(arg.pos, arg.default) if arg.kw_only: default_kwargs.append(arg) else: default_args.append(arg) if arg.annotation: arg.annotation = arg.annotation.analyse_types(env) if not arg.annotation.type.is_pyobject: arg.annotation = arg.annotation.coerce_to_pyobject(env) annotations.append((arg.pos, arg.name, arg.annotation)) if self.def_node.return_type_annotation: annotations.append((self.def_node.return_type_annotation.pos, StringEncoding.EncodedString("return"), self.def_node.return_type_annotation)) if nonliteral_objects or nonliteral_other: module_scope = env.global_scope() cname = module_scope.next_id(Naming.defaults_struct_prefix) scope = Symtab.StructOrUnionScope(cname) self.defaults = [] for arg in nonliteral_objects: entry = scope.declare_var(arg.name, arg.type, None, Naming.arg_prefix + arg.name, allow_pyobject=True) self.defaults.append((arg, entry)) for arg in nonliteral_other: entry = scope.declare_var(arg.name, arg.type, None, Naming.arg_prefix + arg.name, allow_pyobject=False) self.defaults.append((arg, entry)) entry = module_scope.declare_struct_or_union( None, 'struct', scope, 1, None, cname=cname) self.defaults_struct = scope self.defaults_pyobjects = len(nonliteral_objects) for arg, entry in self.defaults: arg.default_value = '%s->%s' % ( Naming.dynamic_args_cname, entry.cname) self.def_node.defaults_struct = self.defaults_struct.name if default_args or default_kwargs: if self.defaults_struct is None: if default_args: defaults_tuple = TupleNode(self.pos, args=[ arg.default for arg in default_args]) self.defaults_tuple = defaults_tuple.analyse_types(env) if default_kwargs: defaults_kwdict = DictNode(self.pos, 
key_value_pairs=[ DictItemNode( arg.pos, key=IdentifierStringNode(arg.pos, value=arg.name), value=arg.default) for arg in default_kwargs]) self.defaults_kwdict = defaults_kwdict.analyse_types(env) else: if default_args: defaults_tuple = DefaultsTupleNode( self.pos, default_args, self.defaults_struct) else: defaults_tuple = NoneNode(self.pos) if default_kwargs: defaults_kwdict = DefaultsKwDictNode( self.pos, default_kwargs, self.defaults_struct) else: defaults_kwdict = NoneNode(self.pos) defaults_getter = Nodes.DefNode( self.pos, args=[], star_arg=None, starstar_arg=None, body=Nodes.ReturnStatNode( self.pos, return_type=py_object_type, value=TupleNode( self.pos, args=[defaults_tuple, defaults_kwdict])), decorators=None, name=StringEncoding.EncodedString("__defaults__")) defaults_getter.analyse_declarations(env) defaults_getter = defaults_getter.analyse_expressions(env) defaults_getter.body = defaults_getter.body.analyse_expressions( defaults_getter.local_scope) defaults_getter.py_wrapper_required = False defaults_getter.pymethdef_required = False self.def_node.defaults_getter = defaults_getter if annotations: annotations_dict = DictNode(self.pos, key_value_pairs=[ DictItemNode( pos, key=IdentifierStringNode(pos, value=name), value=value) for pos, name, value in annotations]) self.annotations_dict = annotations_dict.analyse_types(env) def may_be_none(self): return False gil_message = "Constructing Python function" def self_result_code(self): if self.self_object is None: self_result = "NULL" else: self_result = self.self_object.py_result() return self_result def generate_result_code(self, code): if self.binding: self.generate_cyfunction_code(code) else: self.generate_pycfunction_code(code) def generate_pycfunction_code(self, code): py_mod_name = self.get_py_mod_name(code) code.putln( '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % ( self.result(), self.pymethdef_cname, self.self_result_code(), py_mod_name, code.error_goto_if_null(self.result(), self.pos))) 
code.put_gotref(self.py_result()) def generate_cyfunction_code(self, code): if self.specialized_cpdefs: def_node = self.specialized_cpdefs[0] else: def_node = self.def_node if self.specialized_cpdefs or self.is_specialization: code.globalstate.use_utility_code( UtilityCode.load_cached("FusedFunction", "CythonFunction.c")) constructor = "__pyx_FusedFunction_NewEx" else: code.globalstate.use_utility_code( UtilityCode.load_cached("CythonFunction", "CythonFunction.c")) constructor = "__Pyx_CyFunction_NewEx" if self.code_object: code_object_result = self.code_object.py_result() else: code_object_result = 'NULL' flags = [] if def_node.is_staticmethod: flags.append('__Pyx_CYFUNCTION_STATICMETHOD') elif def_node.is_classmethod: flags.append('__Pyx_CYFUNCTION_CLASSMETHOD') if def_node.local_scope.parent_scope.is_c_class_scope: flags.append('__Pyx_CYFUNCTION_CCLASS') if flags: flags = ' | '.join(flags) else: flags = '0' code.putln( '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % ( self.result(), constructor, self.pymethdef_cname, flags, self.get_py_qualified_name(code), self.self_result_code(), self.get_py_mod_name(code), "PyModule_GetDict(%s)" % Naming.module_cname, code_object_result, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) if def_node.requires_classobj: assert code.pyclass_stack, "pyclass_stack is empty" class_node = code.pyclass_stack[-1] code.put_incref(self.py_result(), py_object_type) code.putln( 'PyList_Append(%s, %s);' % ( class_node.class_cell.result(), self.result())) code.put_giveref(self.py_result()) if self.defaults: code.putln( 'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % ( self.result(), self.defaults_struct.name, self.defaults_pyobjects, code.error_goto(self.pos))) defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % ( self.defaults_struct.name, self.result()) for arg, entry in self.defaults: arg.generate_assignment_code(code, target='%s->%s' % ( defaults, entry.cname)) if self.defaults_tuple: 
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % ( self.result(), self.defaults_tuple.py_result())) if self.defaults_kwdict: code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % ( self.result(), self.defaults_kwdict.py_result())) if def_node.defaults_getter: code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % ( self.result(), def_node.defaults_getter.entry.pyfunc_cname)) if self.annotations_dict: code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % ( self.result(), self.annotations_dict.py_result())) class InnerFunctionNode(PyCFunctionNode): # Special PyCFunctionNode that depends on a closure class # binding = True needs_self_code = True def self_result_code(self): if self.needs_self_code: return "((PyObject*)%s)" % Naming.cur_scope_cname return "NULL" class CodeObjectNode(ExprNode): # Create a PyCodeObject for a CyFunction instance. # # def_node DefNode the Python function node # varnames TupleNode a tuple with all local variable names subexprs = ['varnames'] is_temp = False def __init__(self, def_node): ExprNode.__init__(self, def_node.pos, def_node=def_node) args = list(def_node.args) # if we have args/kwargs, then the first two in var_entries are those local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name] self.varnames = TupleNode( def_node.pos, args=[IdentifierStringNode(arg.pos, value=arg.name) for arg in args + local_vars], is_temp=0, is_literal=1) def may_be_none(self): return False def calculate_result_code(self): return self.result_code def generate_result_code(self, code): self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) code = code.get_cached_constants_writer() code.mark_pos(self.pos) func = self.def_node func_name = code.get_py_string_const( func.name, identifier=True, is_str=False, unicode_value=func.name) # FIXME: better way to get the module file path at module init time? Encoding to use? 
file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8')) file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True) flags = [] if self.def_node.star_arg: flags.append('CO_VARARGS') if self.def_node.starstar_arg: flags.append('CO_VARKEYWORDS') code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % ( self.result_code, len(func.args) - func.num_kwonly_args, # argcount func.num_kwonly_args, # kwonlyargcount (Py3 only) len(self.varnames.args), # nlocals '|'.join(flags) or '0', # flags Naming.empty_bytes, # code Naming.empty_tuple, # consts Naming.empty_tuple, # names (FIXME) self.varnames.result(), # varnames Naming.empty_tuple, # freevars (FIXME) Naming.empty_tuple, # cellvars (FIXME) file_path_const, # filename func_name, # name self.pos[1], # firstlineno Naming.empty_bytes, # lnotab code.error_goto_if_null(self.result_code, self.pos), )) class DefaultLiteralArgNode(ExprNode): # CyFunction's literal argument default value # # Evaluate literal only once. 
subexprs = [] is_literal = True is_temp = False def __init__(self, pos, arg): super(DefaultLiteralArgNode, self).__init__(pos) self.arg = arg self.type = self.arg.type self.evaluated = False def analyse_types(self, env): return self def generate_result_code(self, code): pass def generate_evaluation_code(self, code): if not self.evaluated: self.arg.generate_evaluation_code(code) self.evaluated = True def result(self): return self.type.cast_code(self.arg.result()) class DefaultNonLiteralArgNode(ExprNode): # CyFunction's non-literal argument default value subexprs = [] def __init__(self, pos, arg, defaults_struct): super(DefaultNonLiteralArgNode, self).__init__(pos) self.arg = arg self.defaults_struct = defaults_struct def analyse_types(self, env): self.type = self.arg.type self.is_temp = False return self def generate_result_code(self, code): pass def result(self): return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % ( self.defaults_struct.name, Naming.self_cname, self.defaults_struct.lookup(self.arg.name).cname) class DefaultsTupleNode(TupleNode): # CyFunction's __defaults__ tuple def __init__(self, pos, defaults, defaults_struct): args = [] for arg in defaults: if not arg.default.is_literal: arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct) else: arg = arg.default args.append(arg) super(DefaultsTupleNode, self).__init__(pos, args=args) class DefaultsKwDictNode(DictNode): # CyFunction's __kwdefaults__ dict def __init__(self, pos, defaults, defaults_struct): items = [] for arg in defaults: name = IdentifierStringNode(arg.pos, value=arg.name) if not arg.default.is_literal: arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct) else: arg = arg.default items.append(DictItemNode(arg.pos, key=name, value=arg)) super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items) class LambdaNode(InnerFunctionNode): # Lambda expression node (only used as a function reference) # # args [CArgDeclNode] formal arguments # star_arg PyArgDeclNode or None * argument # 
starstar_arg PyArgDeclNode or None ** argument # lambda_name string a module-globally unique lambda name # result_expr ExprNode # def_node DefNode the underlying function 'def' node child_attrs = ['def_node'] name = StringEncoding.EncodedString('<lambda>') def analyse_declarations(self, env): self.def_node.no_assignment_synthesis = True self.def_node.pymethdef_required = True self.def_node.analyse_declarations(env) self.def_node.is_cyfunction = True self.pymethdef_cname = self.def_node.entry.pymethdef_cname env.add_lambda_def(self.def_node) def analyse_types(self, env): self.def_node = self.def_node.analyse_expressions(env) return super(LambdaNode, self).analyse_types(env) def generate_result_code(self, code): self.def_node.generate_execution_code(code) super(LambdaNode, self).generate_result_code(code) class GeneratorExpressionNode(LambdaNode): # A generator expression, e.g. (i for i in range(10)) # # Result is a generator. # # loop ForStatNode the for-loop, containing a YieldExprNode # def_node DefNode the underlying generator 'def' node name = StringEncoding.EncodedString('genexpr') binding = False def analyse_declarations(self, env): super(GeneratorExpressionNode, self).analyse_declarations(env) # No pymethdef required self.def_node.pymethdef_required = False self.def_node.py_wrapper_required = False self.def_node.is_cyfunction = False # Force genexpr signature self.def_node.entry.signature = TypeSlots.pyfunction_noargs def generate_result_code(self, code): code.putln( '%s = %s(%s); %s' % ( self.result(), self.def_node.entry.pyfunc_cname, self.self_result_code(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class YieldExprNode(ExprNode): # Yield expression node # # arg ExprNode the value to return from the generator # label_num integer yield label number # is_yield_from boolean is a YieldFromExprNode to delegate to another generator subexprs = ['arg'] type = py_object_type label_num = 0 is_yield_from = False def 
analyse_types(self, env): if not self.label_num: error(self.pos, "'yield' not supported here") self.is_temp = 1 if self.arg is not None: self.arg = self.arg.analyse_types(env) if not self.arg.type.is_pyobject: self.coerce_yield_argument(env) return self def coerce_yield_argument(self, env): self.arg = self.arg.coerce_to_pyobject(env) def generate_evaluation_code(self, code): if self.arg: self.arg.generate_evaluation_code(code) self.arg.make_owned_reference(code) code.putln( "%s = %s;" % ( Naming.retval_cname, self.arg.result_as(py_object_type))) self.arg.generate_post_assignment_code(code) self.arg.free_temps(code) else: code.put_init_to_py_none(Naming.retval_cname, py_object_type) self.generate_yield_code(code) def generate_yield_code(self, code): """ Generate the code to return the argument in 'Naming.retval_cname' and to continue at the yield label. """ label_num, label_name = code.new_yield_label() code.use_label(label_name) saved = [] code.funcstate.closure_temps.reset() for cname, type, manage_ref in code.funcstate.temps_in_use(): save_cname = code.funcstate.closure_temps.allocate_temp(type) saved.append((cname, save_cname, type)) if type.is_pyobject: code.put_xgiveref(cname) code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname)) code.put_xgiveref(Naming.retval_cname) code.put_finish_refcount_context() code.putln("/* return from generator, yielding value */") code.putln("%s->resume_label = %d;" % ( Naming.generator_cname, label_num)) code.putln("return %s;" % Naming.retval_cname) code.put_label(label_name) for cname, save_cname, type in saved: code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname)) if type.is_pyobject: code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname)) code.put_xgotref(cname) code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos)) if self.result_is_used: self.allocate_temp_result(code) code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname)) code.put_incref(self.result(), 
py_object_type) class YieldFromExprNode(YieldExprNode): # "yield from GEN" expression is_yield_from = True def coerce_yield_argument(self, env): if not self.arg.type.is_string: # FIXME: support C arrays and C++ iterators? error(self.pos, "yielding from non-Python object not supported") self.arg = self.arg.coerce_to_pyobject(env) def generate_evaluation_code(self, code): code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c")) self.arg.generate_evaluation_code(code) code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % ( Naming.retval_cname, Naming.generator_cname, self.arg.result_as(py_object_type))) self.arg.generate_disposal_code(code) self.arg.free_temps(code) code.put_xgotref(Naming.retval_cname) code.putln("if (likely(%s)) {" % Naming.retval_cname) self.generate_yield_code(code) code.putln("} else {") # either error or sub-generator has normally terminated: return value => node result if self.result_is_used: # YieldExprNode has allocated the result temp for us code.putln("%s = NULL;" % self.result()) code.putln("if (unlikely(__Pyx_PyGen_FetchStopIterationValue(&%s) < 0)) %s" % ( self.result(), code.error_goto(self.pos))) code.put_gotref(self.result()) else: code.putln("PyObject* exc_type = PyErr_Occurred();") code.putln("if (exc_type) {") code.putln("if (likely(exc_type == PyExc_StopIteration ||" " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();") code.putln("else %s" % code.error_goto(self.pos)) code.putln("}") code.putln("}") class GlobalsExprNode(AtomicExprNode): type = dict_type is_temp = 1 def analyse_types(self, env): env.use_utility_code(Builtin.globals_utility_code) return self gil_message = "Constructing globals dict" def may_be_none(self): return False def generate_result_code(self, code): code.putln('%s = __Pyx_Globals(); %s' % ( self.result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.result()) class LocalsDictItemNode(DictItemNode): def analyse_types(self, 
env): self.key = self.key.analyse_types(env) self.value = self.value.analyse_types(env) self.key = self.key.coerce_to_pyobject(env) if self.value.type.can_coerce_to_pyobject(env): self.value = self.value.coerce_to_pyobject(env) else: self.value = None return self class FuncLocalsExprNode(DictNode): def __init__(self, pos, env): local_vars = sorted([ entry.name for entry in env.entries.values() if entry.name]) items = [LocalsDictItemNode( pos, key=IdentifierStringNode(pos, value=var), value=NameNode(pos, name=var, allow_null=True)) for var in local_vars] DictNode.__init__(self, pos, key_value_pairs=items, exclude_null_values=True) def analyse_types(self, env): node = super(FuncLocalsExprNode, self).analyse_types(env) node.key_value_pairs = [ i for i in node.key_value_pairs if i.value is not None ] return node class PyClassLocalsExprNode(AtomicExprNode): def __init__(self, pos, pyclass_dict): AtomicExprNode.__init__(self, pos) self.pyclass_dict = pyclass_dict def analyse_types(self, env): self.type = self.pyclass_dict.type self.is_temp = False return self def may_be_none(self): return False def result(self): return self.pyclass_dict.result() def generate_result_code(self, code): pass def LocalsExprNode(pos, scope_node, env): if env.is_module_scope: return GlobalsExprNode(pos) if env.is_py_class_scope: return PyClassLocalsExprNode(pos, scope_node.dict) return FuncLocalsExprNode(pos, env) #------------------------------------------------------------------- # # Unary operator nodes # #------------------------------------------------------------------- compile_time_unary_operators = { 'not': operator.not_, '~': operator.inv, '-': operator.neg, '+': operator.pos, } class UnopNode(ExprNode): # operator string # operand ExprNode # # Processing during analyse_expressions phase: # # analyse_c_operation # Called when the operand is not a pyobject. # - Check operand type and coerce if needed. # - Determine result type and result code fragment. 
# - Allocate temporary for result if needed. subexprs = ['operand'] infix = True def calculate_constant_result(self): func = compile_time_unary_operators[self.operator] self.constant_result = func(self.operand.constant_result) def compile_time_value(self, denv): func = compile_time_unary_operators.get(self.operator) if not func: error(self.pos, "Unary '%s' not supported in compile-time expression" % self.operator) operand = self.operand.compile_time_value(denv) try: return func(operand) except Exception, e: self.compile_time_value_error(e) def infer_type(self, env): operand_type = self.operand.infer_type(env) if operand_type.is_cpp_class or operand_type.is_ptr: cpp_type = operand_type.find_cpp_operation_type(self.operator) if cpp_type is not None: return cpp_type return self.infer_unop_type(env, operand_type) def infer_unop_type(self, env, operand_type): if operand_type.is_pyobject: return py_object_type else: return operand_type def may_be_none(self): if self.operand.type and self.operand.type.is_builtin_type: if self.operand.type is not type_type: return False return ExprNode.may_be_none(self) def analyse_types(self, env): self.operand = self.operand.analyse_types(env) if self.is_py_operation(): self.coerce_operand_to_pyobject(env) self.type = py_object_type self.is_temp = 1 elif self.is_cpp_operation(): self.analyse_cpp_operation(env) else: self.analyse_c_operation(env) return self def check_const(self): return self.operand.check_const() def is_py_operation(self): return self.operand.type.is_pyobject def nogil_check(self, env): if self.is_py_operation(): self.gil_error() def is_cpp_operation(self): type = self.operand.type return type.is_cpp_class def coerce_operand_to_pyobject(self, env): self.operand = self.operand.coerce_to_pyobject(env) def generate_result_code(self, code): if self.operand.type.is_pyobject: self.generate_py_operation_code(code) def generate_py_operation_code(self, code): function = self.py_operation_function() code.putln( "%s = %s(%s); %s" % 
( self.result(), function, self.operand.py_result(), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) def type_error(self): if not self.operand.type.is_error: error(self.pos, "Invalid operand type for '%s' (%s)" % (self.operator, self.operand.type)) self.type = PyrexTypes.error_type def analyse_cpp_operation(self, env): cpp_type = self.operand.type.find_cpp_operation_type(self.operator) if cpp_type is None: error(self.pos, "'%s' operator not defined for %s" % ( self.operator, type)) self.type_error() return self.type = cpp_type class NotNode(UnopNode): # 'not' operator # # operand ExprNode operator = '!' type = PyrexTypes.c_bint_type def calculate_constant_result(self): self.constant_result = not self.operand.constant_result def compile_time_value(self, denv): operand = self.operand.compile_time_value(denv) try: return not operand except Exception, e: self.compile_time_value_error(e) def infer_unop_type(self, env, operand_type): return PyrexTypes.c_bint_type def analyse_types(self, env): self.operand = self.operand.analyse_types(env) operand_type = self.operand.type if operand_type.is_cpp_class: cpp_type = operand_type.find_cpp_operation_type(self.operator) if not cpp_type: error(self.pos, "'!' 
operator not defined for %s" % operand_type) self.type = PyrexTypes.error_type return self.type = cpp_type else: self.operand = self.operand.coerce_to_boolean(env) return self def calculate_result_code(self): return "(!%s)" % self.operand.result() def generate_result_code(self, code): pass class UnaryPlusNode(UnopNode): # unary '+' operator operator = '+' def analyse_c_operation(self, env): self.type = PyrexTypes.widest_numeric_type( self.operand.type, PyrexTypes.c_int_type) def py_operation_function(self): return "PyNumber_Positive" def calculate_result_code(self): if self.is_cpp_operation(): return "(+%s)" % self.operand.result() else: return self.operand.result() class UnaryMinusNode(UnopNode): # unary '-' operator operator = '-' def analyse_c_operation(self, env): if self.operand.type.is_numeric: self.type = PyrexTypes.widest_numeric_type( self.operand.type, PyrexTypes.c_int_type) elif self.operand.type.is_enum: self.type = PyrexTypes.c_int_type else: self.type_error() if self.type.is_complex: self.infix = False def py_operation_function(self): return "PyNumber_Negative" def calculate_result_code(self): if self.infix: return "(-%s)" % self.operand.result() else: return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result()) def get_constant_c_result_code(self): value = self.operand.get_constant_c_result_code() if value: return "(-%s)" % value class TildeNode(UnopNode): # unary '~' operator def analyse_c_operation(self, env): if self.operand.type.is_int: self.type = PyrexTypes.widest_numeric_type( self.operand.type, PyrexTypes.c_int_type) elif self.operand.type.is_enum: self.type = PyrexTypes.c_int_type else: self.type_error() def py_operation_function(self): return "PyNumber_Invert" def calculate_result_code(self): return "(~%s)" % self.operand.result() class CUnopNode(UnopNode): def is_py_operation(self): return False class DereferenceNode(CUnopNode): # unary * operator operator = '*' def infer_unop_type(self, env, operand_type): if 
operand_type.is_ptr: return operand_type.base_type else: return PyrexTypes.error_type def analyse_c_operation(self, env): if self.operand.type.is_ptr: self.type = self.operand.type.base_type else: self.type_error() def calculate_result_code(self): return "(*%s)" % self.operand.result() class DecrementIncrementNode(CUnopNode): # unary ++/-- operator def analyse_c_operation(self, env): if self.operand.type.is_numeric: self.type = PyrexTypes.widest_numeric_type( self.operand.type, PyrexTypes.c_int_type) elif self.operand.type.is_ptr: self.type = self.operand.type else: self.type_error() def calculate_result_code(self): if self.is_prefix: return "(%s%s)" % (self.operator, self.operand.result()) else: return "(%s%s)" % (self.operand.result(), self.operator) def inc_dec_constructor(is_prefix, operator): return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds) class AmpersandNode(CUnopNode): # The C address-of operator. # # operand ExprNode operator = '&' def infer_unop_type(self, env, operand_type): return PyrexTypes.c_ptr_type(operand_type) def analyse_types(self, env): self.operand = self.operand.analyse_types(env) argtype = self.operand.type if argtype.is_cpp_class: cpp_type = argtype.find_cpp_operation_type(self.operator) if cpp_type is not None: self.type = cpp_type return self if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()): if argtype.is_memoryviewslice: self.error("Cannot take address of memoryview slice") else: self.error("Taking address of non-lvalue") return self if argtype.is_pyobject: self.error("Cannot take address of Python variable") return self self.type = PyrexTypes.c_ptr_type(argtype) return self def check_const(self): return self.operand.check_const_addr() def error(self, mess): error(self.pos, mess) self.type = PyrexTypes.error_type self.result_code = "<error>" def calculate_result_code(self): return "(&%s)" % self.operand.result() def generate_result_code(self, 
code): pass

# Maps a unary operator token to the ExprNode subclass that implements it.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}

def unop_node(pos, operator, operand):
    # Construct unnop node of appropriate class for
    # given operator.
    if isinstance(operand, IntNode) and operator == '-':
        # Constant-fold unary minus applied directly to an integer
        # literal, preserving the literal's longness/unsigned suffixes.
        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
                       longness=operand.longness, unsigned=operand.unsigned)
    elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
        # NOTE: chained comparison — true when both operators are equal
        # AND that operator is '+' or '-', i.e. the user wrote "++x"/"--x".
        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
        operator = operator,
        operand = operand)


class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  typecheck    boolean
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None

    subexprs = ['operand']
    base_type = declarator = type = None

    def type_dependencies(self, env):
        # The target type is declared explicitly, so inference
        # depends on no other nodes.
        return ()

    def infer_type(self, env):
        # Resolve the declared target type lazily; a transform may
        # already have set self.type directly.
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        # Resolve the target type, analyse the operand, and insert the
        # coercions needed for Python <-> C casts.  May return a
        # coercion node instead of self (e.g. for casts to bint).
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if self.type is PyrexTypes.c_bint_type:
            # short circuit this to a coercion
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            # Casting a temporary Python object to a non-Python type
            # would leave a dangling reference to freed data.
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object.
            if self.type is bytes_type and self.operand.type.is_int:
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                base_type = self.base_type.analyse(env)
                self.operand = self.operand.coerce_to(base_type, env)
            else:
                # Only void*/struct* may be reinterpreted as an object pointer.
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value.
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            # Python object -> Python object (possibly with a type check).
            if self.typecheck:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        return self

    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()

    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()

    def nogil_check(self, env):
        # Creating a new Python object reference requires the GIL.
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)

    def calculate_result_code(self, operand_result = None):
        # Build the C cast expression.  Complex targets need to be
        # assembled from separately cast real and imaginary parts.
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                    self.type.from_parts,
                    real_part,
                    imag_part)
        else:
            return self.type.cast_code(operand_result)

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            #  Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        # Only object casts held in a temp generate code; plain C casts
        # are emitted inline via calculate_result_code().
        if self.is_temp:
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())


# Error messages used by CythonArrayNode.analyse_types() below.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = \
("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"


class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type. i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles <int[:, :]> my_c_array


    operand             ExprNode                 the thing we're casting
    base_type_node      MemoryViewSliceTypeNode  the cast expression node
    """

    subexprs = ['operand', 'shapes']

    shapes = None
    is_temp = True
    mode = "c"
    array_dtype = None

    shape_type = PyrexTypes.c_py_ssize_t_type

    def analyse_types(self, env):
        # Validate the slice spec against the operand's pointer/array
        # type and collect one shape expression per dimension.  On any
        # error, self.type stays error_type and self is returned.
        import MemoryView

        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes

        MemoryView.validate_memslice_dtype(self.pos, array_dtype)

        self.type = error_type
        self.shapes = []
        ndim = len(axes)

        # Base type of the pointer or C array we are converting
        base_type = self.operand.type

        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self

        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self

        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                                           (ndim, len(array_dimension_sizes)))
            return self

        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self

            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=str(dimsize),
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self

            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)

            self.shapes.append(shape)

            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self

                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self

                if axis_no == 0:
                    self.mode = "fortran"

            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self

        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)

        # Build the axis spec for the coercion target: one contiguous
        # dimension (first for Fortran order, last for C order).
        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')

        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self

    def allocate_temp_result(self, code):
        # Guard against double allocation; the temp holds the new
        # cython.array object.
        if self.temp_code:
            raise RuntimeError("temp allocated mulitple times")

        self.temp_code = code.funcstate.allocate_temp(self.type, True)

    def infer_type(self, env):
        return self.get_cython_array_type(env)

    def get_cython_array_type(self, env):
        # Look up the cython.view.array extension type from the shared
        # cython scope.
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type

    def generate_result_code(self, code):
        # Emit the __pyx_array_new() call: build the format string from
        # the dtype's type info and the shape tuple from the analysed
        # shape expressions, with a NULL-pointer check for pointer input.
        import Buffer

        shapes = [self.shape_type.cast_code(shape.result())
                      for shape in self.shapes]
        dtype = self.coercion_type.dtype

        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)

        itemsize = "sizeof(%s)" % dtype.declaration_code("")
        type_info = Buffer.get_type_information_cname(code, dtype)

        if self.operand.type.is_ptr:
            code.putln("if (!%s) {" % self.operand.result())
            code.putln(    'PyErr_SetString(PyExc_ValueError,'
                                '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")

        code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                                                (format_temp, type_info))
        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
            shapes_temp, buildvalue_fmt, ", ".join(shapes)))

        err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                       shapes_temp,
                                                       format_temp)
        code.putln(code.error_goto_if(err, self.pos))
        code.put_gotref(format_temp)
        code.put_gotref(shapes_temp)

        tup = (self.result(), shapes_temp, itemsize, format_temp,
               self.mode, self.operand.result())
        code.putln('%s = __pyx_array_new('
                            '%s, %s, PyBytes_AS_STRING(%s), '
                            '(char *) "%s", (char *) %s);' % tup)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.result())

        def dispose(temp):
            # Drop the helper temporaries once the array is constructed.
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)

        dispose(shapes_temp)
        dispose(format_temp)

    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type

        none_node = NoneNode(pos)
        axes = []

        # One full slice per C array dimension.
        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node,
stop=none_node, step=none_node))
            base_type = base_type.base_type

        # Mark the innermost dimension contiguous ('::1').
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)

        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result = result.analyse_types(env)
        return result


class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.

    type = PyrexTypes.c_size_t_type

    def check_const(self):
        # sizeof() is always a compile-time constant in C.
        return True

    def generate_result_code(self, code):
        # Nothing to emit; the result is produced inline by
        # calculate_result_code() in the subclasses.
        pass


class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode

    subexprs = []
    arg_type = None

    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE: this fallback path is currently disabled ("if 0").
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                # Re-dispatch as a sizeof(variable) node.
                self.__class__ = SizeofVarNode
                node = self.analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self

    def check_type(self):
        # Reject types whose size is meaningless or unknown at C level.
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)

    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.declaration_code("")
        return "(sizeof(%s))" % arg_code


class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable
    #
    #  operand   ExprNode

    subexprs = ['operand']

    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # It was a type after all: morph into sizeof(type).
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand = self.operand.analyse_types(env)
        return self

    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()

    def generate_result_code(self, code):
        pass


class TypeofNode(ExprNode):
    #  Compile-time type of an expression, as a string.
    #
    #  operand   ExprNode
    #  literal   StringNode # internal

    literal = None
    type = py_object_type

    subexprs = ['literal'] # 'operand' will be ignored after type analysis!
    def analyse_types(self, env):
        # Resolve the operand's type at compile time and replace the
        # whole expression with a Python string constant naming it.
        self.operand = self.operand.analyse_types(env)
        value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        literal = StringNode(self.pos, value=value)
        literal = literal.analyse_types(env)
        self.literal = literal.coerce_to_pyobject(env)
        return self

    def may_be_none(self):
        # Always evaluates to a string constant, never None.
        return False

    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)

    def calculate_result_code(self):
        return self.literal.calculate_result_code()


#-------------------------------------------------------------------
#
#  Binary operator nodes
#
#-------------------------------------------------------------------

# Maps a binary operator token to the Python-level function used to
# evaluate it in compile-time (DEF) expressions and constant folding.
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}

def get_compile_time_binop(node):
    # Return the compile-time evaluator for the node's operator, or
    # report an error (and return None) for unsupported operators.
    func = compile_time_binary_operators.get(node.operator)
    if not func:
        error(node.pos,
            "Binary '%s' not supported in compile-time expression"
                % node.operator)
    return func

class BinopNode(ExprNode):
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand1', 'operand2']
    inplace = False

    def calculate_constant_result(self):
        # Fold the operation on already-known constant operands.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)

    def compile_time_value(self, denv):
        # Evaluate the expression in a compile-time (DEF) environment.
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))

    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self

    def analyse_operation(self, env):
        # Dispatch to Python, C++ operator-overload, or plain C handling
        # depending on the operand types.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)

    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)

    def is_py_operation_types(self, type1, type2):
        return type1.is_pyobject or type2.is_pyobject

    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
            or self.operand2.type.is_cpp_class)

    def analyse_cpp_operation(self, env):
        # Resolve an overloaded C++ operator; a one-argument signature
        # means a member operator (only the RHS is coerced).
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type

    def result_type(self, type1, type2):
        # Infer the result type; C string / unicode pointer operands are
        # treated as their Python counterparts for inference purposes.
        if self.is_py_operation_types(type1, type2):
            if type2.is_string:
                type2 \
= Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            if type1.is_builtin_type or type2.is_builtin_type:
                if type1 is type2 and self.operator in '**%+|&^':
                    # FIXME: at least these operators should be safe - others?
                    return type1
                result_type = self.infer_builtin_types_operation(type1, type2)
                if result_type is not None:
                    return result_type
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # Overridden by subclasses that can infer a concrete builtin
        # result type (e.g. string concatenation); None means "unknown".
        return None

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)

    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()

    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function()
            if self.operator == '**':
                # Ternary PyNumber_Power(): pass Py_None as the modulus.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.is_temp:
            code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))

    def type_error(self):
        # Report an operand-type error once; suppressed if an operand is
        # already an error to avoid cascading messages.
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                    self.operand2.type))
        self.type = PyrexTypes.error_type


class CBinopNode(BinopNode):
    #  Binary operator restricted to C operands; a Python operand is a
    #  type error.

    def analyse_types(self, env):
        node = BinopNode.analyse_types(self, env)
        if node.is_py_operation():
            node.type = PyrexTypes.error_type
        return node

    def py_operation_function(self):
        return ""

    def calculate_result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result(),
            self.operator,
            self.operand2.result())

    def compute_c_result_type(self, type1, type2):
        cpp_type = None
        if type1.is_cpp_class or type1.is_ptr:
            cpp_type = type1.find_cpp_operation_type(self.operator, type2)
        # FIXME: handle the reversed case?
        #if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
        #    cpp_type = type2.find_cpp_operation_type(self.operator, type1)
        # FIXME: do we need to handle other cases here?
        return cpp_type


def c_binop_constructor(operator):
    # Factory returning a node constructor bound to a fixed C operator.
    def make_binop_node(pos, **operands):
        return CBinopNode(pos, operator=operator, **operands)
    return make_binop_node


class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.

    infix = True
    overflow_check = False
    overflow_bit_node = None

    def analyse_c_operation(self, env):
        # Determine the widened C result type and, when requested via the
        # 'overflowcheck' directive, set up the checked-arithmetic helper.
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            # For commutative ops, put the constant on the RHS so the
            # const_rhs specialisation of the helper can be used.
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)

    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None

    def may_be_none(self):
        if self.type and self.type.is_builtin_type:
            # if we know the result type, we know the operation, so it can't be None
            return False
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()

    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None

    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric  or type1.is_enum) \
            and (type2.is_numeric  or type2.is_enum)

    def generate_evaluation_code(self, code):
        # Wrap the normal evaluation with the overflow flag setup/check
        # when checked arithmetic was selected in analyse_c_operation().
        if self.overflow_check:
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)

    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            # Checked helper: writes the overflow flag through the last arg.
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            # Non-infix (e.g. complex) types provide a named helper function.
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())

    def is_py_operation_types(self, type1, type2):
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))

    def py_operation_function(self):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            # Use the in-place C-API variant (e.g. PyNumber_InPlaceAdd).
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name

    # C-API function implementing each operator on Python objects.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "/":        "__Pyx_PyNumber_Divide",
        "//":       "PyNumber_FloorDivide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power"
    }

    # Operators that have checked-arithmetic helper implementations.
    overflow_op_names = {
        "+":  "add",
        "-":  "sub",
        "*":  "mul",
        "<<":  "lshift",
    }


class IntBinopNode(NumBinopNode):
    #  Binary operation taking integer arguments.

    def c_types_okay(self, type1, type2):
        #print "IntBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_int or type1.is_enum) \
            and (type2.is_int or type2.is_enum)


class AddNode(NumBinopNode):
    #  '+' operator.
    def is_py_operation_types(self, type1, type2):
        # String/unicode-pointer concatenation is a Python operation.
        if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # b'abc' + 'abc' raises an exception in Py3,
        # so we can safely infer the Py2 type for bytes here
        string_types = [bytes_type, str_type, basestring_type, unicode_type]  # Py2.4 lacks tuple.index()
        if type1 in string_types and type2 in string_types:
            # Result takes the "wider" of the two string types.
            return string_types[max(string_types.index(type1),
                                    string_types.index(type2))]
        return None

    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # pointer + integer keeps the pointer type (C pointer arithmetic).
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
            return type2
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)

    def py_operation_function(self):
        # Unicode concatenation has dedicated helpers; the "Safe" variant
        # additionally handles None operands.
        type1, type2 = self.operand1.type, self.operand2.type
        if type1 is unicode_type or type2 is unicode_type:
            if type1.is_builtin_type and type2.is_builtin_type:
                if self.operand1.may_be_none() or self.operand2.may_be_none():
                    return '__Pyx_PyUnicode_ConcatSafe'
                else:
                    return '__Pyx_PyUnicode_Concat'
        return super(AddNode, self).py_operation_function()


class SubNode(NumBinopNode):
    #  '-' operator.

    def compute_c_result_type(self, type1, type2):
        # pointer - integer keeps the pointer type;
        # pointer - pointer yields ptrdiff_t (C pointer arithmetic).
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
            return PyrexTypes.c_ptrdiff_t_type
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)


class MulNode(NumBinopNode):
    #  '*' operator.
    def is_py_operation_types(self, type1, type2):
        # String repetition (str * int or int * str) is a Python operation.
        if ((type1.is_string and type2.is_int) or
                (type2.is_string and type1.is_int)):
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # let's assume that whatever builtin type you multiply a string with
        # will either return a string of the same type or fail with an exception
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2.is_builtin_type:
            return type1
        if type2 in string_types and type1.is_builtin_type:
            return type2
        # multiplication of containers/numbers with an integer value
        # always (?) returns the same type
        if type1.is_int:
            return type2
        if type2.is_int:
            return type1
        return None


class DivNode(NumBinopNode):
    #  '/' or '//' operator.

    cdivision = None           # use plain C division semantics (no Python checks)
    truedivision = None        # == "unknown" if operator == '/'
    ctruedivision = False      # C-level true division requested
    cdivision_warnings = False
    zerodivision_check = None  # emit runtime ZeroDivisionError check

    def find_compile_time_binary_operator(self, op1, op2):
        # Resolve '/' to floor division for int/int when truedivision
        # is undecided (Py2 semantics at compile time).
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, (int, long)) and isinstance(op2, (int, long)):
                func = compile_time_binary_operators['//']
        return func

    def calculate_constant_result(self):
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)

    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_operation(self, env):
        # Decide between C division and Python-style true division,
        # and whether a runtime zero-division check is required.
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            self.cdivision = True
        if not self.type.is_pyobject:
            # check is needed unless cdivision was requested or the divisor
            # is a known non-zero constant
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)

    def compute_c_result_type(self, type1, type2):
        # True division of two non-float numbers must widen to at least double.
        if self.operator == '/' and self.ctruedivision:
            if not type1.is_float and not type2.is_float:
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)

    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"

    def generate_evaluation_code(self, code):
        # For non-cdivision integer division, pull in the __Pyx_div_* helper
        # that implements Python (floor) division semantics in C.
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                self.cdivision = (code.globalstate.directives['cdivision']
                                  or not self.type.signed
                                  or self.type.is_float)
            if not self.cdivision:
                code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)

    def generate_div_warning_code(self, code):
        # Emit the runtime ZeroDivisionError / OverflowError checks and the
        # optional cdivision semantics warning.  Checks raise Python
        # exceptions, so the GIL is (re-)acquired around them.
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # INT_MIN / -1 overflows; check when sizes make it possible
                    code.globalstate.use_utility_code(division_overflow_test_code)
                    if self.operand2.type.signed == 2:
                        # explicitly signed, no runtime check needed
                        minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
                    else:
                        type_of_op2 = self.operand2.type.declaration_code('')
                        minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
                            type_of_op2, self.operand2.result(), type_of_op2)
                    code.putln("else if (sizeof(%s) == sizeof(long) && %s "
                               " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                                   self.type.declaration_code(''),
                                   minus1_check,
                                   self.operand1.result()))
                    code.put_ensure_gil()
                    code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
                    code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # warn at runtime when C and Python division would differ
                # (operands of opposite sign)
                code.globalstate.use_utility_code(cdivision_warning_utility_code)
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                    self.operand1.result(),
                    self.operand2.result()))
                code.put_ensure_gil()
                code.putln(code.set_error_info(self.pos, used=True))
                code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
                           "%(LINENO)s)) {" % {
                               'FILENAME': Naming.filename_cname,
                               'LINENO': Naming.lineno_cname,
                           })
                code.put_release_ensured_gil()
                code.put_goto(code.error_label)
                code.putln("}")
                code.put_release_ensured_gil()
                code.putln("}")

    def calculate_result_code(self):
        # Choose between plain C '/', floor() for float '//', or the
        # __Pyx_div_* helper implementing Python division semantics.
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # cast operands so the C division happens in the result type
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())


class ModNode(DivNode):
    #  '%' operator.

    def is_py_operation_types(self, type1, type2):
        # '%' on strings is Python string formatting.
        return (type1.is_string
                or type2.is_string
                or NumBinopNode.is_py_operation_types(self, type1, type2))

    def infer_builtin_types_operation(self, type1, type2):
        # b'%s' % xyz  raises an exception in Py3, so it's safe to infer the type for Py2
        if type1 is unicode_type:
            # None + xyz  may be implemented by RHS
            if type2.is_builtin_type or not self.operand1.may_be_none():
                return type1
        elif type1 in (bytes_type, str_type, basestring_type):
            if type2 is unicode_type:
                return type2
            elif type2.is_numeric:
                return type1
            elif type1 is bytes_type and not type2.is_builtin_type:
                return None   # RHS might implement '% operator differently in Py3
            else:
                return basestring_type  # either str or unicode, can't tell
        return None

    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"

    def analyse_operation(self, env):
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)

    def generate_evaluation_code(self, code):
        # Pull in the __Pyx_mod_* helper for Python modulo semantics
        # (sign of result follows the divisor).
        if not self.type.is_pyobject and not self.cdivision:
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    mod_int_utility_code.specialize(self.type))
            else:  # float
                code.globalstate.use_utility_code(
                    mod_float_utility_code.specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # note: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)

    def calculate_result_code(self):
        if self.cdivision:
            if self.type.is_float:
                # fmod/fmodf/fmodl depending on the float width
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            # Python modulo semantics via the generated helper function
            return "__Pyx_mod_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())

    def py_operation_function(self):
        # String formatting uses dedicated helpers; the "Safe" variants
        # tolerate a None left operand.
        if self.operand1.type is unicode_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyUnicode_FormatSafe'
            else:
                return 'PyUnicode_Format'
        elif self.operand1.type is str_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyString_FormatSafe'
            else:
                return '__Pyx_PyString_Format'
        return super(ModNode, self).py_operation_function()


class PowNode(NumBinopNode):
    #  '**' operator.

    def analyse_c_operation(self, env):
        # Select the C power function for the operand type: complex float,
        # plain float (pow/powf/powl), or a generated integer helper.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
            env.use_utility_code(
                int_pow_utility_code.specialize(
                    func_name=self.pow_func,
                    type=self.type.declaration_code(''),
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                  (self.operand1.type, self.operand2.type))

    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))


# Note: This class is temporarily "shut down" into an ineffective temp
# allocation mode.
#
# More sophisticated temp reuse was going on before, one could have a
# look at adding this again after /all/ classes are converted to the
# new temp scheme.
# (The temp juggling cannot work otherwise).
class BoolBinopNode(ExprNode):
    #  Short-circuiting boolean operation.
    #
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode

    subexprs = ['operand1', 'operand2']

    def infer_type(self, env):
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)

    def may_be_none(self):
        # 'x or y' can only yield None via y; 'x and y' via either operand.
        if self.operator == 'or':
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()

    def calculate_constant_result(self):
        if self.operator == 'and':
            self.constant_result = \
                self.operand1.constant_result and \
                self.operand2.constant_result
        else:
            self.constant_result = \
                self.operand1.constant_result or \
                self.operand2.constant_result

    def compile_time_value(self, denv):
        if self.operator == 'and':
            return self.operand1.compile_time_value(denv) \
                and self.operand2.compile_time_value(denv)
        else:
            return self.operand1.compile_time_value(denv) \
                or self.operand2.compile_time_value(denv)

    def coerce_to_boolean(self, env):
        # Push the coercion into both operands instead of the result,
        # preserving the short-circuit evaluation.
        return BoolBinopNode(
            self.pos,
            operator = self.operator,
            operand1 = self.operand1.coerce_to_boolean(env),
            operand2 = self.operand2.coerce_to_boolean(env),
            type = PyrexTypes.c_bint_type,
            is_temp = self.is_temp)

    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
        self.operand1 = self.operand1.coerce_to(self.type, env)
        self.operand2 = self.operand2.coerce_to(self.type, env)

        # For what we're about to do, it's vital that
        # both operands be temp nodes.
        self.operand1 = self.operand1.coerce_to_simple(env)
        self.operand2 = self.operand2.coerce_to_simple(env)
        self.is_temp = 1
        return self

    gil_message = "Truth-testing Python object"

    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()

    def generate_evaluation_code(self, code):
        # Emit short-circuit evaluation: test operand1, and only evaluate
        # operand2 in the branch where it is needed.  The untaken branch
        # reuses operand1 as the result.
        code.mark_pos(self.pos)
        self.operand1.generate_evaluation_code(code)
        test_result, uses_temp = self.generate_operand1_test(code)
        if self.operator == 'and':
            sense = ""
        else:
            sense = "!"
        code.putln(
            "if (%s%s) {" % (
                sense, test_result))
        if uses_temp:
            code.funcstate.release_temp(test_result)
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        self.operand2.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand2.result()))
        self.operand2.generate_post_assignment_code(code)
        self.operand2.free_temps(code)
        code.putln("} else {")
        self.operand1.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand1.result()))
        self.operand1.generate_post_assignment_code(code)
        self.operand1.free_temps(code)
        code.putln("}")

    def generate_operand1_test(self, code):
        #  Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(PyrexTypes.c_bint_type,
                                                       manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        # second element tells the caller whether test_result is a temp
        # that must be released
        return (test_result, self.type.is_pyobject)


class CondExprNode(ExprNode):
    #  Short-circuiting conditional expression.
    #
    #  test        ExprNode
    #  true_val    ExprNode
    #  false_val   ExprNode

    true_val = None
    false_val = None

    subexprs = ['test', 'true_val', 'false_val']

    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)

    def infer_type(self, env):
        return PyrexTypes.independent_spanning_type(
            self.true_val.infer_type(env),
            self.false_val.infer_type(env))

    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result

    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        self.is_temp = 1
        return self.analyse_result_type(env)

    def analyse_result_type(self, env):
        # Result type spans both branches; coerce the branches to it when
        # Python objects are involved.
        self.type = PyrexTypes.independent_spanning_type(
            self.true_val.type, self.false_val.type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        if self.type == PyrexTypes.error_type:
            self.type_error()
        return self

    def coerce_to(self, dst_type, env):
        # Coerce both branches rather than the (conditional) result.
        self.true_val = self.true_val.coerce_to(dst_type, env)
        self.false_val = self.false_val.coerce_to(dst_type, env)
        self.result_ctype = None
        return self.analyse_result_type(env)

    def type_error(self):
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                  (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type

    def check_const(self):
        return (self.test.check_const()
                and self.true_val.check_const()
                and self.false_val.check_const())

    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result())
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)

    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its value to this node's result temp.
        expr.generate_evaluation_code(code)
        expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)


# Mapping of comparison operators to CPython rich comparison constants.
richcmp_constants = {
    "<" : "Py_LT",
    "<=": "Py_LE",
    "==": "Py_EQ",
    "!=": "Py_NE",
    "<>": "Py_NE",
    ">" : "Py_GT",
    ">=": "Py_GE",
    # the following are faked by special compare functions
    "in"    : "Py_EQ",
    "not_in": "Py_NE",
}

class CmpNode(object):
    #  Mixin class containing code common to PrimaryCmpNodes
    #  and CascadedCmpNodes.

    special_bool_cmp_function = None      # name of optimised C compare helper, if any
    special_bool_cmp_utility_code = None  # utility code providing that helper

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def calculate_cascaded_constant_result(self, operand1_result):
        # Fold a comparison whose operands have compile-time constant values.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        if (isinstance(operand1_result, (bytes, unicode)) and
                isinstance(operand2_result, (bytes, unicode)) and
                type(operand1_result) != type(operand2_result)):
            # string comparison of different types isn't portable
            return

        if self.operator in ('in', 'not_in'):
            if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
                if not self.operand2.args:
                    # membership in an empty container is statically known
                    self.constant_result = self.operator == 'not_in'
                    return
                elif isinstance(self.operand2, ListNode) and not self.cascade:
                    # tuples are more efficient to store than lists
                    self.operand2 = self.operand2.as_tuple()
            elif isinstance(self.operand2, DictNode):
                if not self.operand2.key_value_pairs:
                    self.constant_result = self.operator == 'not_in'
                    return

        self.constant_result = func(operand1_result, operand2_result)

    def cascaded_compile_time_value(self, operand1, denv):
        # Evaluate this comparison (and any cascade) at compile time;
        # cascades short-circuit like Python chained comparisons.
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result

    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class

    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False

        # single-character string literals can be compared as char values
        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True

        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                if Builtin.unicode_type in (type1, type2):
                    return PyrexTypes.c_py_ucs4_type
                else:
                    return PyrexTypes.c_uchar_type

        return None

    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type in which the comparison is performed, merging
        # recursively across a cascade of chained comparisons.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type

        new_common_type = None

        # catch general errors
        if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
               type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type

        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            if op not in ('==', '!=') \
                   and (type1.is_complex or type1.is_numeric) \
                   and (type2.is_complex or type2.is_numeric):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = type1
            elif type2.is_pyobject:
                new_common_type = type2
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)

        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1 == type2:
                new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type

        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type

        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)

        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)

        return common_type

    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))

    def is_python_comparison(self):
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))

    def coerce_operands_to(self, dst_type, env):
        # Coerce operand2 (and any cascaded operands) to the common type.
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)

    def is_python_result(self):
        # True when the comparison yields a Python object rather than a C bint.
        return ((self.has_python_operands()
                 and self.special_bool_cmp_function is None
                 and self.operator not in ('is', 'is_not', 'in', 'not_in')
                 and not self.is_c_string_contains()
                 and not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))

    def is_c_string_contains(self):
        # 'c in bytes_or_str' / 'uchar in unicode' can be done with a C helper.
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))

    def is_ptr_contains(self):
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string

    def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
        # Select an optimised C helper for equality or containment tests on
        # known builtin types, setting special_bool_cmp_function/utility_code.
        # note: currently operand1 must get coerced to a Python object if we succeed here!
        if self.operator in ('==', '!='):
            type1, type2 = operand1.type, self.operand2.type
            if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        elif self.operator in ('in', 'not_in'):
            if self.operand2.type is Builtin.dict_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
                return True
            elif self.operand2.type is Builtin.unicode_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
                self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
                return True
            else:
                if not self.operand2.type.is_pyobject:
                    self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
                return True
        return False

    def generate_operation_code(self, code, result_code,
            operand1, op , operand2):
        # Emit the C code for one comparison, choosing between a special
        # helper, PyObject_RichCompare, complex equality, or a plain C
        # comparison operator.
        if self.type.is_pyobject:
            error_clause = code.error_goto_if_null
            got_ref = "__Pyx_XGOTREF(%s); " % result_code
            if self.special_bool_cmp_function:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            else:
                coerce_result = "__Pyx_PyBool_FromLong"
        else:
            error_clause = code.error_goto_if_neg
            got_ref = ""
            coerce_result = ""

        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            if self.special_bool_cmp_utility_code:
                code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
            code.putln(
                "%s = %s(%s(%s, %s, %s)); %s%s" % (
                    result_code,
                    coerce_result,
                    self.special_bool_cmp_function,
                    result1, result2, richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))

        elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
            assert op not in ('in', 'not_in'), op
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))

        elif operand1.type.is_complex:
            # complex supports only (in)equality; '!=' negates the 'eq' op
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                op == "!=" and "!"
                or "",
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))

        else:
            # plain C comparison; pick a common type for mixed operands
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            code.putln("%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2))

    def c_operator(self, op):
        # 'is' / 'is not' become C pointer (in)equality.
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op


class PrimaryCmpNode(ExprNode, CmpNode):
    #  Non-cascaded comparison or first comparison of
    #  a cascaded sequence.
    #
    #  operator      string
    #  operand1      ExprNode
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode

    #  We don't use the subexprs mechanism, because
    #  things here are too complicated for it to handle.
    #  Instead, we override all the framework methods
    #  which use it.

    child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']

    cascade = None
    coerced_operand2 = None
    is_memslice_nonecheck = False

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def type_dependencies(self, env):
        return ()

    def calculate_constant_result(self):
        assert not self.cascade
        self.calculate_cascaded_constant_result(self.operand1.constant_result)

    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)

    def analyse_types(self, env):
        # Analyse operand types, pick the comparison strategy (C string
        # containment, pointer containment, special helper, or generic
        # common-type comparison) and set the result type.
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return self

        if self.analyse_memoryviewslice_comparison(env):
            return self

        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)

        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject

        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)

        if self.cascade:
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        cdr = self.cascade
        while cdr:
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self

    def analyse_cpp_comparison(self, env):
        # Resolve an overloaded C++ comparison operator and coerce the
        # operands to the operator's parameter types.
        type1 = self.operand1.type
        type2 = self.operand2.type
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # member operator: only the RHS is an argument
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.is_pycmp = False
        self.type = func_type.return_type

    def analyse_memoryviewslice_comparison(self, env):
        # 'memview ==/!=/is/is not None' becomes a C-level None check.
        have_none = self.operand1.is_none or self.operand2.is_none
        have_slice = (self.operand1.type.is_memoryviewslice or
                      self.operand2.type.is_memoryviewslice)
        ops = ('==', '!=', 'is', 'is_not')
        if have_slice and have_none and self.operator in ops:
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            self.is_memslice_nonecheck = True
            return True

        return False

    def coerce_to_boolean(self, env):
        if self.is_pycmp:
            # coercing to bool => may allow for more efficient comparison code
            if self.find_special_bool_compare_function(
                    env, self.operand1, result_is_bool=True):
                self.is_pycmp = False
                self.type = PyrexTypes.c_bint_type
                self.is_temp = 1
                if self.cascade:
                    operand2 = self.cascade.optimise_comparison(
                        self.operand2, env, result_is_bool=True)
                    if operand2 is not self.operand2:
                        self.coerced_operand2 = operand2
                return self
        # TODO: check if we can optimise parts of the cascade here
        return ExprNode.coerce_to_boolean(self, env)

    def has_python_operands(self):
        return (self.operand1.type.is_pyobject
            or self.operand2.type.is_pyobject)

    def check_const(self):
        if self.cascade:
            self.not_const()
            return False
        else:
            return self.operand1.check_const() and self.operand2.check_const()

    def calculate_result_code(self):
        # Build the C expression for a non-temp comparison result.
        if self.operand1.type.is_complex:
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
else: negation = "" return "(%s%s(%s, %s))" % ( negation, method, self.operand2.result(), self.operand1.result()) else: result1 = self.operand1.result() result2 = self.operand2.result() if self.is_memslice_nonecheck: if self.operand1.type.is_memoryviewslice: result1 = "((PyObject *) %s.memview)" % result1 else: result2 = "((PyObject *) %s.memview)" % result2 return "(%s %s %s)" % ( result1, self.c_operator(self.operator), result2) def generate_evaluation_code(self, code): self.operand1.generate_evaluation_code(code) self.operand2.generate_evaluation_code(code) if self.is_temp: self.allocate_temp_result(code) self.generate_operation_code(code, self.result(), self.operand1, self.operator, self.operand2) if self.cascade: self.cascade.generate_evaluation_code( code, self.result(), self.coerced_operand2 or self.operand2, needs_evaluation=self.coerced_operand2 is not None) self.operand1.generate_disposal_code(code) self.operand1.free_temps(code) self.operand2.generate_disposal_code(code) self.operand2.free_temps(code) def generate_subexpr_disposal_code(self, code): # If this is called, it is a non-cascaded cmp, # so only need to dispose of the two main operands. self.operand1.generate_disposal_code(code) self.operand2.generate_disposal_code(code) def free_subexpr_temps(self, code): # If this is called, it is a non-cascaded cmp, # so only need to dispose of the two main operands. self.operand1.free_temps(code) self.operand2.free_temps(code) def annotate(self, code): self.operand1.annotate(code) self.operand2.annotate(code) if self.cascade: self.cascade.annotate(code) class CascadedCmpNode(Node, CmpNode): # A CascadedCmpNode is not a complete expression node. It # hangs off the side of another comparison node, shares # its left operand with that node, and shares its result # with the PrimaryCmpNode at the head of the chain. 
# # operator string # operand2 ExprNode # cascade CascadedCmpNode child_attrs = ['operand2', 'coerced_operand2', 'cascade'] cascade = None coerced_operand2 = None constant_result = constant_value_not_set # FIXME: where to calculate this? def infer_type(self, env): # TODO: Actually implement this (after merging with -unstable). return py_object_type def type_dependencies(self, env): return () def has_constant_result(self): return self.constant_result is not constant_value_not_set and \ self.constant_result is not not_a_constant def analyse_types(self, env): self.operand2 = self.operand2.analyse_types(env) if self.cascade: self.cascade = self.cascade.analyse_types(env) return self def has_python_operands(self): return self.operand2.type.is_pyobject def optimise_comparison(self, operand1, env, result_is_bool=False): if self.find_special_bool_compare_function(env, operand1, result_is_bool): self.is_pycmp = False self.type = PyrexTypes.c_bint_type if not operand1.type.is_pyobject: operand1 = operand1.coerce_to_pyobject(env) if self.cascade: operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool) if operand2 is not self.operand2: self.coerced_operand2 = operand2 return operand1 def coerce_operands_to_pyobjects(self, env): self.operand2 = self.operand2.coerce_to_pyobject(env) if self.operand2.type is dict_type and self.operator in ('in', 'not_in'): self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") if self.cascade: self.cascade.coerce_operands_to_pyobjects(env) def coerce_cascaded_operands_to_temp(self, env): if self.cascade: #self.operand2 = self.operand2.coerce_to_temp(env) #CTT self.operand2 = self.operand2.coerce_to_simple(env) self.cascade.coerce_cascaded_operands_to_temp(env) def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False): if self.type.is_pyobject: code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result) code.put_decref(result, self.type) else: code.putln("if (%s) {" % 
result) if needs_evaluation: operand1.generate_evaluation_code(code) self.operand2.generate_evaluation_code(code) self.generate_operation_code(code, result, operand1, self.operator, self.operand2) if self.cascade: self.cascade.generate_evaluation_code( code, result, self.coerced_operand2 or self.operand2, needs_evaluation=self.coerced_operand2 is not None) if needs_evaluation: operand1.generate_disposal_code(code) operand1.free_temps(code) # Cascaded cmp result is always temp self.operand2.generate_disposal_code(code) self.operand2.free_temps(code) code.putln("}") def annotate(self, code): self.operand2.annotate(code) if self.cascade: self.cascade.annotate(code) binop_node_classes = { "or": BoolBinopNode, "and": BoolBinopNode, "|": IntBinopNode, "^": IntBinopNode, "&": IntBinopNode, "<<": IntBinopNode, ">>": IntBinopNode, "+": AddNode, "-": SubNode, "*": MulNode, "/": DivNode, "//": DivNode, "%": ModNode, "**": PowNode } def binop_node(pos, operator, operand1, operand2, inplace=False): # Construct binop node of appropriate class for # given operator. return binop_node_classes[operator](pos, operator = operator, operand1 = operand1, operand2 = operand2, inplace = inplace) #------------------------------------------------------------------- # # Coercion nodes # # Coercion nodes are special in that they are created during # the analyse_types phase of parse tree processing. # Their __init__ methods consequently incorporate some aspects # of that phase. # #------------------------------------------------------------------- class CoercionNode(ExprNode): # Abstract base class for coercion nodes. 
# # arg ExprNode node being coerced subexprs = ['arg'] constant_result = not_a_constant def __init__(self, arg): super(CoercionNode, self).__init__(arg.pos) self.arg = arg if debug_coercion: print("%s Coercing %s" % (self, self.arg)) def calculate_constant_result(self): # constant folding can break type coercion, so this is disabled pass def annotate(self, code): self.arg.annotate(code) if self.arg.type != self.type: file, line, col = self.pos code.annotate((file, line, col-1), AnnotationItem( style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type))) class CoerceToMemViewSliceNode(CoercionNode): """ Coerce an object to a memoryview slice. This holds a new reference in a managed temp. """ def __init__(self, arg, dst_type, env): assert dst_type.is_memoryviewslice assert not arg.type.is_memoryviewslice CoercionNode.__init__(self, arg) self.type = dst_type self.is_temp = 1 self.env = env self.use_managed_ref = True self.arg = arg def generate_result_code(self, code): self.type.create_from_py_utility_code(self.env) code.putln("%s = %s(%s);" % (self.result(), self.type.from_py_function, self.arg.py_result())) error_cond = self.type.error_condition(self.result()) code.putln(code.error_goto_if(error_cond, self.pos)) class CastNode(CoercionNode): # Wrap a node in a C type cast. def __init__(self, arg, new_type): CoercionNode.__init__(self, arg) self.type = new_type def may_be_none(self): return self.arg.may_be_none() def calculate_result_code(self): return self.arg.result_as(self.type) def generate_result_code(self, code): self.arg.generate_result_code(code) class PyTypeTestNode(CoercionNode): # This node is used to check that a generic Python # object is an instance of a particular extension type. # This node borrows the result of its argument node. exact_builtin_type = True def __init__(self, arg, dst_type, env, notnone=False): # The arg is know to be a Python object, and # the dst_type is known to be an extension type. 
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type" CoercionNode.__init__(self, arg) self.type = dst_type self.result_ctype = arg.ctype() self.notnone = notnone nogil_check = Node.gil_error gil_message = "Python type test" def analyse_types(self, env): return self def may_be_none(self): if self.notnone: return False return self.arg.may_be_none() def is_simple(self): return self.arg.is_simple() def result_in_temp(self): return self.arg.result_in_temp() def is_ephemeral(self): return self.arg.is_ephemeral() def nonlocally_immutable(self): return self.arg.nonlocally_immutable() def calculate_constant_result(self): # FIXME pass def calculate_result_code(self): return self.arg.result() def generate_result_code(self, code): if self.type.typeobj_is_available(): if self.type.is_builtin_type: type_test = self.type.type_test_code( self.arg.py_result(), self.notnone, exact=self.exact_builtin_type) else: type_test = self.type.type_test_code( self.arg.py_result(), self.notnone) code.globalstate.use_utility_code( UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c")) code.putln("if (!(%s)) %s" % ( type_test, code.error_goto(self.pos))) else: error(self.pos, "Cannot test type of extern C class " "without type object name specification") def generate_post_assignment_code(self, code): self.arg.generate_post_assignment_code(code) def free_temps(self, code): self.arg.free_temps(code) class NoneCheckNode(CoercionNode): # This node is used to check that a Python object is not None and # raises an appropriate exception (as specified by the creating # transform). 
is_nonecheck = True def __init__(self, arg, exception_type_cname, exception_message, exception_format_args): CoercionNode.__init__(self, arg) self.type = arg.type self.result_ctype = arg.ctype() self.exception_type_cname = exception_type_cname self.exception_message = exception_message self.exception_format_args = tuple(exception_format_args or ()) nogil_check = None # this node only guards an operation that would fail already def analyse_types(self, env): return self def may_be_none(self): return False def is_simple(self): return self.arg.is_simple() def result_in_temp(self): return self.arg.result_in_temp() def nonlocally_immutable(self): return self.arg.nonlocally_immutable() def calculate_result_code(self): return self.arg.result() def condition(self): if self.type.is_pyobject: return self.arg.py_result() elif self.type.is_memoryviewslice: return "((PyObject *) %s.memview)" % self.arg.result() else: raise Exception("unsupported type") def put_nonecheck(self, code): code.putln( "if (unlikely(%s == Py_None)) {" % self.condition()) if self.in_nogil_context: code.put_ensure_gil() escape = StringEncoding.escape_byte_string if self.exception_format_args: code.putln('PyErr_Format(%s, "%s", %s);' % ( self.exception_type_cname, StringEncoding.escape_byte_string( self.exception_message.encode('UTF-8')), ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8')) for arg in self.exception_format_args ]))) else: code.putln('PyErr_SetString(%s, "%s");' % ( self.exception_type_cname, escape(self.exception_message.encode('UTF-8')))) if self.in_nogil_context: code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln("}") def generate_result_code(self, code): self.put_nonecheck(code) def generate_post_assignment_code(self, code): self.arg.generate_post_assignment_code(code) def free_temps(self, code): self.arg.free_temps(code) class CoerceToPyTypeNode(CoercionNode): # This node is used to convert a C data type # to a Python object. 
type = py_object_type is_temp = 1 def __init__(self, arg, env, type=py_object_type): if not arg.type.create_to_py_utility_code(env): error(arg.pos, "Cannot convert '%s' to Python object" % arg.type) elif arg.type.is_complex: # special case: complex coercion is so complex that it # uses a macro ("__pyx_PyComplex_FromComplex()"), for # which the argument must be simple arg = arg.coerce_to_simple(env) CoercionNode.__init__(self, arg) if type is py_object_type: # be specific about some known types if arg.type.is_string or arg.type.is_cpp_string: self.type = default_str_type(env) elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char: self.type = unicode_type elif arg.type.is_complex: self.type = Builtin.complex_type elif arg.type.is_string or arg.type.is_cpp_string: if (type not in (bytes_type, bytearray_type) and not env.directives['c_string_encoding']): error(arg.pos, "default encoding required for conversion from '%s' to '%s'" % (arg.type, type)) self.type = type else: # FIXME: check that the target type and the resulting type are compatible pass if arg.type.is_memoryviewslice: # Register utility codes at this point arg.type.get_to_py_function(env, arg) self.env = env gil_message = "Converting to Python object" def may_be_none(self): # FIXME: is this always safe? return False def coerce_to_boolean(self, env): arg_type = self.arg.type if (arg_type == PyrexTypes.c_bint_type or (arg_type.is_pyobject and arg_type.name == 'bool')): return self.arg.coerce_to_temp(env) else: return CoerceToBooleanNode(self, env) def coerce_to_integer(self, env): # If not already some C integer type, coerce to longint. 
if self.arg.type.is_int: return self.arg else: return self.arg.coerce_to(PyrexTypes.c_long_type, env) def analyse_types(self, env): # The arg is always already analysed return self def generate_result_code(self, code): arg_type = self.arg.type if arg_type.is_memoryviewslice: funccall = arg_type.get_to_py_function(self.env, self.arg) else: func = arg_type.to_py_function if arg_type.is_string or arg_type.is_cpp_string: if self.type in (bytes_type, str_type, unicode_type): func = func.replace("Object", self.type.name.title()) elif self.type is bytearray_type: func = func.replace("Object", "ByteArray") funccall = "%s(%s)" % (func, self.arg.result()) code.putln('%s = %s; %s' % ( self.result(), funccall, code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) class CoerceIntToBytesNode(CoerceToPyTypeNode): # This node is used to convert a C int type to a Python bytes # object. is_temp = 1 def __init__(self, arg, env): arg = arg.coerce_to_simple(env) CoercionNode.__init__(self, arg) self.type = Builtin.bytes_type def generate_result_code(self, code): arg = self.arg arg_result = arg.result() if arg.type not in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type, PyrexTypes.c_schar_type): if arg.type.signed: code.putln("if ((%s < 0) || (%s > 255)) {" % ( arg_result, arg_result)) else: code.putln("if (%s > 255) {" % arg_result) code.putln('PyErr_SetString(PyExc_OverflowError, ' '"value too large to pack into a byte"); %s' % ( code.error_goto(self.pos))) code.putln('}') temp = None if arg.type is not PyrexTypes.c_char_type: temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False) code.putln("%s = (char)%s;" % (temp, arg_result)) arg_result = temp code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % ( self.result(), arg_result, code.error_goto_if_null(self.result(), self.pos))) if temp is not None: code.funcstate.release_temp(temp) code.put_gotref(self.py_result()) class CoerceFromPyTypeNode(CoercionNode): # This node is used 
to convert a Python object # to a C data type. def __init__(self, result_type, arg, env): CoercionNode.__init__(self, arg) self.type = result_type self.is_temp = 1 if not result_type.create_from_py_utility_code(env): error(arg.pos, "Cannot convert Python object to '%s'" % result_type) if self.type.is_string or self.type.is_pyunicode_ptr: if self.arg.is_ephemeral(): error(arg.pos, "Obtaining '%s' from temporary Python value" % result_type) elif self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal: warning(arg.pos, "Obtaining '%s' from externally modifiable global Python value" % result_type, level=1) def analyse_types(self, env): # The arg is always already analysed return self def generate_result_code(self, code): function = self.type.from_py_function operand = self.arg.py_result() rhs = "%s(%s)" % (function, operand) if self.type.is_enum: rhs = typecast(self.type, c_long_type, rhs) code.putln('%s = %s; %s' % ( self.result(), rhs, code.error_goto_if(self.type.error_condition(self.result()), self.pos))) if self.type.is_pyobject: code.put_gotref(self.py_result()) def nogil_check(self, env): error(self.pos, "Coercion from Python not allowed without the GIL") class CoerceToBooleanNode(CoercionNode): # This node is used when a result needs to be used # in a boolean context. 
type = PyrexTypes.c_bint_type _special_builtins = { Builtin.list_type : 'PyList_GET_SIZE', Builtin.tuple_type : 'PyTuple_GET_SIZE', Builtin.bytes_type : 'PyBytes_GET_SIZE', Builtin.unicode_type : 'PyUnicode_GET_SIZE', } def __init__(self, arg, env): CoercionNode.__init__(self, arg) if arg.type.is_pyobject: self.is_temp = 1 def nogil_check(self, env): if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None: self.gil_error() gil_message = "Truth-testing Python object" def check_const(self): if self.is_temp: self.not_const() return False return self.arg.check_const() def calculate_result_code(self): return "(%s != 0)" % self.arg.result() def generate_result_code(self, code): if not self.is_temp: return test_func = self._special_builtins.get(self.arg.type) if test_func is not None: code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % ( self.result(), self.arg.py_result(), test_func, self.arg.py_result())) else: code.putln( "%s = __Pyx_PyObject_IsTrue(%s); %s" % ( self.result(), self.arg.py_result(), code.error_goto_if_neg(self.result(), self.pos))) class CoerceToComplexNode(CoercionNode): def __init__(self, arg, dst_type, env): if arg.type.is_complex: arg = arg.coerce_to_simple(env) self.type = dst_type CoercionNode.__init__(self, arg) dst_type.create_declaration_utility_code(env) def calculate_result_code(self): if self.arg.type.is_complex: real_part = "__Pyx_CREAL(%s)" % self.arg.result() imag_part = "__Pyx_CIMAG(%s)" % self.arg.result() else: real_part = self.arg.result() imag_part = "0" return "%s(%s, %s)" % ( self.type.from_parts, real_part, imag_part) def generate_result_code(self, code): pass class CoerceToTempNode(CoercionNode): # This node is used to force the result of another node # to be stored in a temporary. It is only used if the # argument node's result is not already in a temporary. 
def __init__(self, arg, env): CoercionNode.__init__(self, arg) self.type = self.arg.type.as_argument_type() self.constant_result = self.arg.constant_result self.is_temp = 1 if self.type.is_pyobject: self.result_ctype = py_object_type gil_message = "Creating temporary Python reference" def analyse_types(self, env): # The arg is always already analysed return self def coerce_to_boolean(self, env): self.arg = self.arg.coerce_to_boolean(env) if self.arg.is_simple(): return self.arg self.type = self.arg.type self.result_ctype = self.type return self def generate_result_code(self, code): #self.arg.generate_evaluation_code(code) # Already done # by generic generate_subexpr_evaluation_code! code.putln("%s = %s;" % ( self.result(), self.arg.result_as(self.ctype()))) if self.use_managed_ref: if self.type.is_pyobject: code.put_incref(self.result(), self.ctype()) elif self.type.is_memoryviewslice: code.put_incref_memoryviewslice(self.result(), not self.in_nogil_context) class ProxyNode(CoercionNode): """ A node that should not be replaced by transforms or other means, and hence can be useful to wrap the argument to a clone node MyNode -> ProxyNode -> ArgNode CloneNode -^ """ nogil_check = None def __init__(self, arg): super(ProxyNode, self).__init__(arg) self.constant_result = arg.constant_result self._proxy_type() def analyse_expressions(self, env): self.arg = self.arg.analyse_expressions(env) self._proxy_type() return self def _proxy_type(self): if hasattr(self.arg, 'type'): self.type = self.arg.type self.result_ctype = self.arg.result_ctype if hasattr(self.arg, 'entry'): self.entry = self.arg.entry def generate_result_code(self, code): self.arg.generate_result_code(code) def result(self): return self.arg.result() def is_simple(self): return self.arg.is_simple() def may_be_none(self): return self.arg.may_be_none() def generate_evaluation_code(self, code): self.arg.generate_evaluation_code(code) def generate_result_code(self, code): self.arg.generate_result_code(code) def 
generate_disposal_code(self, code): self.arg.generate_disposal_code(code) def free_temps(self, code): self.arg.free_temps(code) class CloneNode(CoercionNode): # This node is employed when the result of another node needs # to be used multiple times. The argument node's result must # be in a temporary. This node "borrows" the result from the # argument node, and does not generate any evaluation or # disposal code for it. The original owner of the argument # node is responsible for doing those things. subexprs = [] # Arg is not considered a subexpr nogil_check = None def __init__(self, arg): CoercionNode.__init__(self, arg) self.constant_result = arg.constant_result if hasattr(arg, 'type'): self.type = arg.type self.result_ctype = arg.result_ctype if hasattr(arg, 'entry'): self.entry = arg.entry def result(self): return self.arg.result() def may_be_none(self): return self.arg.may_be_none() def type_dependencies(self, env): return self.arg.type_dependencies(env) def infer_type(self, env): return self.arg.infer_type(env) def analyse_types(self, env): self.type = self.arg.type self.result_ctype = self.arg.result_ctype self.is_temp = 1 if hasattr(self.arg, 'entry'): self.entry = self.arg.entry return self def is_simple(self): return True # result is always in a temp (or a name) def generate_evaluation_code(self, code): pass def generate_result_code(self, code): pass def generate_disposal_code(self, code): pass def free_temps(self, code): pass class CMethodSelfCloneNode(CloneNode): # Special CloneNode for the self argument of builtin C methods # that accepts subtypes of the builtin type. This is safe only # for 'final' subtypes, as subtypes of the declared type may # override the C method. 
def coerce_to(self, dst_type, env): if dst_type.is_builtin_type and self.type.subtype_of(dst_type): return self return CloneNode.coerce_to(self, dst_type, env) class ModuleRefNode(ExprNode): # Simple returns the module object type = py_object_type is_temp = False subexprs = [] def analyse_types(self, env): return self def may_be_none(self): return False def calculate_result_code(self): return Naming.module_cname def generate_result_code(self, code): pass class DocstringRefNode(ExprNode): # Extracts the docstring of the body element subexprs = ['body'] type = py_object_type is_temp = True def __init__(self, pos, body): ExprNode.__init__(self, pos) assert body.type.is_pyobject self.body = body def analyse_types(self, env): return self def generate_result_code(self, code): code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % ( self.result(), self.body.result(), code.intern_identifier(StringEncoding.EncodedString("__doc__")), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.result()) #------------------------------------------------------------------------------------ # # Runtime support code # #------------------------------------------------------------------------------------ pyerr_occurred_withgil_utility_code= UtilityCode( proto = """ static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */ """, impl = """ static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) { int err; #ifdef WITH_THREAD PyGILState_STATE _save = PyGILState_Ensure(); #endif err = !!PyErr_Occurred(); #ifdef WITH_THREAD PyGILState_Release(_save); #endif return err; } """ ) #------------------------------------------------------------------------------------ raise_unbound_local_error_utility_code = UtilityCode( proto = """ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); """, impl = """ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before 
assignment", varname); } """) raise_closure_name_error_utility_code = UtilityCode( proto = """ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname); """, impl = """ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) { PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname); } """) # Don't inline the function, it should really never be called in production raise_unbound_memoryview_utility_code_nogil = UtilityCode( proto = """ static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname); """, impl = """ static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) { #ifdef WITH_THREAD PyGILState_STATE gilstate = PyGILState_Ensure(); #endif __Pyx_RaiseUnboundLocalError(varname); #ifdef WITH_THREAD PyGILState_Release(gilstate); #endif } """, requires = [raise_unbound_local_error_utility_code]) #------------------------------------------------------------------------------------ raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c") raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c") tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c") #------------------------------------------------------------------------------------ int_pow_utility_code = UtilityCode( proto=""" static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */ """, impl=""" static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) { %(type)s t = b; switch (e) { case 3: t *= b; case 2: t *= b; case 1: return t; case 0: return 1; } #if %(signed)s if (unlikely(e<0)) return 0; #endif t = 1; while (likely(e)) { t *= (b * (e&1)) | ((~e)&1); /* 1 or b */ b *= b; e >>= 1; } return t; } """) # ------------------------------ Division ------------------------------------ div_int_utility_code = UtilityCode( proto=""" static 
CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */ """, impl=""" static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) { %(type)s q = a / b; %(type)s r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } """) mod_int_utility_code = UtilityCode( proto=""" static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ """, impl=""" static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { %(type)s r = a %% b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } """) mod_float_utility_code = UtilityCode( proto=""" static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ """, impl=""" static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { %(type)s r = fmod%(math_h_modifier)s(a, b); r += ((r != 0) & ((r < 0) ^ (b < 0))) * b; return r; } """) cdivision_warning_utility_code = UtilityCode( proto=""" static int __Pyx_cdivision_warning(const char *, int); /* proto */ """, impl=""" static int __Pyx_cdivision_warning(const char *filename, int lineno) { #if CYTHON_COMPILING_IN_PYPY filename++; // avoid compiler warnings lineno++; return PyErr_Warn(PyExc_RuntimeWarning, "division with oppositely signed operands, C and Python semantics differ"); #else return PyErr_WarnExplicit(PyExc_RuntimeWarning, "division with oppositely signed operands, C and Python semantics differ", filename, lineno, __Pyx_MODULE_NAME, NULL); #endif } """) # from intobject.c division_overflow_test_code = UtilityCode( proto=""" #define UNARY_NEG_WOULD_OVERFLOW(x) \ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) """)
bsd-3-clause
xguse/bioconda-recipes
recipes/phylip/drawtree.py
57
3832
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options.  Adapted from the bash script
# (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import re
import shutil
import subprocess
import sys
from os import access
from os import getenv
from os import X_OK

# Name of the jar this wrapper launches (looked up under <dist>/java).
jar_file = 'DrawTree.jar'

# JVM heap defaults used only when the caller supplies no -Xm* option
# and _JAVA_OPTIONS is not set in the environment.
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']

# !!! End of parameter section. No user-serviceable code below this line !!!


def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    return os.path.dirname(os.path.realpath(path))


def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers ``$JAVA_HOME/bin/java`` when it exists and is executable;
    otherwise falls back to whatever ``java`` resolves to on PATH.
    """
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')
    if java_home and access(os.path.join(java_home, java_bin), X_OK):
        return os.path.join(java_home, java_bin)
    return 'java'


def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:

        (memory_options, prop_options, passthrough_options, exec_dir)

    where the first three elements are lists of strings and exec_dir is
    None unless a ``--exec_dir=DIR`` argument was given.  As a side
    effect, a missing exec_dir is created by copying the distribution
    that lives next to this wrapper script.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            # split(..., 1) so an '=' inside the directory path survives.
            exec_dir = arg.split('=', 1)[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: seed it with a private copy
                # of the distribution (jar, lib, resources) found next to
                # this wrapper script.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)


def main():
    """Assemble the java command line, run the jar, and exit with its status.

    This program updates files relative to the path of the jar file.  In a
    multiuser setting the option --exec_dir="DIR" can be used as the
    location for the phylip distribution; if DIR does not exist, the jar
    file, lib and resources are copied there first (see jvm_opts).
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = "{jd}/java".format(
        jd=exec_dir if exec_dir else real_dirname(sys.argv[0]))

    # A leading 'eu...' passthrough argument names a main class, so run
    # with -cp; otherwise launch the jar directly with -jar.
    if pass_args and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    # Trying to fix font problems.  Phylip appears to require that fonts
    # are placed in the working directory.  Current workaround is to
    # temporarily link them there and then remove the links.  Far from an
    # optimal solution, but nothing else works.
    # NOTE(review): shell=True with an interpolated path is kept for
    # behaviour compatibility; the path comes from the local install, not
    # from untrusted input.
    subprocess.call(
        "ln -s {fd}/* .".format(fd=re.sub("java", "fonts", jar_dir)),
        shell=True)

    # Execute the jar file and record its exit status.
    sig = subprocess.call(java_args)

    # Remove the temporary font links.
    subprocess.call(["rm -f font[1-6] fontfile"], shell=True)

    # Propagate the jar file's exit status to our caller.
    sys.exit(sig)


if __name__ == '__main__':
    main()
mit
lithint/pyGT511C3
FPS.py
1
30349
'''
Created on 21/03/2014

Driver for the ADH-Tech GT-511C3 fingerprint scanner over a serial port.

@author: Jean Machuca <correojean@gmail.com> @jeanmachuca
'''
import os
import time
import binascii


def delay(seconds):
    ''' Wait a number of seconds. '''
    time.sleep(seconds)


def serial_ports():
    ''' Return a list of the available serial port names.

    NOTE(review): despite the original docstring ("generator"), this has
    always returned a list.
    '''
    # Imported lazily so this module can be imported without pyserial
    # installed (the original import at module level made that impossible).
    from serial.tools import list_ports
    if os.name == 'nt':  # windows
        _comports = [port for port in list_ports.comports()]
        if len(_comports) > 0:
            # NOTE(review): this expands the fields of the FIRST port entry
            # only, not the name of every port; it looks like a latent bug
            # but is preserved as-is -- confirm on a Windows machine.
            _serial_ports = [p for p in _comports[0]]
        else:
            _serial_ports = []
    else:  # unix
        _serial_ports = [port[0] for port in list_ports.comports()]
    return _serial_ports


def devices(index=None):
    ''' Device list.

    :param index: if this param is not None, then returns the device name
        at that index in the list.
    '''
    portList = [portName for portName in serial_ports()]
    return portList if index is None else portList[index]


if os.name == 'nt':
    DEVICE_NAME = 'COM3'
else:
    DEVICE_NAME = '/dev/cu.usbserial-A601EQ14'  # default device to use


def isFingerPrintConnected(is_com=True):
    ''' Detect if the fingerprint device is present in the device list;
    only meaningful for COM ports (always True when is_com is False). '''
    return True if (not is_com) or DEVICE_NAME in devices() else False


class Packet:
    ''' Generic internal packet: shared byte helpers and framing constants. '''

    COMMAND_START_CODE_1 = 0x55  # Static byte marking the start of a command packet
    COMMAND_START_CODE_2 = 0xAA  # Static byte marking the start of a command packet
    COMMAND_DEVICE_ID_1 = 0x01   # Device ID byte 1 (lesser byte)
    COMMAND_DEVICE_ID_2 = 0x00   # Device ID byte 2 (greater byte)

    def GetHighByte(self, w):
        ''' Return the high byte of a 16-bit word. '''
        return (w >> 8) & 0x00FF

    def GetLowByte(self, w):
        ''' Return the low byte of a 16-bit word. '''
        return w & 0x00FF

    def CalculateCheckSum(self, bytearr):
        ''' Sum of all byte values in the buffer.

        Python 3 fix: iterating bytes yields ints already, so the original
        sum(map(ord, bytes(...))) raised TypeError under Python 3.
        '''
        return sum(bytearray(bytearr))

    def serializeToSend(self, bytearr):
        ''' Render a buffer as space-separated lowercase hex pairs. '''
        hexstr = binascii.hexlify(bytes(bytearr)).decode('ascii')
        return ' '.join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))


class Command_Packet(Packet):
    ''' Command packet class: builds the 12-byte serial command message. '''

    commands = {
        'NotSet': 0x00,               # Default value for enum. Scanner returns error if sent this.
        'Open': 0x01,                 # Open Initialization
        'Close': 0x02,                # Close Termination
        'UsbInternalCheck': 0x03,     # Check if the connected USB device is valid
        'ChangeBaudrate': 0x04,       # Change UART baud rate
        'SetIAPMode': 0x05,           # Enter IAP Mode; in this mode FW Upgrade is available
        'CmosLed': 0x12,              # Control CMOS LED
        'GetEnrollCount': 0x20,       # Get enrolled fingerprint count
        'CheckEnrolled': 0x21,        # Check whether the specified ID is already enrolled
        'EnrollStart': 0x22,          # Start an enrollment
        'Enroll1': 0x23,              # Make 1st template for an enrollment
        'Enroll2': 0x24,              # Make 2nd template for an enrollment
        'Enroll3': 0x25,              # Make 3rd template, merge three into one, save merged template
        'IsPressFinger': 0x26,        # Check if a finger is placed on the sensor
        'DeleteID': 0x40,             # Delete the fingerprint with the specified ID
        'DeleteAll': 0x41,            # Delete all fingerprints from the database
        'Verify1_1': 0x50,            # Verify captured fingerprint image with the specified ID
        'Identify1_N': 0x51,          # Identify captured fingerprint image against the database
        'VerifyTemplate1_1': 0x52,    # Verify a fingerprint template with the specified ID
        'IdentifyTemplate1_N': 0x53,  # Identify a fingerprint template against the database
        'CaptureFinger': 0x60,        # Capture a fingerprint image (256x256) from the sensor
        'MakeTemplate': 0x61,         # Make template for transmission
        'GetImage': 0x62,             # Download the captured fingerprint image (256x256)
        'GetRawImage': 0x63,          # Capture & download raw fingerprint image (320x240)
        'GetTemplate': 0x70,          # Download the template of the specified ID
        'SetTemplate': 0x71,          # Upload the template of the specified ID
        'GetDatabaseStart': 0x72,     # Start database download, obsolete
        'GetDatabaseEnd': 0x73,       # End database download, obsolete
        'UpgradeFirmware': 0x80,      # Not supported
        'UpgradeISOCDImage': 0x81,    # Not supported
        'Ack': 0x30,                  # Acknowledge
        'Nack': 0x31,                 # Non-acknowledge
    }

    def __init__(self, *args, **kwargs):
        ''' Command packet constructor.

        :param args[0]: command name, a key of ``commands``
        :param UseSerialDebug: print debug traces (default True)
        '''
        commandName = args[0]
        kwargs.setdefault('UseSerialDebug', True)
        self.UseSerialDebug = kwargs['UseSerialDebug']
        # Per-instance buffers.  The original declared these as CLASS
        # attributes, so every packet instance shared one mutable bytearray.
        self.command = bytearray(2)
        self.Parameter = bytearray(4)
        if self.UseSerialDebug:
            print('Command: %s' % commandName)
        self.cmd = self.commands[commandName]

    def GetPacketBytes(self):
        ''' Return the 12 bytes of the generated command packet. '''
        self.command[0] = self.GetLowByte(self.cmd)
        self.command[1] = self.GetHighByte(self.cmd)

        packetbytes = bytearray(12)
        packetbytes[0] = self.COMMAND_START_CODE_1
        packetbytes[1] = self.COMMAND_START_CODE_2
        packetbytes[2] = self.COMMAND_DEVICE_ID_1
        packetbytes[3] = self.COMMAND_DEVICE_ID_2
        packetbytes[4] = self.Parameter[0]
        packetbytes[5] = self.Parameter[1]
        packetbytes[6] = self.Parameter[2]
        packetbytes[7] = self.Parameter[3]
        packetbytes[8] = self.command[0]
        packetbytes[9] = self.command[1]
        # NOTE(review): the checksum covers bytes 0..8 only; byte 9 (the
        # command high byte) is zero for every defined command, so this
        # matches the device in practice -- confirm against the datasheet.
        chksum = self.CalculateCheckSum(packetbytes[0:9])
        packetbytes[10] = self.GetLowByte(chksum)
        packetbytes[11] = self.GetHighByte(chksum)
        return packetbytes

    def ParameterFromInt(self, i):
        ''' Store *i* little-endian into the 4-byte Parameter array. '''
        self.Parameter[0] = (i & 0x000000ff)
        self.Parameter[1] = (i & 0x0000ff00) >> 8
        self.Parameter[2] = (i & 0x00ff0000) >> 16
        self.Parameter[3] = (i & 0xff000000) >> 24


class Response_Packet(Packet):
    ''' Response packet class: parses a 12-byte reply from the scanner. '''

    errors = {
        'NO_ERROR': 0x0000,                   # Default value, no error
        'NACK_TIMEOUT': 0x1001,               # Obsolete, capture timeout
        'NACK_INVALID_BAUDRATE': 0x1002,      # Obsolete, invalid serial baud rate
        'NACK_INVALID_POS': 0x1003,           # The specified ID is not between 0~199
        'NACK_IS_NOT_USED': 0x1004,           # The specified ID is not used
        'NACK_IS_ALREADY_USED': 0x1005,       # The specified ID is already used
        'NACK_COMM_ERR': 0x1006,              # Communication error
        'NACK_VERIFY_FAILED': 0x1007,         # 1:1 verification failure
        'NACK_IDENTIFY_FAILED': 0x1008,       # 1:N identification failure
        'NACK_DB_IS_FULL': 0x1009,            # The database is full
        'NACK_DB_IS_EMPTY': 0x100A,           # The database is empty
        'NACK_TURN_ERR': 0x100B,              # Obsolete, invalid order of the enrollment
        'NACK_BAD_FINGER': 0x100C,            # Too bad fingerprint
        'NACK_ENROLL_FAILED': 0x100D,         # Enrollment failure
        'NACK_IS_NOT_SUPPORTED': 0x100E,      # The specified command is not supported
        'NACK_DEV_ERR': 0x100F,               # Device error, especially if Crypto-Chip is trouble
        'NACK_CAPTURE_CANCELED': 0x1010,      # Obsolete, the capturing is canceled
        'NACK_INVALID_PARAM': 0x1011,         # Invalid parameter
        'NACK_FINGER_IS_NOT_PRESSED': 0x1012, # Finger is not pressed
        'INVALID': 0XFFFF,                    # Used when parsing fails
    }

    def __init__(self, _buffer=None, UseSerialDebug=False):
        ''' Create and parse a response packet from the fingerprint scanner. '''
        self.UseSerialDebug = UseSerialDebug
        # Per-instance buffers.  The original declared these as CLASS
        # attributes, sharing one mutable bytearray across all responses.
        self.RawBytes = bytearray(12)
        self.ParameterBytes = bytearray(4)
        self.ResponseBytes = bytearray(2)
        self._lastBuffer = b''
        self.ACK = False
        self.Error = None
        if _buffer is not None:
            self.RawBytes = _buffer
            self._lastBuffer = bytes(_buffer)
            if self.UseSerialDebug:
                print('readed: %s' % self.serializeToSend(_buffer))
            if len(_buffer) >= 12:
                self.ACK = True if _buffer[8] == 0x30 else False
                self.ParameterBytes[0] = _buffer[4]
                self.ParameterBytes[1] = _buffer[5]
                self.ParameterBytes[2] = _buffer[6]
                self.ParameterBytes[3] = _buffer[7]
                self.ResponseBytes[0] = _buffer[8]
                self.ResponseBytes[1] = _buffer[9]
                self.Error = self.ParseFromBytes(
                    self.GetHighByte(_buffer[5]), self.GetLowByte(_buffer[4]))

    def ParseFromBytes(self, high, low):
        ''' Map the error bytes to one of the ``errors`` keys.

        NOTE(review): only the low byte is compared against the full 16-bit
        error codes, and the caller derives *high* from a single byte (so it
        is always 0); in practice this returns 'INVALID'.  Behaviour is
        preserved -- confirm against the GT-511C3 datasheet before fixing.
        '''
        e = 'INVALID'
        if high == 0x01:
            # list() needed on Python 3, where dict views have no .index().
            values = list(self.errors.values())
            if low in values:
                e = list(self.errors.keys())[values.index(low)]
        return e

    def IntFromParameter(self):
        ''' Reassemble the 4 little-endian parameter bytes into an int. '''
        retval = 0
        retval = (retval << 8) + self.ParameterBytes[3]
        retval = (retval << 8) + self.ParameterBytes[2]
        retval = (retval << 8) + self.ParameterBytes[1]
        retval = (retval << 8) + self.ParameterBytes[0]
        return retval


class SerialCommander:
    ''' Serializes byte buffers to/from the serial port. '''

    def __serialize_args_hex__(self, *arg, **kwargs):
        ''' Pack keyword-argument values into a bytes object. '''
        return bytes(bytearray([v for v in kwargs.values()]))

    def serializeToSend(self, bytearr):
        ''' Render a buffer as space-separated lowercase hex pairs. '''
        hexstr = binascii.hexlify(bytes(bytearr)).decode('ascii')
        return ' '.join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))

    def unserializeFromRead(self, char_readed, bytearr):
        ''' Append one read byte to the accumulating buffer. '''
        bytearr.append(char_readed)
        return bytearr


def connect(device_name=None, baud=None, timeout=None, is_com=True):
    ''' Open a pyserial connection to the scanner.

    Returns the serial object, or None when the device is absent or the
    port cannot be opened.
    '''
    # The original assigned DEVICE_NAME without ``global``, making the
    # else-branch a dead local assignment; the intent was clearly to
    # remember the last explicitly-selected device.
    global DEVICE_NAME
    import serial  # lazy: keeps the module importable without pyserial
    _ser = None
    if device_name is None:
        device_name = DEVICE_NAME
    else:
        DEVICE_NAME = device_name
    if baud is None:
        baud = 9600
    if timeout is None:
        timeout = 10000
    if isFingerPrintConnected(is_com):
        try:
            _ser = serial.Serial(device_name, baudrate=baud, timeout=timeout)
            if not _ser.isOpen():
                _ser.open()
        except Exception as e:
            print('[Connect] No es posible conectar al dispositivo %s' % (str(e)))
    return _ser


# BAUD = 115200
BAUD = 9600


class FPS_GT511C3(SerialCommander):
    ''' High-level interface to the GT-511C3 fingerprint scanner. '''

    _serial = None
    _lastResponse = None
    _device_name = None
    _baud = None
    _timeout = None

    # Enables verbose debug output over the serial channel.
    UseSerialDebug = True

    def __init__(self, device_name=None, baud=None, timeout=None, is_com=True):
        ''' Create a new object to interface with the fingerprint scanner. '''
        self._device_name = device_name
        self._baud = baud
        self._timeout = timeout
        self._serial = connect(device_name, baud, timeout, is_com=is_com)
        if self._serial is not None:
            delay(0.1)
            self.Open()
        elif self.UseSerialDebug:
            print('[FPS_GT511C3] No es posible conectar con el dispositivo %s' % self._device_name)

    def Open(self):
        ''' Initialise the device and get ready for commands. '''
        self.ChangeBaudRate(BAUD)
        delay(0.1)
        cp = Command_Packet('Open', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(1)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def Close(self):
        ''' Does not actually do anything on the device (per the datasheet);
        sends Close for symmetry with Open and closes the serial port. '''
        cp = Command_Packet('Close', UseSerialDebug=self.UseSerialDebug)
        cp.Parameter[0] = 0x00
        cp.Parameter[1] = 0x00
        cp.Parameter[2] = 0x00
        cp.Parameter[3] = 0x00
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        if self._serial is not None:
            self._serial.close()
        return rp.ACK

    def SetLED(self, on=True):
        ''' Turn the CMOS LED backlight on or off.

        The LED must be on to see fingerprints.
        Returns True if successful, False if not.
        '''
        cp = Command_Packet('CmosLed', UseSerialDebug=self.UseSerialDebug)
        cp.Parameter[0] = 0x01 if on else 0x00
        cp.Parameter[1] = 0x00
        cp.Parameter[2] = 0x00
        cp.Parameter[3] = 0x00
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def ChangeBaudRate(self, baud):
        ''' Change the baud rate of the connection (9600 - 115200).

        Returns True on success, False for an invalid baud.
        NOTE: untested upstream.
        '''
        # The original assigned BAUD without ``global``, so the module-level
        # default was never actually updated.
        global BAUD
        retval = False
        # NOTE(review): getBaudrate() is the pyserial 2.x API; pyserial 3
        # exposes the ``baudrate`` property instead.
        if baud != self._serial.getBaudrate():
            cp = Command_Packet('ChangeBaudrate', UseSerialDebug=self.UseSerialDebug)
            cp.ParameterFromInt(baud)
            packetbytes = cp.GetPacketBytes()
            self.SendCommand(packetbytes, 12)
            delay(0.5)
            rp = self.GetResponse()
            delay(0.5)
            retval = rp.ACK
            if retval:
                if self.UseSerialDebug:
                    print('Changing port baudrate')
                self._serial.close()
                BAUD = baud
                self._serial = connect(self._device_name, self._baud, self._timeout)
        return retval

    def GetEnrollCount(self):
        ''' Return the total number of enrolled fingerprints. '''
        cp = Command_Packet('GetEnrollCount', UseSerialDebug=self.UseSerialDebug)
        cp.Parameter[0] = 0x00
        cp.Parameter[1] = 0x00
        cp.Parameter[2] = 0x00
        cp.Parameter[3] = 0x00
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.IntFromParameter()

    def CheckEnrolled(self, ID):
        ''' Check whether *ID* (0-199) is in use.

        Returns True if the ID number is enrolled, False if not.
        '''
        cp = Command_Packet('CheckEnrolled', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def EnrollStart(self, ID):
        ''' Start the enrollment process for *ID* (0-199).

        Returns: 0 - ACK, 1 - database is full, 2 - invalid position,
        3 - position (ID) is already used.
        '''
        cp = Command_Packet('EnrollStart', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        retval = 0
        if not rp.ACK:
            if rp.Error == rp.errors['NACK_DB_IS_FULL']:
                retval = 1
            elif rp.Error == rp.errors['NACK_INVALID_POS']:
                retval = 2
            elif rp.Error == rp.errors['NACK_IS_ALREADY_USED']:
                retval = 3
        return retval

    def _enroll_step(self, command_name):
        ''' Shared implementation of Enroll1/Enroll2/Enroll3 (the three
        bodies were identical except for the command name). '''
        cp = Command_Packet(command_name, UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        retval = rp.IntFromParameter()
        retval = 3 if retval < 200 else 0
        if not rp.ACK:
            if rp.Error == rp.errors['NACK_ENROLL_FAILED']:
                retval = 1
            elif rp.Error == rp.errors['NACK_BAD_FINGER']:
                retval = 2
        return 0 if rp.ACK else retval

    def Enroll1(self):
        ''' First scan of an enrollment.

        Returns: 0 - ACK, 1 - enroll failed, 2 - bad finger, 3 - ID in use.
        '''
        return self._enroll_step('Enroll1')

    def Enroll2(self):
        ''' Second scan of an enrollment.

        Returns: 0 - ACK, 1 - enroll failed, 2 - bad finger, 3 - ID in use.
        '''
        return self._enroll_step('Enroll2')

    def Enroll3(self):
        ''' Third scan of an enrollment; finishes enrollment.

        Returns: 0 - ACK, 1 - enroll failed, 2 - bad finger, 3 - ID in use.
        '''
        return self._enroll_step('Enroll3')

    def IsPressFinger(self):
        ''' Return True if a finger is pressed on the sensor, else False.

        The device reports 0 in the parameter when a finger is present.
        '''
        cp = Command_Packet('IsPressFinger', UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        pval = rp.ParameterBytes[0]
        pval += rp.ParameterBytes[1]
        pval += rp.ParameterBytes[2]
        pval += rp.ParameterBytes[3]
        return True if pval == 0 else False

    def DeleteID(self, ID):
        ''' Delete the enrollment with the given ID.

        Returns True if successful, False if the position is invalid.
        '''
        cp = Command_Packet('DeleteID', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def DeleteAll(self):
        ''' Delete all enrollments from the database.

        Returns True if successful, False if the database is empty.
        '''
        cp = Command_Packet('DeleteAll', UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def Verify1_1(self, ID):
        ''' Check the currently pressed finger against ID 0-199.

        Returns: 0 - verified OK, 1 - invalid position,
        2 - ID is not in use, 3 - verified FALSE (wrong finger).
        '''
        cp = Command_Packet('Verify1_1', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        retval = 0
        if not rp.ACK:
            if rp.Error == rp.errors['NACK_INVALID_POS']:
                retval = 1
            elif rp.Error == rp.errors['NACK_IS_NOT_USED']:
                retval = 2
            elif rp.Error == rp.errors['NACK_VERIFY_FAILED']:
                retval = 3
        return retval

    def Identify1_N(self):
        ''' Check the pressed finger against all enrolled fingerprints.

        Returns 0-199 (the matching ID) or 200 when no match is found.
        '''
        cp = Command_Packet('Identify1_N', UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        retval = rp.IntFromParameter()
        if retval > 200:
            retval = 200
        return retval

    def CaptureFinger(self, highquality=True):
        ''' Capture the pressed finger into onboard RAM.

        :param highquality: True for a high quality image (slower, use for
            enrollment), False for low quality (faster, use for verify).
        Returns True if ok, False if no finger is pressed.
        '''
        cp = Command_Packet('CaptureFinger', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(1 if highquality else 0)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def GetImage(self):
        ''' Request download of the captured 256x256 fingerprint image.

        Returns True (device confirming the download is starting).
        '''
        cp = Command_Packet('GetImage', UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def GetRawImage(self):
        ''' Request capture & download of the raw qvga 320x240 image.

        Returns True (device confirming the download is starting).
        '''
        cp = Command_Packet('GetRawImage', UseSerialDebug=self.UseSerialDebug)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        return rp.ACK

    def GetTemplate(self, ID):
        ''' Request download of the template for *ID* (0-199).

        Returns: 0 - ACK, download starting; 1 - invalid position;
        2 - ID not used (no template to download).
        '''
        cp = Command_Packet('GetTemplate', UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        retval = 0
        if not rp.ACK:
            if rp.Error == rp.errors['NACK_INVALID_POS']:
                retval = 1
            elif rp.Error == rp.errors['NACK_IS_NOT_USED']:
                retval = 2
        return retval

    # Not implemented (carried over from the Arduino original):
    #   SetTemplate       - upload a template (498 bytes) to the fps;
    #                       returns 0-199 ID duplicated, 200 uploaded ok,
    #                       201 invalid position, 202 comms error,
    #                       203 device error
    #   VerifyTemplate1_1 / IdentifyTemplate1_N / MakeTemplate
    #                     - no good reason to implement on an Arduino
    #   UsbInternalCheck  - not a valid config for Arduino
    #   GetDatabaseStart / GetDatabaseEnd - historical, no longer supported
    #   UpgradeFirmware / UpgradeISOCDImage - datasheet says not supported
    #   SetIAPMode        - for upgrading firmware (not supported)
    #   Ack / Nack        - listed as commands but are response codes

    def SendCommand(self, cmd, length):
        ''' Write a command packet to the serial port.

        StartDataDownload / GetNextDataPacket from the Arduino original are
        not implemented (memory restrictions there; no need here yet).
        '''
        if self._serial is not None:
            self._serial.write(bytes(cmd))
            if self.UseSerialDebug:
                print(self.serializeToSend(cmd))
                print(bytes(cmd))
                print(repr(bytes(cmd))[1:-1])
        else:
            if self.UseSerialDebug:
                print('[SendCommand] No es posible escribir en %s' % self._device_name)

    def GetResponse(self):
        ''' Wait for, read, and assemble the response to the last command. '''
        interval = 0.1
        delay(interval)
        if self._serial is None:
            rp = Response_Packet()
            print('[GetResponse] No es posible leer desde: %s' % self._device_name)
        else:
            r = bytearray(self._serial.read(self._serial.inWaiting()))
            rp = Response_Packet(r, self.UseSerialDebug)
            if rp.ACK:
                delay(interval)
                r2 = bytearray(self._serial.read(self._serial.inWaiting()))
                rp2 = Response_Packet(r2, self.UseSerialDebug)
                # Keep draining the port until a read returns nothing.
                # Python 3 fix: the original tested str(buffer).__len__(),
                # which is "b''" (length 3) for an empty bytes object and
                # would therefore never terminate.
                while len(rp2._lastBuffer) > 0:
                    rp.RawBytes.extend(rp2.RawBytes)
                    rp._lastBuffer += rp2._lastBuffer
                    delay(interval)
                    r2 = bytearray(self._serial.read(self._serial.inWaiting()))
                    rp2 = Response_Packet(r2, self.UseSerialDebug)
        self._lastResponse = rp
        return rp
lgpl-3.0
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/docutils/writers/docutils_xml.py
16
7687
# $Id: docutils_xml.py 7966 2016-08-18 13:06:09Z milde $
# Author: David Goodger, Paul Tremblay, Guenter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.

"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""

__docformat__ = 'reStructuredText'

import sys

# Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib
# replaces its own xml module with PyXML if the latter is installed. However,
# PyXML is no longer maintained and partially incompatible/buggy.) Reverse
# the order in which xml module and submodules are searched to import stdlib
# modules if they exist and PyXML modules if they do not exist in the stdlib.
#
# See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030
# and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html
import xml
if "_xmlplus" in xml.__path__[0]:  # PyXML sub-module
    xml.__path__.reverse()  # If both are available, prefer stdlib over PyXML

import xml.sax.saxutils

try:
    # Python 2: byte-string capable StringIO (what the original imported).
    from StringIO import StringIO
except ImportError:
    # Python 3 fix: the StringIO module no longer exists.
    from io import StringIO

import docutils
from docutils import frontend, writers, nodes


class RawXmlError(docutils.ApplicationError):
    """Raised when raw XML content cannot be handled."""
    pass


class Writer(writers.Writer):
    """Writer producing Docutils XML output."""

    supported = ('xml',)
    """Formats this writer supports."""

    settings_spec = (
        '"Docutils XML" Writer Options',
        None,
        (('Generate XML with newlines before and after tags.',
          ['--newlines'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Generate XML with indents and newlines.',
          ['--indents'],  # @ TODO use integer value for number of spaces?
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Omit the XML declaration.  Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'],
          {'dest': 'doctype_declaration', 'default': 1,
           'action': 'store_false',
           'validator': frontend.validate_boolean}),))

    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}

    config_section = 'docutils_xml writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = XMLTranslator

    def translate(self):
        """Walk the document tree and join the translator's output."""
        self.visitor = visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = ''.join(visitor.output)


class XMLTranslator(nodes.GenericNodeVisitor):
    """Node visitor emitting one XML fragment per visited node."""

    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    # TODO: add stylesheet options similar to HTML and LaTeX writers?
    # xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'

    xmlparser = xml.sax.make_parser()
    """SAX parser instance to check/extract raw XML."""
    # SECURITY NOTE(review): enabling external general entities allows
    # XXE-style inclusion when parsing raw XML snippets; preserved because
    # callers may rely on it, but worth revisiting for untrusted input.
    xmlparser.setFeature(
        "http://xml.org/sax/features/external-general-entities", True)

    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)

        # Reporter
        self.warn = self.document.reporter.warning
        self.error = self.document.reporter.error

        # Settings
        self.settings = settings = document.settings
        self.indent = self.newline = ''
        if settings.newlines:
            self.newline = '\n'
        if settings.indents:
            self.newline = '\n'
            self.indent = '    '  # @ TODO make this configurable?
        self.level = 0       # indentation level
        self.in_simple = 0   # level of nesting inside mixed-content elements
        self.fixed_text = 0  # level of nesting inside FixedText elements

        # Output
        self.output = []
        if settings.xml_declaration:
            self.output.append(
                self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            self.output.append(self.doctype)
        self.output.append(self.generator % docutils.__version__)

        # initialize XML parser
        self.the_handle = TestXml()
        self.xmlparser.setContentHandler(self.the_handle)

    # generic visit and depart methods
    # --------------------------------

    # Elements rendered inline (no newline/indent around their content).
    simple_nodes = (nodes.TextElement,
                    nodes.image, nodes.colspec, nodes.transition)

    def default_visit(self, node):
        """Default node visit method."""
        if not self.in_simple:
            self.output.append(self.indent * self.level)
        self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
        self.level += 1
        # @@ make nodes.literal an instance of FixedTextElement?
        if isinstance(node, (nodes.FixedTextElement, nodes.literal)):
            self.fixed_text += 1
        if isinstance(node, self.simple_nodes):
            self.in_simple += 1
        if not self.in_simple:
            self.output.append(self.newline)

    def default_departure(self, node):
        """Default node depart method."""
        self.level -= 1
        if not self.in_simple:
            self.output.append(self.indent * self.level)
        self.output.append(node.endtag())
        if isinstance(node, (nodes.FixedTextElement, nodes.literal)):
            self.fixed_text -= 1
        if isinstance(node, self.simple_nodes):
            self.in_simple -= 1
        if not self.in_simple:
            self.output.append(self.newline)

    # specific visit and depart methods
    # ---------------------------------

    def visit_Text(self, node):
        text = xml.sax.saxutils.escape(node.astext())
        # indent text if we are not in a FixedText element:
        if not self.fixed_text:
            text = text.replace('\n', '\n' + self.indent * self.level)
        self.output.append(text)

    def depart_Text(self, node):
        pass

    def visit_raw(self, node):
        if 'xml' not in node.get('format', '').split():
            # skip other raw content?
            # raise nodes.SkipNode
            self.default_visit(node)
            return
        # wrap in <raw> element
        self.default_visit(node)      # or not?
        xml_string = node.astext()
        self.output.append(xml_string)
        self.default_departure(node)  # or not?
        # Check validity of raw XML:
        # Python 3 fix: test the version BEFORE referencing ``unicode`` so
        # the name is never evaluated on Python 3 (where it is undefined).
        if sys.version_info < (3,) and isinstance(xml_string, unicode):  # noqa: F821
            xml_string = xml_string.encode('utf8')
        try:
            self.xmlparser.parse(StringIO(xml_string))
        except xml.sax._exceptions.SAXParseException as error:
            col_num = self.the_handle.locator.getColumnNumber()
            line_num = self.the_handle.locator.getLineNumber()
            srcline = node.line
            if not isinstance(node.parent, nodes.TextElement):
                srcline += 2  # directive content start line
            msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
                col_num, line_num, node.astext())
            self.warn(msg, source=node.source, line=srcline + line_num - 1)
        raise nodes.SkipNode  # content already processed


class TestXml(xml.sax.ContentHandler):
    """Minimal SAX handler that only records the document locator."""

    def setDocumentLocator(self, locator):
        # Kept so error positions can be reported by visit_raw.
        self.locator = locator
gpl-3.0
direvus/ansible
test/units/modules/cloud/amazon/test_aws_api_gateway.py
13
2454
# # (c) 2016 Michael De La Rue # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) import sys import pytest from ansible.module_utils.ec2 import HAS_BOTO3 from units.modules.utils import set_module_args if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules") import ansible.modules.cloud.amazon.aws_api_gateway as agw import ansible.module_utils.aws.core as core exit_return_dict = {} def fake_exit_json(self, **kwargs): """ store the kwargs given to exit_json rather than putting them out to stdout""" global exit_return_dict exit_return_dict = kwargs sys.exit(0) def test_upload_api(monkeypatch): class FakeConnection: def put_rest_api(self, *args, **kwargs): assert kwargs["body"] == "the-swagger-text-is-fake" return {"msg": "success!"} def return_fake_connection(*args, **kwargs): return FakeConnection() monkeypatch.setattr(core, "boto3_conn", return_fake_connection) monkeypatch.setattr(core.AnsibleAWSModule, "exit_json", fake_exit_json) set_module_args({ "api_id": "fred", "state": "present", "swagger_text": "the-swagger-text-is-fake", "region": 'mars-north-1', "_ansible_tmpdir": "/tmp/ansibl-abcdef", }) with pytest.raises(SystemExit): agw.main() assert exit_return_dict["changed"] def test_warn_if_region_not_specified(): 
set_module_args({ "name": "aws_api_gateway", "state": "present", "runtime": 'python2.7', "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution', "handler": 'lambda_python.my_handler'}) with pytest.raises(SystemExit): print(agw.main())
gpl-3.0
gregdek/ansible
lib/ansible/modules/network/junos/junos_package.py
31
7469
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}

DOCUMENTATION = """
---
module: junos_package
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Installs packages on remote devices running Junos
description:
  - This module can install new and updated packages on remote
    devices running Junos.  The module will compare the specified
    package with the one running on the remote device and install
    the specified version if there is a mismatch
extends_documentation_fragment: junos
options:
  src:
    description:
      - The I(src) argument specifies the path to the source package to be
        installed on the remote device in the advent of a version mismatch.
        The I(src) argument can be either a localized path or a full
        path to the package file to install.
    required: true
    aliases: ['package']
  version:
    description:
      - The I(version) argument can be used to explicitly specify the
        version of the package that should be installed on the remote
        device.  If the I(version) argument is not specified, then
        the version is extracts from the I(src) filename.
  reboot:
    description:
      - In order for a package to take effect, the remote device must be
        restarted.  When enabled, this argument will instruct the module
        to reboot the device once the updated package has been installed.
        If disabled or the remote package does not need to be changed,
        the device will not be started.
    type: bool
    default: 'yes'
  no_copy:
    description:
      - The I(no_copy) argument is responsible for instructing the remote
        device on where to install the package from.  When enabled, the
        package is transferred to the remote device prior to installing.
    type: bool
    default: 'no'
  validate:
    description:
      - The I(validate) argument is responsible for instructing the remote
        device to skip checking the current device configuration
        compatibility with the package being installed. When set to false
        validation is not performed.
    version_added: 2.5
    type: bool
    default: 'yes'
  force:
    description:
      - The I(force) argument instructs the module to bypass the package
        version check and install the packaged identified in I(src) on
        the remote device.
    type: bool
    default: 'no'
  force_host:
    description:
      - The I(force_host) argument controls the way software package or
        bundle is added on remote JUNOS host and is applicable for
        JUNOS QFX5100 device. If the value is set to C(True) it will ignore
        any warnings while adding the host software package or bundle.
    type: bool
    default: False
    version_added: 2.8
  issu:
    description:
      - The I(issu) argument is a boolean flag when set to C(True) allows
        unified in-service software upgrade (ISSU) feature which enables
        you to upgrade between two different Junos OS releases with no
        disruption on the control plane and with minimal disruption of
        traffic.
    type: bool
    default: False
    version_added: 2.8
requirements:
  - junos-eznc
  - ncclient (>=v0.5.2)
notes:
  - This module requires the netconf system service be enabled on
    the remote device being managed.
  - Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
  - Works with C(local) connections only.
"""

EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity

- name: install local package on remote device
  junos_package:
    src: junos-vsrx-12.1X46-D10.2-domestic.tgz

- name: install local package on remote device without rebooting
  junos_package:
    src: junos-vsrx-12.1X46-D10.2-domestic.tgz
    reboot: no
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_param
from ansible.module_utils._text import to_native

try:
    from jnpr.junos import Device
    from jnpr.junos.utils.sw import SW
    from jnpr.junos.exception import ConnectError
    HAS_PYEZ = True
except ImportError:
    HAS_PYEZ = False


def connect(module):
    """Open and return a PyEZ Device connection built from provider params.

    Fails the module (fail_json) when the NETCONF connection cannot be
    established.
    """
    host = get_param(module, 'host')

    kwargs = {
        'port': get_param(module, 'port') or 830,
        'user': get_param(module, 'username')
    }

    if get_param(module, 'password'):
        kwargs['passwd'] = get_param(module, 'password')

    if get_param(module, 'ssh_keyfile'):
        kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')

    kwargs['gather_facts'] = False

    try:
        device = Device(host, **kwargs)
        device.open()
        device.timeout = get_param(module, 'timeout') or 10
    except ConnectError as exc:
        module.fail_json(msg='unable to connect to %s: %s' % (host, to_native(exc)))

    return device


def install_package(module, device):
    """Install the package given in module params onto *device*.

    Fails the module when the install does not succeed; optionally reboots
    the device afterwards when the ``reboot`` param is set.
    """
    junos = SW(device)
    package = module.params['src']
    no_copy = module.params['no_copy']
    validate = module.params['validate']
    force_host = module.params['force_host']
    issu = module.params['issu']

    def progress_log(dev, report):
        # Forward PyEZ progress reports into the Ansible log.
        module.log(report)

    module.log('installing package')
    result = junos.install(package, progress=progress_log, no_copy=no_copy,
                           validate=validate, force_host=force_host, issu=issu)

    if not result:
        module.fail_json(msg='Unable to install package on device')

    if module.params['reboot']:
        module.log('rebooting system')
        junos.reboot()


def main():
    """ Main entry point for Ansible module execution
    """
    argument_spec = dict(
        src=dict(type='path', required=True, aliases=['package']),
        version=dict(),
        reboot=dict(type='bool', default=True),
        no_copy=dict(default=False, type='bool'),
        validate=dict(default=True, type='bool'),
        force=dict(type='bool', default=False),
        transport=dict(default='netconf', choices=['netconf']),
        force_host=dict(type='bool', default=False),
        issu=dict(type='bool', default=False)
    )

    argument_spec.update(junos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if module.params['provider'] is None:
        module.params['provider'] = {}

    if not HAS_PYEZ:
        module.fail_json(
            msg='junos-eznc is required but does not appear to be installed. '
                'It can be installed using `pip install junos-eznc`'
        )

    result = dict(changed=False)

    do_upgrade = module.params['force'] or False

    device = connect(module)

    if not module.params['force']:
        # facts_refresh() updates device.facts in place and returns None;
        # the original bound its return value to an unused local.
        device.facts_refresh()
        has_ver = device.facts.get('version')
        wants_ver = module.params['version']
        do_upgrade = has_ver != wants_ver

    if do_upgrade:
        if not module.check_mode:
            install_package(module, device)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
kawamon/hue
desktop/core/ext-py/pytest-4.6.11/testing/test_pastebin.py
3
4101
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import pytest class TestPasteCapture(object): @pytest.fixture def pastebinlist(self, monkeypatch, request): pastebinlist = [] plugin = request.config.pluginmanager.getplugin("pastebin") monkeypatch.setattr(plugin, "create_new_paste", pastebinlist.append) return pastebinlist def test_failed(self, testdir, pastebinlist): testpath = testdir.makepyfile( """ import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") """ ) reprec = testdir.inline_run(testpath, "--paste=failed") assert len(pastebinlist) == 1 s = pastebinlist[0] assert s.find("def test_fail") != -1 assert reprec.countoutcomes() == [1, 1, 1] def test_all(self, testdir, pastebinlist): from _pytest.pytester import LineMatcher testpath = testdir.makepyfile( """ import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") """ ) reprec = testdir.inline_run(testpath, "--pastebin=all", "-v") assert reprec.countoutcomes() == [1, 1, 1] assert len(pastebinlist) == 1 contents = pastebinlist[0].decode("utf-8") matcher = LineMatcher(contents.splitlines()) matcher.fnmatch_lines( [ "*test_pass PASSED*", "*test_fail FAILED*", "*test_skip SKIPPED*", "*== 1 failed, 1 passed, 1 skipped in *", ] ) def test_non_ascii_paste_text(self, testdir): """Make sure that text which contains non-ascii characters is pasted correctly. See #1219. 
""" testdir.makepyfile( test_unicode=""" # -*- coding: utf-8 -*- def test(): assert '☺' == 1 """ ) result = testdir.runpytest("--pastebin=all") if sys.version_info[0] >= 3: expected_msg = "*assert '☺' == 1*" else: expected_msg = "*assert '\\xe2\\x98\\xba' == 1*" result.stdout.fnmatch_lines( [ expected_msg, "*== 1 failed in *", "*Sending information to Paste Service*", ] ) class TestPaste(object): @pytest.fixture def pastebin(self, request): return request.config.pluginmanager.getplugin("pastebin") @pytest.fixture def mocked_urlopen(self, monkeypatch): """ monkeypatch the actual urlopen calls done by the internal plugin function that connects to bpaste service. """ calls = [] def mocked(url, data): calls.append((url, data)) class DummyFile(object): def read(self): # part of html of a normal response return b'View <a href="/raw/3c0c6750bd">raw</a>.' return DummyFile() if sys.version_info < (3, 0): import urllib monkeypatch.setattr(urllib, "urlopen", mocked) else: import urllib.request monkeypatch.setattr(urllib.request, "urlopen", mocked) return calls def test_create_new_paste(self, pastebin, mocked_urlopen): result = pastebin.create_new_paste(b"full-paste-contents") assert result == "https://bpaste.net/show/3c0c6750bd" assert len(mocked_urlopen) == 1 url, data = mocked_urlopen[0] assert type(data) is bytes lexer = "text" assert url == "https://bpaste.net" assert "lexer=%s" % lexer in data.decode() assert "code=full-paste-contents" in data.decode() assert "expiry=1week" in data.decode()
apache-2.0
ltilve/ChromiumGStreamerBackend
third_party/jinja2/exceptions.py
977
4428
# -*- coding: utf-8 -*- """ jinja2.exceptions ~~~~~~~~~~~~~~~~~ Jinja exceptions. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2._compat import imap, text_type, PY2, implements_to_string class TemplateError(Exception): """Baseclass for all template errors.""" if PY2: def __init__(self, message=None): if message is not None: message = text_type(message).encode('utf-8') Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message.decode('utf-8', 'replace') def __unicode__(self): return self.message or u'' else: def __init__(self, message=None): Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message @implements_to_string class TemplateNotFound(IOError, LookupError, TemplateError): """Raised if a template does not exist.""" # looks weird, but removes the warning descriptor that just # bogusly warns us about message being deprecated message = None def __init__(self, name, message=None): IOError.__init__(self) if message is None: message = name self.message = message self.name = name self.templates = [name] def __str__(self): return self.message class TemplatesNotFound(TemplateNotFound): """Like :class:`TemplateNotFound` but raised if multiple templates are selected. This is a subclass of :class:`TemplateNotFound` exception, so just catching the base exception will catch both. .. 
versionadded:: 2.2 """ def __init__(self, names=(), message=None): if message is None: message = u'none of the templates given were found: ' + \ u', '.join(imap(text_type, names)) TemplateNotFound.__init__(self, names and names[-1] or None, message) self.templates = list(names) @implements_to_string class TemplateSyntaxError(TemplateError): """Raised to tell the user that there is a problem with the template.""" def __init__(self, message, lineno, name=None, filename=None): TemplateError.__init__(self, message) self.lineno = lineno self.name = name self.filename = filename self.source = None # this is set to True if the debug.translate_syntax_error # function translated the syntax error into a new traceback self.translated = False def __str__(self): # for translated errors we only return the message if self.translated: return self.message # otherwise attach some stuff location = 'line %d' % self.lineno name = self.filename or self.name if name: location = 'File "%s", %s' % (name, location) lines = [self.message, ' ' + location] # if the source is set, add the line to the output if self.source is not None: try: line = self.source.splitlines()[self.lineno - 1] except IndexError: line = None if line: lines.append(' ' + line.strip()) return u'\n'.join(lines) class TemplateAssertionError(TemplateSyntaxError): """Like a template syntax error, but covers cases where something in the template caused an error at compile time that wasn't necessarily caused by a syntax error. However it's a direct subclass of :exc:`TemplateSyntaxError` and has the same attributes. """ class TemplateRuntimeError(TemplateError): """A generic runtime error in the template engine. Under some situations Jinja may raise this exception. """ class UndefinedError(TemplateRuntimeError): """Raised if a template tries to operate on :class:`Undefined`.""" class SecurityError(TemplateRuntimeError): """Raised if a template tries to do something insecure if the sandbox is enabled. 
""" class FilterArgumentError(TemplateRuntimeError): """This error is raised if a filter was called with inappropriate arguments """
bsd-3-clause
ProfessionalIT/maxigenios-website
sdk/google_appengine/lib/django-1.3/django/template/loaders/eggs.py
229
1432
# Wrapper for loading templates from eggs via pkg_resources.resource_string. try: from pkg_resources import resource_string except ImportError: resource_string = None from django.template.base import TemplateDoesNotExist from django.template.loader import BaseLoader from django.conf import settings class Loader(BaseLoader): is_usable = resource_string is not None def load_template_source(self, template_name, template_dirs=None): """ Loads templates from Python eggs via pkg_resource.resource_string. For every installed app, it tries to get the resource (app, template_name). """ if resource_string is not None: pkg_name = 'templates/' + template_name for app in settings.INSTALLED_APPS: try: return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name)) except: pass raise TemplateDoesNotExist(template_name) _loader = Loader() def load_template_source(template_name, template_dirs=None): import warnings warnings.warn( "'django.template.loaders.eggs.load_template_source' is deprecated; use 'django.template.loaders.eggs.Loader' instead.", DeprecationWarning ) return _loader.load_template_source(template_name, template_dirs) load_template_source.is_usable = resource_string is not None
mit
ecolitan/fatics
venv/lib/python2.7/site-packages/twisted/python/win32.py
36
5436
# -*- test-case-name: twisted.python.test.test_win32 -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Win32 utilities. See also twisted.python.shortcut. @var O_BINARY: the 'binary' mode flag on Windows, or 0 on other platforms, so it may safely be OR'ed into a mask for os.open. """ from __future__ import division, absolute_import import re import os try: import win32api import win32con except ImportError: pass from twisted.python.runtime import platform # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/debug/base/system_error_codes.asp ERROR_FILE_NOT_FOUND = 2 ERROR_PATH_NOT_FOUND = 3 ERROR_INVALID_NAME = 123 ERROR_DIRECTORY = 267 O_BINARY = getattr(os, "O_BINARY", 0) class FakeWindowsError(OSError): """ Stand-in for sometimes-builtin exception on platforms for which it is missing. """ try: WindowsError = WindowsError except NameError: WindowsError = FakeWindowsError # XXX fix this to use python's builtin _winreg? def getProgramsMenuPath(): """ Get the path to the Programs menu. Probably will break on non-US Windows. @return: the filesystem location of the common Start Menu->Programs. @rtype: L{str} """ if not platform.isWindows(): return "C:\\Windows\\Start Menu\\Programs" keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders' hShellFolders = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, keyname, 0, win32con.KEY_READ) return win32api.RegQueryValueEx(hShellFolders, 'Common Programs')[0] def getProgramFilesPath(): """Get the path to the Program Files folder.""" keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion' currentV = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, keyname, 0, win32con.KEY_READ) return win32api.RegQueryValueEx(currentV, 'ProgramFilesDir')[0] _cmdLineQuoteRe = re.compile(r'(\\*)"') _cmdLineQuoteRe2 = re.compile(r'(\\+)\Z') def cmdLineQuote(s): """ Internal method for quoting a single command-line argument. 
@param s: an unquoted string that you want to quote so that something that does cmd.exe-style unquoting will interpret it as a single argument, even if it contains spaces. @type s: C{str} @return: a quoted string. @rtype: C{str} """ quote = ((" " in s) or ("\t" in s) or ('"' in s) or s == '') and '"' or '' return quote + _cmdLineQuoteRe2.sub(r"\1\1", _cmdLineQuoteRe.sub(r'\1\1\\"', s)) + quote def quoteArguments(arguments): """ Quote an iterable of command-line arguments for passing to CreateProcess or a similar API. This allows the list passed to C{reactor.spawnProcess} to match the child process's C{sys.argv} properly. @param arglist: an iterable of C{str}, each unquoted. @return: a single string, with the given sequence quoted as necessary. """ return ' '.join([cmdLineQuote(a) for a in arguments]) class _ErrorFormatter(object): """ Formatter for Windows error messages. @ivar winError: A callable which takes one integer error number argument and returns an L{exceptions.WindowsError} instance for that error (like L{ctypes.WinError}). @ivar formatMessage: A callable which takes one integer error number argument and returns a C{str} giving the message for that error (like L{win32api.FormatMessage}). @ivar errorTab: A mapping from integer error numbers to C{str} messages which correspond to those erorrs (like L{socket.errorTab}). """ def __init__(self, WinError, FormatMessage, errorTab): self.winError = WinError self.formatMessage = FormatMessage self.errorTab = errorTab def fromEnvironment(cls): """ Get as many of the platform-specific error translation objects as possible and return an instance of C{cls} created with them. 
""" try: from ctypes import WinError except ImportError: WinError = None try: from win32api import FormatMessage except ImportError: FormatMessage = None try: from socket import errorTab except ImportError: errorTab = None return cls(WinError, FormatMessage, errorTab) fromEnvironment = classmethod(fromEnvironment) def formatError(self, errorcode): """ Returns the string associated with a Windows error message, such as the ones found in socket.error. Attempts direct lookup against the win32 API via ctypes and then pywin32 if available), then in the error table in the socket module, then finally defaulting to C{os.strerror}. @param errorcode: the Windows error code @type errorcode: C{int} @return: The error message string @rtype: C{str} """ if self.winError is not None: return self.winError(errorcode).strerror if self.formatMessage is not None: return self.formatMessage(errorcode) if self.errorTab is not None: result = self.errorTab.get(errorcode) if result is not None: return result return os.strerror(errorcode) formatError = _ErrorFormatter.fromEnvironment().formatError
agpl-3.0
arhik/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/contour.py
69
42063
""" These are classes to support contour plotting and labelling for the axes class """ from __future__ import division import warnings import matplotlib as mpl import numpy as np from numpy import ma import matplotlib._cntr as _cntr import matplotlib.path as path import matplotlib.ticker as ticker import matplotlib.cm as cm import matplotlib.colors as colors import matplotlib.collections as collections import matplotlib.font_manager as font_manager import matplotlib.text as text import matplotlib.cbook as cbook import matplotlib.mlab as mlab # Import needed for adding manual selection capability to clabel from matplotlib.blocking_input import BlockingContourLabeler # We can't use a single line collection for contour because a line # collection can have only a single line style, and we want to be able to have # dashed negative contours, for example, and solid positive contours. # We could use a single polygon collection for filled contours, but it # seems better to keep line and filled contours similar, with one collection # per level. class ContourLabeler: '''Mixin to provide labelling capability to ContourSet''' def clabel(self, *args, **kwargs): """ call signature:: clabel(cs, **kwargs) adds labels to line contours in *cs*, where *cs* is a :class:`~matplotlib.contour.ContourSet` object returned by contour. :: clabel(cs, v, **kwargs) only labels contours listed in *v*. Optional keyword arguments: *fontsize*: See http://matplotlib.sf.net/fonts.html *colors*: - if *None*, the color of each label matches the color of the corresponding contour - if one string color, e.g. *colors* = 'r' or *colors* = 'red', all labels will be plotted in this color - if a tuple of matplotlib color args (string, float, rgb, etc), different labels will be plotted in different colors in the order specified *inline*: controls whether the underlying contour is removed or not. Default is *True*. *inline_spacing*: space in pixels to leave on each side of label when placing inline. 
Defaults to 5. This spacing will be exact for labels at locations where the contour is straight, less so for labels on curved contours. *fmt*: a format string for the label. Default is '%1.3f' Alternatively, this can be a dictionary matching contour levels with arbitrary strings to use for each contour level (i.e., fmt[level]=string) *manual*: if *True*, contour labels will be placed manually using mouse clicks. Click the first button near a contour to add a label, click the second button (or potentially both mouse buttons at once) to finish adding labels. The third button can be used to remove the last label added, but only if labels are not inline. Alternatively, the keyboard can be used to select label locations (enter to end label placement, delete or backspace act like the third mouse button, and any other key will select a label location). .. plot:: mpl_examples/pylab_examples/contour_demo.py """ """ NOTES on how this all works: clabel basically takes the input arguments and uses them to add a list of "label specific" attributes to the ContourSet object. These attributes are all of the form label* and names should be fairly self explanatory. Once these attributes are set, clabel passes control to the labels method (case of automatic label placement) or BlockingContourLabeler (case of manual label placement). 
""" fontsize = kwargs.get('fontsize', None) inline = kwargs.get('inline', 1) inline_spacing = kwargs.get('inline_spacing', 5) self.labelFmt = kwargs.get('fmt', '%1.3f') _colors = kwargs.get('colors', None) # Detect if manual selection is desired and remove from argument list self.labelManual=kwargs.get('manual',False) if len(args) == 0: levels = self.levels indices = range(len(self.levels)) elif len(args) == 1: levlabs = list(args[0]) indices, levels = [], [] for i, lev in enumerate(self.levels): if lev in levlabs: indices.append(i) levels.append(lev) if len(levels) < len(levlabs): msg = "Specified levels " + str(levlabs) msg += "\n don't match available levels " msg += str(self.levels) raise ValueError(msg) else: raise TypeError("Illegal arguments to clabel, see help(clabel)") self.labelLevelList = levels self.labelIndiceList = indices self.labelFontProps = font_manager.FontProperties() if fontsize == None: font_size = int(self.labelFontProps.get_size_in_points()) else: if type(fontsize) not in [int, float, str]: raise TypeError("Font size must be an integer number.") # Can't it be floating point, as indicated in line above? else: if type(fontsize) == str: font_size = int(self.labelFontProps.get_size_in_points()) else: self.labelFontProps.set_size(fontsize) font_size = fontsize self.labelFontSizeList = [font_size] * len(levels) if _colors == None: self.labelMappable = self self.labelCValueList = np.take(self.cvalues, self.labelIndiceList) else: cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList)) self.labelCValueList = range(len(self.labelLevelList)) self.labelMappable = cm.ScalarMappable(cmap = cmap, norm = colors.NoNorm()) #self.labelTexts = [] # Initialized in ContourSet.__init__ #self.labelCValues = [] # same self.labelXYs = [] if self.labelManual: print 'Select label locations manually using first mouse button.' print 'End manual selection with second mouse button.' if not inline: print 'Remove last label by clicking third mouse button.' 
blocking_contour_labeler = BlockingContourLabeler(self) blocking_contour_labeler(inline,inline_spacing) else: self.labels(inline,inline_spacing) # Hold on to some old attribute names. These are depricated and will # be removed in the near future (sometime after 2008-08-01), but keeping # for now for backwards compatibility self.cl = self.labelTexts self.cl_xy = self.labelXYs self.cl_cvalues = self.labelCValues self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts) return self.labelTextsList def print_label(self, linecontour,labelwidth): "if contours are too short, don't plot a label" lcsize = len(linecontour) if lcsize > 10 * labelwidth: return 1 xmax = np.amax(linecontour[:,0]) xmin = np.amin(linecontour[:,0]) ymax = np.amax(linecontour[:,1]) ymin = np.amin(linecontour[:,1]) lw = labelwidth if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw: return 1 else: return 0 def too_close(self, x,y, lw): "if there's a label already nearby, find a better place" if self.labelXYs != []: dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2) for loc in self.labelXYs] for d in dist: if d < 1.2*lw: return 1 else: return 0 else: return 0 def get_label_coords(self, distances, XX, YY, ysize, lw): """ labels are ploted at a location with the smallest dispersion of the contour from a straight line unless there's another label nearby, in which case the second best place on the contour is picked up if there's no good place a label isplotted at the beginning of the contour """ hysize = int(ysize/2) adist = np.argsort(distances) for ind in adist: x, y = XX[ind][hysize], YY[ind][hysize] if self.too_close(x,y, lw): continue else: return x,y, ind ind = adist[0] x, y = XX[ind][hysize], YY[ind][hysize] return x,y, ind def get_label_width(self, lev, fmt, fsize): "get the width of the label in points" if cbook.is_string_like(lev): lw = (len(lev)) * fsize else: lw = (len(self.get_text(lev,fmt))) * fsize return lw def get_real_label_width( self, lev, fmt, fsize ): """ This computes 
actual onscreen label width. This uses some black magic to determine onscreen extent of non-drawn label. This magic may not be very robust. """ # Find middle of axes xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 ) # Temporarily create text object t = text.Text( xx[0], xx[1] ) self.set_label_props( t, self.get_text(lev,fmt), 'k' ) # Some black magic to get onscreen extent # NOTE: This will only work for already drawn figures, as the canvas # does not have a renderer otherwise. This is the reason this function # can't be integrated into the rest of the code. bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer) # difference in pixel extent of image lw = np.diff(bbox.corners()[0::2,0])[0] return lw def set_label_props(self, label, text, color): "set the label properties - color, fontsize, text" label.set_text(text) label.set_color(color) label.set_fontproperties(self.labelFontProps) label.set_clip_box(self.ax.bbox) def get_text(self, lev, fmt): "get the text of the label" if cbook.is_string_like(lev): return lev else: if isinstance(fmt,dict): return fmt[lev] else: return fmt%lev def locate_label(self, linecontour, labelwidth): """find a good place to plot a label (relatively flat part of the contour) and the angle of rotation for the text object """ nsize= len(linecontour) if labelwidth > 1: xsize = int(np.ceil(nsize/labelwidth)) else: xsize = 1 if xsize == 1: ysize = nsize else: ysize = labelwidth XX = np.resize(linecontour[:,0],(xsize, ysize)) YY = np.resize(linecontour[:,1],(xsize, ysize)) #I might have fouled up the following: yfirst = YY[:,0].reshape(xsize, 1) ylast = YY[:,-1].reshape(xsize, 1) xfirst = XX[:,0].reshape(xsize, 1) xlast = XX[:,-1].reshape(xsize, 1) s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst) L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel() dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1) x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth) #print 'ind, x, y', 
ind, x, y # There must be a more efficient way... lc = [tuple(l) for l in linecontour] dind = lc.index((x,y)) #print 'dind', dind #dind = list(linecontour).index((x,y)) return x, y, dind def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ): """ This function calculates the appropriate label rotation given the linecontour coordinates in screen units, the index of the label location and the label width. It will also break contour and calculate inlining if *lc* is not empty (lc defaults to the empty list if None). *spacing* is the space around the label in pixels to leave empty. Do both of these tasks at once to avoid calling mlab.path_length multiple times, which is relatively costly. The method used here involves calculating the path length along the contour in pixel coordinates and then looking approximately label width / 2 away from central point to determine rotation and then to break contour if desired. """ if lc is None: lc = [] # Half the label width hlw = lw/2.0 # Check if closed and, if so, rotate contour so label is at edge closed = mlab.is_closed_polygon(slc) if closed: slc = np.r_[ slc[ind:-1], slc[:ind+1] ] if len(lc): # Rotate lc also if not empty lc = np.r_[ lc[ind:-1], lc[:ind+1] ] ind = 0 # Path length in pixel space pl = mlab.path_length(slc) pl = pl-pl[ind] # Use linear interpolation to get points around label xi = np.array( [ -hlw, hlw ] ) if closed: # Look at end also for closed contours dp = np.array([pl[-1],0]) else: dp = np.zeros_like(xi) ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi, extrap=True ) # get vector in pixel space coordinates from one point to other dd = np.diff( ll, axis=0 ).ravel() # Get angle of vector - must be calculated in pixel space for # text rotation to work correctly if np.all(dd==0): # Must deal with case of zero length label rotation = 0.0 else: rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi # Fix angle so text is never upside-down if rotation > 90: rotation = rotation - 180.0 if 
rotation < -90: rotation = 180.0 + rotation # Break contour if desired nlc = [] if len(lc): # Expand range by spacing xi = dp + xi + np.array([-spacing,spacing]) # Get indices near points of interest I = mlab.less_simple_linear_interpolation( pl, np.arange(len(pl)), xi, extrap=False ) # If those indices aren't beyond contour edge, find x,y if (not np.isnan(I[0])) and int(I[0])<>I[0]: xy1 = mlab.less_simple_linear_interpolation( pl, lc, [ xi[0] ] ) if (not np.isnan(I[1])) and int(I[1])<>I[1]: xy2 = mlab.less_simple_linear_interpolation( pl, lc, [ xi[1] ] ) # Make integer I = [ np.floor(I[0]), np.ceil(I[1]) ] # Actually break contours if closed: # This will remove contour if shorter than label if np.all(~np.isnan(I)): nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] ) else: # These will remove pieces of contour if they have length zero if not np.isnan(I[0]): nlc.append( np.r_[ lc[:I[0]+1], xy1 ] ) if not np.isnan(I[1]): nlc.append( np.r_[ xy2, lc[I[1]:] ] ) # The current implementation removes contours completely # covered by labels. Uncomment line below to keep # original contour if this is the preferred behavoir. 
#if not len(nlc): nlc = [ lc ] return (rotation,nlc) def add_label(self,x,y,rotation,lev,cvalue): dx,dy = self.ax.transData.inverted().transform_point((x,y)) t = text.Text(dx, dy, rotation = rotation, horizontalalignment='center', verticalalignment='center') color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha) _text = self.get_text(lev,self.labelFmt) self.set_label_props(t, _text, color) self.labelTexts.append(t) self.labelCValues.append(cvalue) self.labelXYs.append((x,y)) # Add label to plot here - useful for manual mode label selection self.ax.add_artist(t) def pop_label(self,index=-1): '''Defaults to removing last label, but any index can be supplied''' self.labelCValues.pop(index) t = self.labelTexts.pop(index) t.remove() def labels(self, inline, inline_spacing): trans = self.ax.transData # A bit of shorthand for icon, lev, fsize, cvalue in zip( self.labelIndiceList, self.labelLevelList, self.labelFontSizeList, self.labelCValueList ): con = self.collections[icon] lw = self.get_label_width(lev, self.labelFmt, fsize) additions = [] paths = con.get_paths() for segNum, linepath in enumerate(paths): lc = linepath.vertices # Line contour slc0 = trans.transform(lc) # Line contour in screen coords # For closed polygons, add extra point to avoid division by # zero in print_label and locate_label. Other than these # functions, this is not necessary and should probably be # eventually removed. 
if mlab.is_closed_polygon( lc ): slc = np.r_[ slc0, slc0[1:2,:] ] else: slc = slc0 if self.print_label(slc,lw): # Check if long enough for a label x,y,ind = self.locate_label(slc, lw) if inline: lcarg = lc else: lcarg = None rotation,new=self.calc_label_rot_and_inline( slc0, ind, lw, lcarg, inline_spacing ) # Actually add the label self.add_label(x,y,rotation,lev,cvalue) # If inline, add new contours if inline: for n in new: # Add path if not empty or single point if len(n)>1: additions.append( path.Path(n) ) else: # If not adding label, keep old path additions.append(linepath) # After looping over all segments on a contour, remove old # paths and add new ones if inlining if inline: del paths[:] paths.extend(additions) class ContourSet(cm.ScalarMappable, ContourLabeler): """ Create and store a set of contour lines or filled regions. User-callable method: clabel Useful attributes: ax: the axes object in which the contours are drawn collections: a silent_list of LineCollections or PolyCollections levels: contour levels layers: same as levels for line contours; half-way between levels for filled contours. See _process_colors method. """ def __init__(self, ax, *args, **kwargs): """ Draw contour lines or filled regions, depending on whether keyword arg 'filled' is False (default) or True. The first argument of the initializer must be an axes object. The remaining arguments and keyword arguments are described in ContourSet.contour_doc. 
""" self.ax = ax self.levels = kwargs.get('levels', None) self.filled = kwargs.get('filled', False) self.linewidths = kwargs.get('linewidths', None) self.linestyles = kwargs.get('linestyles', 'solid') self.alpha = kwargs.get('alpha', 1.0) self.origin = kwargs.get('origin', None) self.extent = kwargs.get('extent', None) cmap = kwargs.get('cmap', None) self.colors = kwargs.get('colors', None) norm = kwargs.get('norm', None) self.extend = kwargs.get('extend', 'neither') self.antialiased = kwargs.get('antialiased', True) self.nchunk = kwargs.get('nchunk', 0) self.locator = kwargs.get('locator', None) if (isinstance(norm, colors.LogNorm) or isinstance(self.locator, ticker.LogLocator)): self.logscale = True if norm is None: norm = colors.LogNorm() if self.extend is not 'neither': raise ValueError('extend kwarg does not work yet with log scale') else: self.logscale = False if self.origin is not None: assert(self.origin in ['lower', 'upper', 'image']) if self.extent is not None: assert(len(self.extent) == 4) if cmap is not None: assert(isinstance(cmap, colors.Colormap)) if self.colors is not None and cmap is not None: raise ValueError('Either colors or cmap must be None') if self.origin == 'image': self.origin = mpl.rcParams['image.origin'] x, y, z = self._contour_args(*args) # also sets self.levels, # self.layers if self.colors is not None: cmap = colors.ListedColormap(self.colors, N=len(self.layers)) if self.filled: self.collections = cbook.silent_list('collections.PolyCollection') else: self.collections = cbook.silent_list('collections.LineCollection') # label lists must be initialized here self.labelTexts = [] self.labelCValues = [] kw = {'cmap': cmap} if norm is not None: kw['norm'] = norm cm.ScalarMappable.__init__(self, **kw) # sets self.cmap; self._process_colors() _mask = ma.getmask(z) if _mask is ma.nomask: _mask = None if self.filled: if self.linewidths is not None: warnings.warn('linewidths is ignored by contourf') C = _cntr.Cntr(x, y, z.filled(), _mask) lowers 
= self._levels[:-1] uppers = self._levels[1:] for level, level_upper in zip(lowers, uppers): nlist = C.trace(level, level_upper, points = 0, nchunk = self.nchunk) col = collections.PolyCollection(nlist, antialiaseds = (self.antialiased,), edgecolors= 'none', alpha=self.alpha) self.ax.add_collection(col) self.collections.append(col) else: tlinewidths = self._process_linewidths() self.tlinewidths = tlinewidths tlinestyles = self._process_linestyles() C = _cntr.Cntr(x, y, z.filled(), _mask) for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles): nlist = C.trace(level, points = 0) col = collections.LineCollection(nlist, linewidths = width, linestyle = lstyle, alpha=self.alpha) if level < 0.0 and self.monochrome: ls = mpl.rcParams['contour.negative_linestyle'] col.set_linestyle(ls) col.set_label('_nolegend_') self.ax.add_collection(col, False) self.collections.append(col) self.changed() # set the colors x0 = ma.minimum(x) x1 = ma.maximum(x) y0 = ma.minimum(y) y1 = ma.maximum(y) self.ax.update_datalim([(x0,y0), (x1,y1)]) self.ax.autoscale_view() def changed(self): tcolors = [ (tuple(rgba),) for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)] self.tcolors = tcolors for color, collection in zip(tcolors, self.collections): collection.set_alpha(self.alpha) collection.set_color(color) for label, cv in zip(self.labelTexts, self.labelCValues): label.set_alpha(self.alpha) label.set_color(self.labelMappable.to_rgba(cv)) # add label colors cm.ScalarMappable.changed(self) def _autolev(self, z, N): ''' Select contour levels to span the data. We need two more levels for filled contours than for line contours, because for the latter we need to specify the lower and upper boundary of each range. For example, a single contour boundary, say at z = 0, requires only one contour line, but two filled regions, and therefore three levels to provide boundaries for both regions. 
''' if self.locator is None: if self.logscale: self.locator = ticker.LogLocator() else: self.locator = ticker.MaxNLocator(N+1) self.locator.create_dummy_axis() zmax = self.zmax zmin = self.zmin self.locator.set_bounds(zmin, zmax) lev = self.locator() zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin) if zmax >= lev[-1]: lev[-1] += zmargin if zmin <= lev[0]: if self.logscale: lev[0] = 0.99 * zmin else: lev[0] -= zmargin self._auto = True if self.filled: return lev return lev[1:-1] def _initialize_x_y(self, z): ''' Return X, Y arrays such that contour(Z) will match imshow(Z) if origin is not None. The center of pixel Z[i,j] depends on origin: if origin is None, x = j, y = i; if origin is 'lower', x = j + 0.5, y = i + 0.5; if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5 If extent is not None, x and y will be scaled to match, as in imshow. If origin is None and extent is not None, then extent will give the minimum and maximum values of x and y. ''' if z.ndim != 2: raise TypeError("Input must be a 2D array.") else: Ny, Nx = z.shape if self.origin is None: # Not for image-matching. if self.extent is None: return np.meshgrid(np.arange(Nx), np.arange(Ny)) else: x0,x1,y0,y1 = self.extent x = np.linspace(x0, x1, Nx) y = np.linspace(y0, y1, Ny) return np.meshgrid(x, y) # Match image behavior: if self.extent is None: x0,x1,y0,y1 = (0, Nx, 0, Ny) else: x0,x1,y0,y1 = self.extent dx = float(x1 - x0)/Nx dy = float(y1 - y0)/Ny x = x0 + (np.arange(Nx) + 0.5) * dx y = y0 + (np.arange(Ny) + 0.5) * dy if self.origin == 'upper': y = y[::-1] return np.meshgrid(x,y) def _check_xyz(self, args): ''' For functions like contour, check that the dimensions of the input arrays match; if x and y are 1D, convert them to 2D using meshgrid. Possible change: I think we should make and use an ArgumentError Exception class (here and elsewhere). 
''' # We can strip away the x and y units x = self.ax.convert_xunits( args[0] ) y = self.ax.convert_yunits( args[1] ) x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) z = ma.asarray(args[2], dtype=np.float64) if z.ndim != 2: raise TypeError("Input z must be a 2D array.") else: Ny, Nx = z.shape if x.shape == z.shape and y.shape == z.shape: return x,y,z if x.ndim != 1 or y.ndim != 1: raise TypeError("Inputs x and y must be 1D or 2D.") nx, = x.shape ny, = y.shape if nx != Nx or ny != Ny: raise TypeError("Length of x must be number of columns in z,\n" + "and length of y must be number of rows.") x,y = np.meshgrid(x,y) return x,y,z def _contour_args(self, *args): if self.filled: fn = 'contourf' else: fn = 'contour' Nargs = len(args) if Nargs <= 2: z = ma.asarray(args[0], dtype=np.float64) x, y = self._initialize_x_y(z) elif Nargs <=4: x,y,z = self._check_xyz(args[:3]) else: raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn)) self.zmax = ma.maximum(z) self.zmin = ma.minimum(z) if self.logscale and self.zmin <= 0: z = ma.masked_where(z <= 0, z) warnings.warn('Log scale: values of z <=0 have been masked') self.zmin = z.min() self._auto = False if self.levels is None: if Nargs == 1 or Nargs == 3: lev = self._autolev(z, 7) else: # 2 or 4 args level_arg = args[-1] try: if type(level_arg) == int: lev = self._autolev(z, level_arg) else: lev = np.asarray(level_arg).astype(np.float64) except: raise TypeError( "Last %s arg must give levels; see help(%s)" % (fn,fn)) if self.filled and len(lev) < 2: raise ValueError("Filled contours require at least 2 levels.") # Workaround for cntr.c bug wrt masked interior regions: #if filled: # z = ma.masked_array(z.filled(-1e38)) # It's not clear this is any better than the original bug. 
self.levels = lev #if self._auto and self.extend in ('both', 'min', 'max'): # raise TypeError("Auto level selection is inconsistent " # + "with use of 'extend' kwarg") self._levels = list(self.levels) if self.extend in ('both', 'min'): self._levels.insert(0, min(self.levels[0],self.zmin) - 1) if self.extend in ('both', 'max'): self._levels.append(max(self.levels[-1],self.zmax) + 1) self._levels = np.asarray(self._levels) self.vmin = np.amin(self.levels) # alternative would be self.layers self.vmax = np.amax(self.levels) if self.extend in ('both', 'min'): self.vmin = 2 * self.levels[0] - self.levels[1] if self.extend in ('both', 'max'): self.vmax = 2 * self.levels[-1] - self.levels[-2] self.layers = self._levels # contour: a line is a thin layer if self.filled: self.layers = 0.5 * (self._levels[:-1] + self._levels[1:]) if self.extend in ('both', 'min'): self.layers[0] = 0.5 * (self.vmin + self._levels[1]) if self.extend in ('both', 'max'): self.layers[-1] = 0.5 * (self.vmax + self._levels[-2]) return (x, y, z) def _process_colors(self): """ Color argument processing for contouring. Note that we base the color mapping on the contour levels, not on the actual range of the Z values. This means we don't have to worry about bad values in Z, and we always have the full dynamic range available for the selected levels. The color is based on the midpoint of the layer, except for an extended end layers. 
""" self.monochrome = self.cmap.monochrome if self.colors is not None: i0, i1 = 0, len(self.layers) if self.extend in ('both', 'min'): i0 = -1 if self.extend in ('both', 'max'): i1 = i1 + 1 self.cvalues = range(i0, i1) self.set_norm(colors.NoNorm()) else: self.cvalues = self.layers if not self.norm.scaled(): self.set_clim(self.vmin, self.vmax) if self.extend in ('both', 'max', 'min'): self.norm.clip = False self.set_array(self.layers) # self.tcolors are set by the "changed" method def _process_linewidths(self): linewidths = self.linewidths Nlev = len(self.levels) if linewidths is None: tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev else: if cbook.iterable(linewidths) and len(linewidths) < Nlev: linewidths = list(linewidths) * int(np.ceil(Nlev/len(linewidths))) elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]: linewidths = [linewidths] * Nlev tlinewidths = [(w,) for w in linewidths] return tlinewidths def _process_linestyles(self): linestyles = self.linestyles Nlev = len(self.levels) if linestyles is None: tlinestyles = ['solid'] * Nlev else: if cbook.is_string_like(linestyles): tlinestyles = [linestyles] * Nlev elif cbook.iterable(linestyles) and len(linestyles) <= Nlev: tlinestyles = list(linestyles) * int(np.ceil(Nlev/len(linestyles))) return tlinestyles def get_alpha(self): '''returns alpha to be applied to all ContourSet artists''' return self.alpha def set_alpha(self, alpha): '''sets alpha for all ContourSet artists''' self.alpha = alpha self.changed() contour_doc = """ :func:`~matplotlib.pyplot.contour` and :func:`~matplotlib.pyplot.contourf` draw contour lines and filled contours, respectively. Except as noted, function signatures and return values are the same for both versions. :func:`~matplotlib.pyplot.contourf` differs from the Matlab (TM) version in that it does not draw the polygon edges, because the contouring engine yields simply connected regions with branch cuts. 
To draw the edges, add line contours with calls to :func:`~matplotlib.pyplot.contour`. call signatures:: contour(Z) make a contour plot of an array *Z*. The level values are chosen automatically. :: contour(X,Y,Z) *X*, *Y* specify the (*x*, *y*) coordinates of the surface :: contour(Z,N) contour(X,Y,Z,N) contour *N* automatically-chosen levels. :: contour(Z,V) contour(X,Y,Z,V) draw contour lines at the values specified in sequence *V* :: contourf(..., V) fill the (len(*V*)-1) regions between the values in *V* :: contour(Z, **kwargs) Use keyword args to control colors, linewidth, origin, cmap ... see below for more details. *X*, *Y*, and *Z* must be arrays with the same dimensions. *Z* may be a masked array, but filled contouring may not handle internal masked regions correctly. ``C = contour(...)`` returns a :class:`~matplotlib.contour.ContourSet` object. Optional keyword arguments: *colors*: [ None | string | (mpl_colors) ] If *None*, the colormap specified by cmap will be used. If a string, like 'r' or 'red', all levels will be plotted in this color. If a tuple of matplotlib color args (string, float, rgb, etc), different levels will be plotted in different colors in the order specified. *alpha*: float The alpha blending value *cmap*: [ None | Colormap ] A cm :class:`~matplotlib.cm.Colormap` instance or *None*. If *cmap* is *None* and *colors* is *None*, a default Colormap is used. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance for scaling data values to colors. If *norm* is *None* and *colors* is *None*, the default linear scaling is used. *origin*: [ None | 'upper' | 'lower' | 'image' ] If *None*, the first value of *Z* will correspond to the lower left corner, location (0,0). If 'image', the rc value for ``image.origin`` will be used. This keyword is not active if *X* and *Y* are specified in the call to contour. 
*extent*: [ None | (x0,x1,y0,y1) ] If *origin* is not *None*, then *extent* is interpreted as in :func:`matplotlib.pyplot.imshow`: it gives the outer pixel boundaries. In this case, the position of Z[0,0] is the center of the pixel, not a corner. If *origin* is *None*, then (*x0*, *y0*) is the position of Z[0,0], and (*x1*, *y1*) is the position of Z[-1,-1]. This keyword is not active if *X* and *Y* are specified in the call to contour. *locator*: [ None | ticker.Locator subclass ] If *locator* is None, the default :class:`~matplotlib.ticker.MaxNLocator` is used. The locator is used to determine the contour levels if they are not given explicitly via the *V* argument. *extend*: [ 'neither' | 'both' | 'min' | 'max' ] Unless this is 'neither', contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via :meth:`matplotlib.cm.Colormap.set_under` and :meth:`matplotlib.cm.Colormap.set_over` methods. contour-only keyword arguments: *linewidths*: [ None | number | tuple of numbers ] If *linewidths* is *None*, the default width in ``lines.linewidth`` in ``matplotlibrc`` is used. If a number, all levels will be plotted with this linewidth. If a tuple, different levels will be plotted with different linewidths in the order specified *linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ] If *linestyles* is *None*, the 'solid' is used. *linestyles* can also be an iterable of the above strings specifying a set of linestyles to be used. If this iterable is shorter than the number of contour levels it will be repeated as necessary. If contour is using a monochrome colormap and the contour level is less than 0, then the linestyle specified in ``contour.negative_linestyle`` in ``matplotlibrc`` will be used. 
contourf-only keyword arguments: *antialiased*: [ True | False ] enable antialiasing *nchunk*: [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of roughly *nchunk* by *nchunk* points. This may never actually be advantageous, so this option may be removed. Chunking introduces artifacts at the chunk boundaries unless *antialiased* is *False*. **Example:** .. plot:: mpl_examples/pylab_examples/contour_demo.py """ def find_nearest_contour( self, x, y, indices=None, pixel=True ): """ Finds contour that is closest to a point. Defaults to measuring distance in pixels (screen space - useful for manual contour labeling), but this can be controlled via a keyword argument. Returns a tuple containing the contour, segment, index of segment, x & y of segment point and distance to minimum point. Call signature:: conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour( self, x, y, indices=None, pixel=True ) Optional keyword arguments:: *indices*: Indexes of contour levels to consider when looking for nearest point. Defaults to using all levels. *pixel*: If *True*, measure distance in pixel space, if not, measure distance in axes space. Defaults to *True*. """ # This function uses a method that is probably quite # inefficient based on converting each contour segment to # pixel coordinates and then comparing the given point to # those coordinates for each contour. This will probably be # quite slow for complex contours, but for normal use it works # sufficiently well that the time is not noticeable. # Nonetheless, improvements could probably be made. 
if indices==None: indices = range(len(self.levels)) dmin = 1e10 conmin = None segmin = None xmin = None ymin = None for icon in indices: con = self.collections[icon] paths = con.get_paths() for segNum, linepath in enumerate(paths): lc = linepath.vertices # transfer all data points to screen coordinates if desired if pixel: lc = self.ax.transData.transform(lc) ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2 d = min( ds ) if d < dmin: dmin = d conmin = icon segmin = segNum imin = mpl.mlab.find( ds == d )[0] xmin = lc[imin,0] ymin = lc[imin,1] return (conmin,segmin,imin,xmin,ymin,dmin)
agpl-3.0
RafaelTorrealba/odoo
addons/stock_dropshipping/tests/test_invoicing.py
257
2284
# Author: Leonardo Pistone # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from openerp.tests.common import TransactionCase class TestCreateInvoice(TransactionCase): def setUp(self): super(TestCreateInvoice, self).setUp() self.Wizard = self.env['stock.invoice.onshipping'] self.customer = self.env.ref('base.res_partner_3') product = self.env.ref('product.product_product_36') dropship_route = self.env.ref('stock_dropshipping.route_drop_shipping') self.so = self.env['sale.order'].create({ 'partner_id': self.customer.id, }) self.sol = self.env['sale.order.line'].create({ 'name': '/', 'order_id': self.so.id, 'product_id': product.id, 'route_id': dropship_route.id, }) def test_po_on_delivery_creates_correct_invoice(self): self.so.action_button_confirm() po = self.so.procurement_group_id.procurement_ids.purchase_id self.assertTrue(po) po.invoice_method = 'picking' po.signal_workflow('purchase_confirm') picking = po.picking_ids self.assertEqual(1, len(picking)) picking.action_done() wizard = self.Wizard.with_context({ 'active_id': picking.id, 'active_ids': [picking.id], }).create({}) invoice_ids = wizard.create_invoice() invoices = self.env['account.invoice'].browse(invoice_ids) self.assertEqual(1, len(invoices)) self.assertEqual(invoices.type, 'in_invoice') self.assertEqual(invoices, po.invoice_ids)
agpl-3.0
mlue/discordbridge
node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexer.py
47
27728
# -*- coding: utf-8 -*- """ pygments.lexer ~~~~~~~~~~~~~~ Base lexer classes. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re, itertools from pygments.filter import apply_filters, Filter from pygments.filters import get_filter_by_name from pygments.token import Error, Text, Other, _TokenType from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ make_analysator, text_type, add_metaclass, iteritems __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', 'default'] _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), (b'\xff\xfe\0\0', 'utf-32'), (b'\0\0\xfe\xff', 'utf-32be'), (b'\xff\xfe', 'utf-16'), (b'\xfe\xff', 'utf-16be')] _default_analyse = staticmethod(lambda x: 0.0) class LexerMeta(type): """ This metaclass automagically converts ``analyse_text`` methods into static methods which always return float values. """ def __new__(cls, name, bases, d): if 'analyse_text' in d: d['analyse_text'] = make_analysator(d['analyse_text']) return type.__new__(cls, name, bases, d) @add_metaclass(LexerMeta) class Lexer(object): """ Lexer for a specific language. Basic options recognized: ``stripnl`` Strip leading and trailing newlines from the input (default: True). ``stripall`` Strip all leading and trailing whitespace from the input (default: False). ``ensurenl`` Make sure that the input ends with a newline (default: True). This is required for some lexers that consume input linewise. .. versionadded:: 1.3 ``tabsize`` If given and greater than 0, expand tabs in the input (default: 0). ``encoding`` If given, must be an encoding name. This encoding will be used to convert the input string to Unicode, if it is not already a Unicode string (default: ``'latin1'``). Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or ``'chardet'`` to use the chardet library, if it is installed. 
""" #: Name of the lexer name = None #: Shortcuts for the lexer aliases = [] #: File name globs filenames = [] #: Secondary file name globs alias_filenames = [] #: MIME types mimetypes = [] #: Priority, should multiple lexers match and no content is provided priority = 0 def __init__(self, **options): self.options = options self.stripnl = get_bool_opt(options, 'stripnl', True) self.stripall = get_bool_opt(options, 'stripall', False) self.ensurenl = get_bool_opt(options, 'ensurenl', True) self.tabsize = get_int_opt(options, 'tabsize', 0) self.encoding = options.get('encoding', 'latin1') # self.encoding = options.get('inencoding', None) or self.encoding self.filters = [] for filter_ in get_list_opt(options, 'filters', ()): self.add_filter(filter_) def __repr__(self): if self.options: return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, self.options) else: return '<pygments.lexers.%s>' % self.__class__.__name__ def add_filter(self, filter_, **options): """ Add a new stream filter to this lexer. """ if not isinstance(filter_, Filter): filter_ = get_filter_by_name(filter_, **options) self.filters.append(filter_) def analyse_text(text): """ Has to return a float between ``0`` and ``1`` that indicates if a lexer wants to highlight this text. Used by ``guess_lexer``. If this method returns ``0`` it won't highlight it in any case, if it returns ``1`` highlighting with this lexer is guaranteed. The `LexerMeta` metaclass automatically wraps this function so that it works like a static method (no ``self`` or ``cls`` parameter) and the return value is automatically converted to `float`. If the return value is an object that is boolean `False` it's the same as if the return values was ``0.0``. """ def get_tokens(self, text, unfiltered=False): """ Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. 
expand tabs and strip it if wanted and applies registered filters. """ if not isinstance(text, text_type): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] except UnicodeDecodeError: text = text.decode('latin1') elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') # check for BOM first decoded = None for bom, encoding in _encoding_map: if text.startswith(bom): decoded = text[len(bom):].decode(encoding, 'replace') break # no BOM found, so use chardet if decoded is None: enc = chardet.detect(text[:1024]) # Guess using first 1KB decoded = text.decode(enc.get('encoding') or 'utf-8', 'replace') text = decoded else: text = text.decode(self.encoding) if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] else: if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] # text now *is* a unicode string text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if self.ensurenl and not text.endswith('\n'): text += '\n' def streamer(): for i, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream def get_tokens_unprocessed(self, text): """ Return an iterable of (index, tokentype, value) pairs where "index" is the starting position of the token within the input text. In subclasses, implement this method as a generator to maximize effectiveness. """ raise NotImplementedError class DelegatingLexer(Lexer): """ This lexer takes two lexer as arguments. A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer. 
The lexers from the ``template`` lexer package use this base lexer. """ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): self.root_lexer = _root_lexer(**options) self.language_lexer = _language_lexer(**options) self.needle = _needle Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): buffered = '' insertions = [] lng_buffer = [] for i, t, v in self.language_lexer.get_tokens_unprocessed(text): if t is self.needle: if lng_buffer: insertions.append((len(buffered), lng_buffer)) lng_buffer = [] buffered += v else: lng_buffer.append((i, t, v)) if lng_buffer: insertions.append((len(buffered), lng_buffer)) return do_insertions(insertions, self.root_lexer.get_tokens_unprocessed(buffered)) #------------------------------------------------------------------------------- # RegexLexer and ExtendedRegexLexer # class include(str): """ Indicates that a state should include rules from another state. """ pass class _inherit(object): """ Indicates the a state should inherit from its superclass. """ def __repr__(self): return 'inherit' inherit = _inherit() class combined(tuple): """ Indicates a state combined from multiple states. """ def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): # tuple.__init__ doesn't do anything pass class _PseudoMatch(object): """ A pseudo match object constructed from a string. """ def __init__(self, start, text): self._text = text self._start = start def start(self, arg=None): return self._start def end(self, arg=None): return self._start + len(self._text) def group(self, arg=None): if arg: raise IndexError('No such group') return self._text def groups(self): return (self._text,) def groupdict(self): return {} def bygroups(*args): """ Callback that yields multiple actions for each group in the match. 
""" def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback class _This(object): """ Special singleton used for indicating the caller class. Used by ``using``. """ this = _This() def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. """ gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback class default: """ Indicates a state or state action (e.g. #pop) to apply. 
For example default('#pop') is equivalent to ('', Token, '#pop') Note that state tuples may be used as well """ def __init__(self, state): self.state = state class RegexLexerMeta(LexerMeta): """ Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. """ def _process_regex(cls, regex, rflags): """Preprocess the regular expression component of a token definition.""" return re.compile(regex, rflags).match def _process_token(cls, token): """Preprocess the token component of a token definition.""" assert type(token) is _TokenType or callable(token), \ 'token type must be simple type or callable, not %r' % (token,) return token def _process_new_state(cls, new_state, unprocessed, processed): """Preprocess the state transition action of a token definition.""" if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state def _process_state(cls, unprocessed, processed, state): """Preprocess a single state definition.""" assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = 
processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) continue if isinstance(tdef, _inherit): # processed already continue if isinstance(tdef, default): new_state = cls._process_new_state(tdef.state, unprocessed, processed) tokens.append((re.compile('').match, None, new_state)) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = cls._process_regex(tdef[0], rflags) except Exception as err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) token = cls._process_token(tdef[1]) if len(tdef) == 2: new_state = None else: new_state = cls._process_new_state(tdef[2], unprocessed, processed) tokens.append((rex, token, new_state)) return tokens def process_tokendef(cls, name, tokendefs=None): """Preprocess a dictionary of token definitions.""" processed = cls._all_tokens[name] = {} tokendefs = tokendefs or cls.tokens[name] for state in list(tokendefs): cls._process_state(tokendefs, processed, state) return processed def get_tokendefs(cls): """ Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state. 
""" tokens = {} inheritable = {} for c in itertools.chain((cls,), cls.__mro__): toks = c.__dict__.get('tokens', {}) for state, items in iteritems(toks): curitems = tokens.get(state) if curitems is None: tokens[state] = items try: inherit_ndx = items.index(inherit) except ValueError: continue inheritable[state] = inherit_ndx continue inherit_ndx = inheritable.pop(state, None) if inherit_ndx is None: continue # Replace the "inherit" value with the items curitems[inherit_ndx:inherit_ndx+1] = items try: new_inh_ndx = items.index(inherit) except ValueError: pass else: inheritable[state] = inherit_ndx + new_inh_ndx return tokens def __call__(cls, *args, **kwds): """Instantiate cls after preprocessing its token definitions.""" if '_tokens' not in cls.__dict__: cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) return type.__call__(cls, *args, **kwds) @add_metaclass(RegexLexerMeta) class RegexLexer(Lexer): """ Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """ #: Flags for compiling the regular expressions. #: Defaults to MULTILINE. flags = re.MULTILINE #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` #: #: The initial state is 'root'. #: ``new_state`` can be omitted to signify no state transition. #: If it is a string, the state is pushed on the stack and changed. #: If it is a tuple of strings, all states are pushed on the stack and #: the current state will be the topmost. #: It can also be ``combined('state1', 'state2', ...)`` #: to signify a new, anonymous state combined from the rules of two #: or more existing ones. #: Furthermore, it can be '#pop' to signify going back one step in #: the state stack, or '#push' to push the current state on the stack #: again. 
#: #: The tuple can also be replaced with ``include('state')``, in which #: case the rules from the state named by the string are included in the #: current one. tokens = {} def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the inital stack (default: ``['root']``) """ pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: if action is not None: if type(action) is _TokenType: yield pos, action, m.group() else: for item in action(self, m): yield item pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[statestack[-1]] break else: try: if text[pos] == '\n': # at EOL, reset state to "root" statestack = ['root'] statetokens = tokendefs['root'] yield pos, Text, u'\n' pos += 1 continue yield pos, Error, text[pos] pos += 1 except IndexError: break class LexerContext(object): """ A helper object that holds lexer position data. """ def __init__(self, text, pos, stack=None, end=None): self.text = text self.pos = pos self.end = end or len(text) # end=0 not supported ;-) self.stack = stack or ['root'] def __repr__(self): return 'LexerContext(%r, %r, %r)' % ( self.text, self.pos, self.stack) class ExtendedRegexLexer(RegexLexer): """ A RegexLexer that uses a context object to store its state. """ def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. 
""" tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here. """ insertions = iter(insertions) try: index, itokens = next(insertions) except StopIteration: # no insertions for item in tokens: yield item return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. for i, t, v in tokens: # first iteration. 
store the postition of first item if realpos is None: realpos = i oldi = 0 while insleft and i + len(v) >= index: tmpval = v[oldi:index - i] yield realpos, t, tmpval realpos += len(tmpval) for it_index, it_token, it_value in itokens: yield realpos, it_token, it_value realpos += len(it_value) oldi = index - i try: index, itokens = next(insertions) except StopIteration: insleft = False break # not strictly necessary yield realpos, t, v[oldi:] realpos += len(v) - oldi # leftover tokens while insleft: # no normal tokens, set realpos to zero realpos = realpos or 0 for p, t, v in itokens: yield realpos, t, v realpos += len(v) try: index, itokens = next(insertions) except StopIteration: insleft = False break # not strictly necessary
mit
lsbardel/flow
flow/db/instdata/management/commands/cleaninst.py
1
1465
''' Remove badly formed positions from database ''' import os import re from django.contrib.contenttypes.management import update_all_contenttypes from django.core.management.base import copy_helper, CommandError, BaseCommand from django.utils.importlib import import_module from jflow.db.instdata.models import DataId from jflow.db.trade.models import Position class Command(BaseCommand): help = "Clean prospero instruments" def handle(self, *args, **options): tr = 0 ids = DataId.objects.exclude(isin = '') isin = {} for id in ids: nid = isin.get(id.isin,None) if not nid: isin[id.isin] = id else: fc = id.firm_code or nid.firm_code did = nid if nid.instrument: did = id id = nid id.firm_code = fc id.save() pos = Position.objects.filter(dataid = did) for p in pos: pc = Position.objects.filter(dataid = id, dt = p.dt, fund = p.fund) if not pc: p.dataid = id p.save() did.delete() tr += 1 if tr: print('Removed %s dataids' % tr) else: print('Data ids were OK')
bsd-3-clause
aricaldeira/PySPED
pysped/nfe/leiaute/conssitnfe_400.py
2
4578
# -*- coding: utf-8 -*- # # PySPED - Python libraries to deal with Brazil's SPED Project # # Copyright (C) 3100-3102 # Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # PySPED - Bibliotecas Python para o # SPED - Sistema Público de Escrituração Digital # # Copyright (C) 3100-3102 # Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br> # # Este programa é um software livre: você pode redistribuir e/ou modificar # este programa sob os termos da licença GNU Affero General Public License, # publicada pela Free Software Foundation, em sua versão 3 ou, de acordo # com sua opção, qualquer versão posterior. # # Este programa é distribuido na esperança de que venha a ser útil, # porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de # COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a # GNU Affero General Public License para mais detalhes. # # Você deve ter recebido uma cópia da GNU Affero General Public License # juntamente com este programa. 
Caso esse não seja o caso, acesse: # <http://www.gnu.org/licenses/> # from __future__ import division, print_function, unicode_literals import os from pysped.xml_sped import * from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_4 as ESQUEMA_ATUAL from pysped.nfe.leiaute import ProtNFe_400 from pysped.nfe.leiaute import ProcEventoCancNFe_100, ProcEventoCCe_100 from pysped.nfe.leiaute import ProcEventoConfRecebimento_100, ProcEvento_100 from pysped.nfe.leiaute import conssitnfe_310 DIRNAME = os.path.dirname(__file__) class ConsSitNFe(conssitnfe_310.ConsSitNFe): def __init__(self): super(ConsSitNFe, self).__init__() self.versao = TagDecimal(nome='consSitNFe', codigo='EP01', propriedade='versao', namespace=NAMESPACE_NFE, valor='4.00', raiz='/') self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/') self.arquivo_esquema = 'consSitNFe_v4.00.xsd' class RetConsSitNFe(conssitnfe_310.RetConsSitNFe): def __init__(self): super(RetConsSitNFe, self).__init__() self.versao = TagDecimal(nome='retConsSitNFe', codigo='ER01', propriedade='versao', namespace=NAMESPACE_NFE, valor='4.00', raiz='/') self.dhRecbto = TagDataHoraUTC(nome='dhRecbto', codigo='FR08', raiz='//retConsSitNFe', obrigatorio=False) self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/') self.arquivo_esquema = 'retConsSitNFe_v4.00.xsd' def get_xml(self): xml = XMLNFe.get_xml(self) xml += ABERTURA xml += self.versao.xml xml += self.tpAmb.xml xml += self.verAplic.xml xml += self.cStat.xml xml += self.xMotivo.xml xml += self.cUF.xml xml += self.dhRecbto.xml xml += self.chNFe.xml if self.protNFe is not None: xml += self.protNFe.xml if self.retCancNFe is not None: xml += tira_abertura(self.retCancNFe.xml) if self.procEventoNFe is not None: for pen in self.procEventoNFe: xml += tira_abertura(pen.xml) xml += '</retConsSitNFe>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.versao.xml = arquivo self.tpAmb.xml = arquivo self.verAplic.xml = arquivo self.cStat.xml 
= arquivo self.xMotivo.xml = arquivo self.cUF.xml = arquivo self.dhRecbto.xml = arquivo self.chNFe.xml = arquivo if self._le_noh('//retConsSitNFe/protNFe') is not None: self.protNFe = ProtNFe_400() self.protNFe.xml = arquivo if self._le_nohs('//retConsSitNFe/procEventoNFe') is not None: self.procEventoNFe = self.le_grupo('//retConsSitNFe/procEventoNFe') xml = property(get_xml, set_xml)
lgpl-2.1
devs1991/test_edx_docmode
lms/djangoapps/certificates/migrations/0004_certificategenerationhistory.py
46
1349
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone from django.conf import settings import model_utils.fields import xmodule_django.models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('instructor_task', '0001_initial'), ('certificates', '0003_data__default_modes'), ] operations = [ migrations.CreateModel( name='CertificateGenerationHistory', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)), ('course_id', xmodule_django.models.CourseKeyField(max_length=255)), ('is_regeneration', models.BooleanField(default=False)), ('generated_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ('instructor_task', models.ForeignKey(to='instructor_task.InstructorTask')), ], ), ]
agpl-3.0
ashwyn/eden-message_parser
modules/geopy/point.py
26
10573
import re from itertools import islice from geopy import util, units, format class Point(object): """ A geodetic point with latitude, longitude, and altitude. Latitude and longitude are floating point values in degrees. Altitude is a floating point value in kilometers. The reference level is never considered and is thus application dependent, so be consistent! The default for all values is 0. Points can be created in a number of ways... With longitude, latitude, and altitude: >>> p1 = Point(41.5, -81, 0) >>> p2 = Point(latitude=41.5, longitude=-81) With a sequence of 0 to 3 values (longitude, latitude, altitude): >>> p1 = Point([41.5, -81, 0]) >>> p2 = Point((41.5, -81)) Copy another `Point` instance: >>> p2 = Point(p1) >>> p2 == p1 True >>> p2 is p1 False Give an object with a 'point' attribute, such as a `Location` instance: >>> p = Point(location) Give a string containing at least latitude and longitude: >>> p1 = Point('41.5,-81.0') >>> p2 = Point('41.5 N -81.0 W') >>> p3 = Point('-41.5 S, 81.0 E, 2.5km') >>> p4 = Point('23 26m 22s N 23 27m 30s E 21.0mi') >>> p5 = Point('''3 26' 22" N 23 27' 30" E''') Point values can be accessed by name or by index: >>> p = Point(41.5, -81.0, 0) >>> p.latitude == p[0] True >>> p.longitude == p[1] True >>> p.altitude == p[2] True When unpacking (or iterating), only latitude and longitude are included: >>> latitude, longitude = p """ UTIL_PATTERNS = dict( FLOAT=r'\d+(?:\.\d+)?', DEGREE=format.DEGREE, PRIME=format.PRIME, DOUBLE_PRIME=format.DOUBLE_PRIME, SEP=r'\s*[,;\s]\s*' ) POINT_PATTERN = re.compile(r""" \s* (?P<latitude> (?P<latitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s ][ ]* (?:(?P<latitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)? (?:(?P<latitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)? )?(?P<latitude_direction>[NS])?) %(SEP)s (?P<longitude> (?P<longitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s\s][ ]* (?:(?P<longitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)? (?:(?P<longitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)? 
)?(?P<longitude_direction>[EW])?)(?: %(SEP)s (?P<altitude> (?P<altitude_distance>-?%(FLOAT)s)[ ]* (?P<altitude_units>km|m|mi|ft|nm|nmi)))? \s*$ """ % UTIL_PATTERNS, re.X) def __new__(cls, latitude=None, longitude=None, altitude=None): single_arg = longitude is None and altitude is None if single_arg and not isinstance(latitude, util.NUMBER_TYPES): arg = latitude if arg is None: pass elif isinstance(arg, Point): return cls.from_point(arg) elif isinstance(arg, basestring): return cls.from_string(arg) else: try: seq = iter(arg) except TypeError: raise TypeError( "Failed to create Point instance from %r." % (arg,) ) else: return cls.from_sequence(seq) latitude = float(latitude or 0) if abs(latitude) > 90: raise ValueError("Latitude out of range [-90, 90]: %r" % latitude) longitude = float(longitude or 0) if abs(longitude) > 180: raise ValueError("Longitude out of range [-180, 180]: %r" % longitude) altitude = float(altitude or 0) self = super(Point, cls).__new__(cls) self.latitude = latitude self.longitude = longitude self.altitude = altitude return self def __getitem__(self, index): return (self.latitude, self.longitude, self.altitude)[index] def __setitem__(self, index, value): point = [self.latitude, self.longitude, self.altitude] point[index] = value self.latitude, self.longitude, self.altitude = point def __iter__(self): return iter((self.latitude, self.longitude, self.altitude)) def __repr__(self): return "Point(%r, %r, %r)" % ( self.latitude, self.longitude, self.altitude ) def format(self, altitude=None, deg_char='', min_char='m', sec_char='s'): latitude = "%s %s" % ( format.angle(abs(self.latitude), deg_char, min_char, sec_char), self.latitude >= 0 and 'N' or 'S' ) longitude = "%s %s" % ( format.angle(abs(self.longitude), deg_char, min_char, sec_char), self.longitude >= 0 and 'E' or 'W' ) coordinates = [latitude, longitude] if altitude is None: altitude = bool(self.altitude) if altitude: if not isinstance(altitude, basestring): altitude = 'km' 
coordinates.append(self.format_altitude(altitude)) return ", ".join(coordinates) def format_decimal(self, altitude=None): latitude = "%s" % self.latitude longitude = "%s" % self.longitude coordinates = [latitude, longitude] if altitude is None: altitude = bool(self.altitude) if altitude: if not isinstance(altitude, basestring): altitude = 'km' coordinates.append(self.format_altitude(altitude)) return ", ".join(coordinates) def format_altitude(self, unit='km'): return format.distance(self.altitude, unit) def __str__(self): return self.format() def __unicode__(self): return self.format( None, format.DEGREE, format.PRIME, format.DOUBLE_PRIME ) def __eq__(self, other): return tuple(self) == tuple(other) def __ne__(self, other): return tuple(self) != tuple(other) @classmethod def parse_degrees(cls, degrees, arcminutes, arcseconds, direction=None): negative = degrees < 0 or degrees.startswith('-') degrees = float(degrees or 0) arcminutes = float(arcminutes or 0) arcseconds = float(arcseconds or 0) if arcminutes or arcseconds: more = units.degrees(arcminutes=arcminutes, arcseconds=arcseconds) if negative: degrees -= more else: degrees += more if direction in [None, 'N', 'E']: return degrees elif direction in ['S', 'W']: return -degrees else: raise ValueError("Invalid direction! Should be one of [NSEW].") @classmethod def parse_altitude(cls, distance, unit): if distance is not None: distance = float(distance) CONVERTERS = { 'km': lambda d: d, 'm': lambda d: units.kilometers(meters=d), 'mi': lambda d: units.kilometers(miles=d), 'ft': lambda d: units.kilometers(feet=d), 'nm': lambda d: units.kilometers(nautical=d), 'nmi': lambda d: units.kilometers(nautical=d) } return CONVERTERS[unit](distance) else: return distance @classmethod def from_string(cls, string): """ Create and return a Point instance from a string containing latitude and longitude, and optionally, altitude. 
Latitude and longitude must be in degrees and may be in decimal form or indicate arcminutes and arcseconds (labeled with Unicode prime and double prime, ASCII quote and double quote or 'm' and 's'). The degree symbol is optional and may be included after the decimal places (in decimal form) and before the arcminutes and arcseconds otherwise. Coordinates given from south and west (indicated by S and W suffixes) will be converted to north and east by switching their signs. If no (or partial) cardinal directions are given, north and east are the assumed directions. Latitude and longitude must be separated by at least whitespace, a comma, or a semicolon (each with optional surrounding whitespace). Altitude, if supplied, must be a decimal number with given units. The following unit abbrevations (case-insensitive) are supported: km (kilometers) m (meters) mi (miles) ft (feet) nm, nmi (nautical miles) Some example strings the will work include: 41.5;-81.0 41.5,-81.0 41.5 -81.0 41.5 N -81.0 W -41.5 S;81.0 E 23 26m 22s N 23 27m 30s E 23 26' 22" N 23 27' 30" E """ match = re.match(cls.POINT_PATTERN, string) if match: latitude = cls.parse_degrees( match.group('latitude_degrees'), match.group('latitude_arcminutes'), match.group('latitude_arcseconds'), match.group('latitude_direction') ) longitude = cls.parse_degrees( match.group('longitude_degrees'), match.group('longitude_arcminutes'), match.group('longitude_arcseconds'), match.group('longitude_direction'), ) altitude = cls.parse_altitude( match.group('altitude_distance'), match.group('altitude_units') ) return cls(latitude, longitude, altitude) else: raise ValueError( "Failed to create Point instance from string: unknown format." ) @classmethod def from_sequence(cls, seq): """ Create and return a new Point instance from any iterable with 0 to 3 elements. The elements, if present, must be latitude, longitude, and altitude, respectively. 
""" args = tuple(islice(seq, 4)) return cls(*args) @classmethod def from_point(cls, point): """ Create and return a new Point instance from another Point instance. """ return cls(point.latitude, point.longitude, point.altitude)
mit
maxime-beck/compassion-modules
child_compassion/mappings/child_assessment_mapping.py
4
1583
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2016 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Maxime Beck <mbcompte@gmail.com> # # The licence is in the file __manifest__.py # ############################################################################## from odoo.addons.message_center_compassion.mappings.base_mapping import \ OnrampMapping from datetime import datetime class ChildAssessmentMapping(OnrampMapping): """ Child Assessment Mapping """ ODOO_MODEL = 'compassion.child.cdpr' MAPPING_NAME = 'beneficiary_cdpr' CONNECT_MAPPING = { 'AssesmentType': 'assesment_type', 'CognitiveOutcomeScore': 'cognitive_score', 'CompletionDate': 'date', 'PhysicalOutcomeScore': 'physical_score', 'SocioEmotionalOutcomeScore': 'sociological_score', 'SpiritualOutcomeScore': 'spiritual_score', 'Age': 'age', 'Beneficiary_GlobalID': ('child_id.global_id', 'compassion.child'), 'CDPRAgeGroup': 'cdpr_age_group', 'SourceKitName': 'source_kit_name', } def _process_connect_data(self, connect_data): # Set end date to correct format for Connect if 'CompletionDate' in connect_data: endDateStr = connect_data.get('CompletionDate') endDate = datetime.strptime(endDateStr, "%Y-%m-%d %H:%M:%S") connect_data['CompletionDate'] = endDate.strftime( "%Y-%m-%dT%H:%M:%SZ")
agpl-3.0
SUSE/teuthology
teuthology/orchestra/daemon/cephadmunit.py
1
5540
import logging from teuthology.orchestra.daemon.state import DaemonState log = logging.getLogger(__name__) class CephadmUnit(DaemonState): def __init__(self, remote, role, id_, *command_args, **command_kwargs): super(CephadmUnit, self).__init__( remote, role, id_, *command_args, **command_kwargs) self._set_commands() self.log = command_kwargs.get('logger', log) self.use_cephadm = command_kwargs.get('use_cephadm') self.is_started = command_kwargs.get('started', False) if self.is_started: self._start_logger() def name(self): return '%s.%s' % (self.type_, self.id_) def _get_systemd_cmd(self, action): return ' '.join([ 'sudo', 'systemctl', action, 'ceph-%s@%s.%s' % (self.fsid, self.type_, self.id_), ]) def _set_commands(self): self.start_cmd = self._get_systemd_cmd('start') self.stop_cmd = self._get_systemd_cmd('stop') self.restart_cmd = self._get_systemd_cmd('restart') self.show_cmd = self._get_systemd_cmd('show') self.status_cmd = self._get_systemd_cmd('status') def kill_cmd(self, sig): return ' '.join([ 'sudo', 'docker', 'kill', '-s', str(int(sig)), 'ceph-%s-%s.%s' % (self.fsid, self.type_, self.id_), ]) def _start_logger(self): name = '%s.%s' % (self.type_, self.id_) #self.log.info('_start_logger %s' % name) self.remote_logger = self.remote.run( args=['sudo', 'journalctl', '-f', '-n', '0', '-u', 'ceph-%s@%s.service' % (self.fsid, name) ], logger=logging.getLogger('journalctl@' + self.cluster + '.' 
+ name), label=name, wait=False, check_status=False, ) def _stop_logger(self): name = '%s.%s' % (self.type_, self.id_) # this is a horrible kludge, since i don't know how else to kill # the journalctl process at the other end :( #self.log.info('_stop_logger %s running pkill' % name) self.remote.run( args=['sudo', 'pkill', '-f', ' '.join(['journalctl', '-f', '-n', '0', '-u', 'ceph-%s@%s.service' % (self.fsid, name)]), ], check_status=False, ) #self.log.info('_stop_logger %s waiting') self.remote_logger.wait() self.remote_logger = None #self.log.info('_stop_logger done') def reset(self): """ Does nothing in this implementation """ pass def restart(self, *args, **kwargs): """ Restart with a new command passed in the arguments :param args: positional arguments passed to remote.run :param kwargs: keyword arguments passed to remote.run """ if not self.running(): self.log.info('Restarting %s (starting--it wasn\'t running)...' % self.name()) self._start_logger() self.remote.sh(self.start_cmd) self.is_started = True else: self.log.info('Restarting %s...' % self.name()) self.remote.sh(self.restart_cmd) def restart_with_args(self, extra_args): """ Restart, adding new paramaters to the current command. :param extra_args: Extra keyword arguments to be added. """ raise NotImplementedError def running(self): """ Are we running? """ return self.is_started def signal(self, sig, silent=False): """ Send a signal to associated remote command :param sig: signal to send """ if not silent: self.log.info('Senging signal %d to %s...' % (sig, self.name())) self.remote.sh(self.kill_cmd(sig)) def start(self, timeout=300): """ Start this daemon instance. """ if self.running(): self.log.warn('Restarting a running daemon') self.restart() return self._start_logger() self.remote.run(self.start_cmd) def stop(self, timeout=300): """ Stop this daemon instance. Note: this can raise a CommandFailedError, CommandCrashedError, or ConnectionLostError. 
:param timeout: timeout to pass to orchestra.run.wait() """ if not self.running(): self.log.error('Tried to stop a non-running daemon') return self.log.info('Stopping %s...' % self.name()) self.remote.sh(self.stop_cmd) self.is_started = False self._stop_logger() self.log.info('Stopped %s' % self.name()) # FIXME why are there two wait methods? def wait(self, timeout=300): """ Wait for daemon to exit Wait for daemon to stop (but don't trigger the stop). Pass up any exception. Mark the daemon as not running. """ self.log.info('Waiting for %s to exit...' % self.name()) self.remote.sh(self.stop_cmd) self.is_started = False self._stop_logger() self.log.info('Finished waiting for %s to stop' % self.name()) def wait_for_exit(self): """ clear remote run command value after waiting for exit. """ self.wait()
mit
apexdatasolutions/VistA
Utilities/Dox/PythonScripts/FileManDbCallParser.py
5
6861
#!/usr/bin/env python # A JSON file Parser class to parse VistA FileMan db calls json file and generate # the FileMan db call dependencies among packages. #--------------------------------------------------------------------------- # Copyright 2013 The Open Source Electronic Health Record Agent # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #---------------------------------------------------------------- # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 import glob import re import os import os.path import sys import subprocess import re import csv import argparse import json from datetime import datetime, date, time from CrossReference import CrossReference, Routine, Package, Global from CrossReference import PlatformDependentGenericRoutine from CrossReference import FileManField, FileManFile, FileManFieldFactory from LogManager import logger, initConsoleLogging """ need to ignore some dialog function as the input is indeed the dialog file IEN, not fileman file """ IGNORE_CALL_LIST = ("BLD^DIALOG", "EZBLD^DIALOG") class FileManDbCallParser(object): """ A python class to parse fileman db call in JSON Format and integrate with cross reference """ def __init__(self, crossRef): self._crossRef = crossRef def getCrossReference(self): return self._crossRef def parseFileManDbJSONFile(self, dbJsonFile): logger.info("Start parsing JSON file [%s]" % dbJsonFile) with open(dbJsonFile, 'r') as jsonFile: dbCallJson = json.load(jsonFile) for pkgItem in dbCallJson: """ find all the routines 
under that package """ routines = pkgItem['routines'] for rtn in routines: rtnName = rtn['name'] routine = self._crossRef.getRoutineByName(rtnName) if not routine: logger.warn("Can not find routine [%s]" % rtnName) continue fileManGlobals = rtn['Globals'] self._addFileManGlobals(routine, fileManGlobals) fileManFileNos = rtn['FileMan calls'] self._addFileManDBCalls(routine, fileManFileNos) def _addFileManGlobals(self, routine, fileManGlobals): for fileManGbl in fileManGlobals: fileManFile = self._crossRef.getGlobalByName(fileManGbl) if not fileManFile and fileManGbl[-1] == '(': fileManGblAlt = fileManGbl[:-1] fileManFile = self._crossRef.getGlobalByName(fileManGblAlt) if fileManFile: logger.debug("Classic: Adding fileMan:[%s] to routine:[%s]" % (fileManFile, routine.getName())) routine.addFilemanDbCallGlobal(fileManFile) else: # ignore non-fileman global, could be false positive logger.error("global [%s] is not a valid Fileman file for" " routine %s" % (fileManGbl, routine)) return def isFunctionIgnored(self, callDetail): for item in IGNORE_CALL_LIST: if callDetail.startswith(item): return True return False def _addFileManDBCalls(self, routine, callLists): for callDetail in callLists: if self.isFunctionIgnored(callDetail): logger.debug("Ignore call detail %s" % callDetail) continue fnIdx = callDetail.find('(') if fnIdx < 0: logger.error("Can not extract fileman number from %s" % callDetail) continue callTag = callDetail[:fnIdx] fileNo = callDetail[fnIdx+1:] fileManFile = self._crossRef.getGlobalByFileNo(fileNo) if fileManFile: logger.debug("FileMan: Adding fileMan:[%s] to routine:[%s]" % (fileNo, routine.getName())) routine.addFilemanDbCallGlobal(fileManFile, callTag) else: if self._crossRef.isFileManSubFileByFileNo(fileNo): # subfile subFile = self._crossRef.getFileManSubFileByFileNo(fileNo) rootFile = self._crossRef.getSubFileRootByFileNo(fileNo) assert rootFile logger.debug("FileMan: Adding subFile:[%s] to routine:[%s]" % (subFile, routine.getName())) 
routine.addFilemanDbCallGlobal(subFile, callTag) else: logger.error("file #%s[%s] is not a valid fileman file, for" " routine [%s]" % (fileNo, callDetail, routine)) """ main entry """ from CallerGraphParser import createCallGraphLogAugumentParser from CallerGraphParser import parseAllCallGraphLogWithArg from DataDictionaryParser import createDataDictionaryAugumentParser from DataDictionaryParser import parseDataDictionaryLogFile def createFileManDBFileAugumentParser(): parser = argparse.ArgumentParser(add_help=False) # no help page argGroup = parser.add_argument_group("FileMan DB Calls JSON file Parser Auguments") argGroup.add_argument('-db', '--filemanDbJson', required=True, help='fileman db call information in JSON format') return parser def parseFileManDBJSONFile(crossRef, fileManJsonFile): fileDbCallParser = FileManDbCallParser(crossRef) fileDbCallParser.parseFileManDbJSONFile(fileManJsonFile) return fileDbCallParser if __name__ == '__main__': callLogArgParser = createCallGraphLogAugumentParser() dataDictArgParser = createDataDictionaryAugumentParser() filemanDBJsonArgParser = createFileManDBFileAugumentParser() parser = argparse.ArgumentParser( description='VistA Cross-Reference FileMan DB Call JSON Files Parser', parents=[callLogArgParser, dataDictArgParser, filemanDBJsonArgParser]) result = parser.parse_args(); initConsoleLogging() logFileParser = parseAllCallGraphLogWithArg(result) crossRef = logFileParser.getCrossReference() DDFileParser = parseDataDictionaryLogFile(crossRef, result.fileSchemaDir) fileDbCallParser = parseFileManDBJSONFile(crossRef, result.filemanDbJson) logger.info("Total # of fileman subfiles are %s" % len(crossRef.getAllFileManSubFiles()))
apache-2.0
edx/edx-val
edxval/wsgi.py
1
1194
""" WSGI config for edxval project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edxval.settings.base") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application # noqa: E402; pylint: disable=wrong-import-position application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
agpl-3.0
WymanLyu/WYDemo
FXAudio/FXAudio/FXAudio/webRTC/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_gencfgs.py
3
3488
#!/usr/bin/env python # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license # that can be found in the LICENSE file in the root of the source # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. """Generate .json files with which the APM module can be tested using the apm_quality_assessment.py script. """ import logging import os import quality_assessment.data_access as data_access OUTPUT_PATH = os.path.abspath('apm_configs') def _GenerateDefaultOverridden(config_override): """Generates one or more APM overriden configurations. For each item in config_override, it overrides the default configuration and writes a new APM configuration file. The default settings are loaded via "-all_default". Check "src/webrtc/modules/audio_processing/test/audioproc_float.cc" and search for "if (FLAGS_all_default) {". For instance, in 55eb6d621489730084927868fed195d3645a9ec9 the default is this: settings.use_aec = rtc::Optional<bool>(true); settings.use_aecm = rtc::Optional<bool>(false); settings.use_agc = rtc::Optional<bool>(true); settings.use_bf = rtc::Optional<bool>(false); settings.use_ed = rtc::Optional<bool>(false); settings.use_hpf = rtc::Optional<bool>(true); settings.use_ie = rtc::Optional<bool>(false); settings.use_le = rtc::Optional<bool>(true); settings.use_ns = rtc::Optional<bool>(true); settings.use_ts = rtc::Optional<bool>(true); settings.use_vad = rtc::Optional<bool>(true); Args: config_override: dict of APM configuration file names as keys; the values are dict instances encoding the audioproc_f flags. 
""" for config_filename in config_override: config = config_override[config_filename] config['-all_default'] = None config_filepath = os.path.join(OUTPUT_PATH, 'default-{}.json'.format( config_filename)) logging.debug('config file <%s> | %s', config_filepath, config) data_access.AudioProcConfigFile.Save(config_filepath, config) logging.info('config file created: <%s>', config_filepath) def _GenerateAllDefaultButOne(): """Disables the flags enabled by default one-by-one. """ config_sets = { 'no_AEC': {'-aec': 0,}, 'no_AGC': {'-agc': 0,}, 'no_HP_filter': {'-hpf': 0,}, 'no_level_estimator': {'-le': 0,}, 'no_noise_suppressor': {'-ns': 0,}, 'no_transient_suppressor': {'-ts': 0,}, 'no_vad': {'-vad': 0,}, } _GenerateDefaultOverridden(config_sets) def _GenerateAllDefaultPlusOne(): """Enables the flags disabled by default one-by-one. """ config_sets = { 'with_AECM': {'-aec': 0, '-aecm': 1,}, # AEC and AECM are exclusive. 'with_AGC_limiter': {'-agc_limiter': 1,}, 'with_AEC_delay_agnostic': {'-delay_agnostic': 1,}, 'with_drift_compensation': {'-drift_compensation': 1,}, 'with_residual_echo_detector': {'-ed': 1,}, 'with_AEC_extended_filter': {'-extended_filter': 1,}, 'with_intelligibility_enhancer': {'-ie': 1,}, 'with_LC': {'-lc': 1,}, 'with_refined_adaptive_filter': {'-refined_adaptive_filter': 1,}, } _GenerateDefaultOverridden(config_sets) def main(): logging.basicConfig(level=logging.INFO) _GenerateAllDefaultPlusOne() _GenerateAllDefaultButOne() if __name__ == '__main__': main()
apache-2.0
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-dns/azure/mgmt/dns/aio/_dns_management_client.py
1
7940
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import DnsManagementClientConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): """This is a fake class to support current implemetation of MultiApiClientMixin." Will be removed in final version of multiapi azure-core based client """ pass class DnsManagementClient(MultiApiClientMixin, _SDKClient): """The DNS Management Client. This ready contains multiple API versions, to help you deal with all of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). By default, it uses the latest API version available on public Azure. For production, you should stick to a particular api-version and/or profile. The profile sets a mapping between an operation group and its API version. The api-version parameter sets the default API version if the operation group is not described in the profile. :param credential: Credential needed for the client to connect to Azure. 
:type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: Specifies the Azure subscription ID, which uniquely identifies the Microsoft Azure subscription. :type subscription_id: str :param api_version: API version to use if no profile is provided, or if missing in profile. :type api_version: str :param base_url: Service URL :type base_url: str :param profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ DEFAULT_API_VERSION = '2018-05-01' _PROFILE_TAG = "azure.mgmt.dns.DnsManagementClient" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, }}, _PROFILE_TAG + " latest" ) def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, api_version: Optional[str] = None, base_url: Optional[str] = None, profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) -> None: if not base_url: base_url = 'https://management.azure.com' self._config = DnsManagementClientConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(DnsManagementClient, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): """Module depends on the API version: * 2016-04-01: :mod:`v2016_04_01.models<azure.mgmt.dns.v2016_04_01.models>` * 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.dns.v2018_03_01_preview.models>` * 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.dns.v2018_05_01.models>` """ if api_version == '2016-04-01': from ..v2016_04_01 import models return models elif api_version == '2018-03-01-preview': from 
..v2018_03_01_preview import models return models elif api_version == '2018-05-01': from ..v2018_05_01 import models return models raise ValueError("API version {} is not available".format(api_version)) @property def dns_resource_reference(self): """Instance depends on the API version: * 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.aio.operations.DnsResourceReferenceOperations>` """ api_version = self._get_api_version('dns_resource_reference') if api_version == '2018-05-01': from ..v2018_05_01.aio.operations import DnsResourceReferenceOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'dns_resource_reference'".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def record_sets(self): """Instance depends on the API version: * 2016-04-01: :class:`RecordSetsOperations<azure.mgmt.dns.v2016_04_01.aio.operations.RecordSetsOperations>` * 2018-03-01-preview: :class:`RecordSetsOperations<azure.mgmt.dns.v2018_03_01_preview.aio.operations.RecordSetsOperations>` * 2018-05-01: :class:`RecordSetsOperations<azure.mgmt.dns.v2018_05_01.aio.operations.RecordSetsOperations>` """ api_version = self._get_api_version('record_sets') if api_version == '2016-04-01': from ..v2016_04_01.aio.operations import RecordSetsOperations as OperationClass elif api_version == '2018-03-01-preview': from ..v2018_03_01_preview.aio.operations import RecordSetsOperations as OperationClass elif api_version == '2018-05-01': from ..v2018_05_01.aio.operations import RecordSetsOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'record_sets'".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def zones(self): """Instance depends on the API version: * 
2016-04-01: :class:`ZonesOperations<azure.mgmt.dns.v2016_04_01.aio.operations.ZonesOperations>` * 2018-03-01-preview: :class:`ZonesOperations<azure.mgmt.dns.v2018_03_01_preview.aio.operations.ZonesOperations>` * 2018-05-01: :class:`ZonesOperations<azure.mgmt.dns.v2018_05_01.aio.operations.ZonesOperations>` """ api_version = self._get_api_version('zones') if api_version == '2016-04-01': from ..v2016_04_01.aio.operations import ZonesOperations as OperationClass elif api_version == '2018-03-01-preview': from ..v2018_03_01_preview.aio.operations import ZonesOperations as OperationClass elif api_version == '2018-05-01': from ..v2018_05_01.aio.operations import ZonesOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'zones'".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self, *exc_details): await self._client.__aexit__(*exc_details)
mit
stone5495/NewsBlur
vendor/feedvalidator/rss.py
16
1563
"""$Id: rss.py 699 2006-09-25 02:01:18Z rubys $""" __author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>" __version__ = "$Revision: 699 $" __date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $" __copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim" from base import validatorBase from logging import * from validators import noduplicates # # Rss element. The only valid child element is "channel" # class rss(validatorBase): def do_channel(self): from channel import rss20Channel return rss20Channel(), noduplicates() def do_access_restriction(self): from extension import access_restriction return access_restriction(), noduplicates() def getExpectedAttrNames(self): return [(None, u'version')] def prevalidate(self): self.setFeedType(TYPE_RSS2) # could be anything in the 0.9x family, don't really care self.version = "2.0" if (None,'version') not in self.attrs.getNames(): self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"})) elif [e for e in self.dispatcher.loggedEvents if e.__class__==ValidDoctype]: self.version = self.attrs[(None,'version')] if self.attrs[(None,'version')]<>'0.91': self.log(InvalidDoctype({"parent":self.parent.name, "element":self.name, "attr":"version"})) else: self.version = self.attrs[(None,'version')] def validate(self): if not "channel" in self.children: self.log(MissingElement({"parent":self.name, "element":"channel"}))
mit
conejoninja/xbmc-seriesly
core/scrapertools.py
1
52047
#------------------------------------------------------------ # -*- coding: utf-8 -*- #------------------------------------------------------------ # Download Tools # Based on the code from VideoMonkey XBMC Plugin #------------------------------------------------------------ # seriesly # http://blog.tvalacarta.info/plugin-xbmc/seriesly/ #------------------------------------------------------------ # Creado por: # Jesús (tvalacarta@gmail.com) # jurrabi (jurrabi@gmail.com) # bandavi (xbandavix@gmail.com) # Licencia: GPL (http://www.gnu.org/licenses/gpl-3.0.html) #------------------------------------------------------------ # Historial de cambios: #------------------------------------------------------------ import urlparse,urllib2,urllib import time import os import config import logger import re import downloadtools import socket logger.info("[scrapertools.py] init") # True - Muestra las cabeceras HTTP en el log # False - No las muestra DEBUG_LEVEL = False CACHE_ACTIVA = "0" # Automatica CACHE_SIEMPRE = "1" # Cachear todo CACHE_NUNCA = "2" # No cachear nada CACHE_PATH = config.get_setting("cache.dir") logger.info("[scrapertools.py] CACHE_PATH="+CACHE_PATH) DEBUG = False def cache_page(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modo_cache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()): return cachePage(url,post,headers,modo_cache,timeout=timeout) # TODO: (3.1) Quitar el parámetro modoCache (ahora se hace por configuración) # TODO: (3.2) Usar notación minusculas_con_underscores para funciones y variables como recomienda Python http://www.python.org/dev/peps/pep-0008/ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modoCache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()): logger.info("[scrapertools.py] cachePage url="+url) modoCache = 
config.get_setting("cache.mode") ''' if config.get_platform()=="plex": from PMS import HTTP try: logger.info("url="+url) data = HTTP.Request(url) logger.info("descargada") except: data = "" logger.error("Error descargando "+url) import sys for line in sys.exc_info(): logger.error( "%s" % line ) return data ''' # CACHE_NUNCA: Siempre va a la URL a descargar # obligatorio para peticiones POST if modoCache == CACHE_NUNCA or post is not None: logger.info("[scrapertools.py] MODO_CACHE=2 (no cachear)") try: data = downloadpage(url,post,headers, timeout=timeout) except: data="" # CACHE_SIEMPRE: Siempre descarga de cache, sin comprobar fechas, excepto cuando no está elif modoCache == CACHE_SIEMPRE: logger.info("[scrapertools.py] MODO_CACHE=1 (cachear todo)") # Obtiene los handlers del fichero en la cache cachedFile, newFile = getCacheFileNames(url) # Si no hay ninguno, descarga if cachedFile == "": logger.debug("[scrapertools.py] No está en cache") # Lo descarga data = downloadpage(url,post,headers) # Lo graba en cache outfile = open(newFile,"w") outfile.write(data) outfile.flush() outfile.close() logger.info("[scrapertools.py] Grabado a " + newFile) else: logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) infile = open( cachedFile ) data = infile.read() infile.close() # CACHE_ACTIVA: Descarga de la cache si no ha cambiado else: logger.info("[scrapertools.py] MODO_CACHE=0 (automática)") # Datos descargados data = "" # Obtiene los handlers del fichero en la cache cachedFile, newFile = getCacheFileNames(url) # Si no hay ninguno, descarga if cachedFile == "": logger.debug("[scrapertools.py] No está en cache") # Lo descarga data = downloadpage(url,post,headers) # Lo graba en cache outfile = open(newFile,"w") outfile.write(data) outfile.flush() outfile.close() logger.info("[scrapertools.py] Grabado a " + newFile) # Si sólo hay uno comprueba el timestamp (hace una petición if-modified-since) else: # Extrae el timestamp antiguo del nombre del fichero oldtimestamp = 
time.mktime( time.strptime(cachedFile[-20:-6], "%Y%m%d%H%M%S") ) logger.info("[scrapertools.py] oldtimestamp="+cachedFile[-20:-6]) logger.info("[scrapertools.py] oldtimestamp="+time.ctime(oldtimestamp)) # Hace la petición updated,data = downloadtools.downloadIfNotModifiedSince(url,oldtimestamp) # Si ha cambiado if updated: # Borra el viejo logger.debug("[scrapertools.py] Borrando "+cachedFile) os.remove(cachedFile) # Graba en cache el nuevo outfile = open(newFile,"w") outfile.write(data) outfile.flush() outfile.close() logger.info("[scrapertools.py] Grabado a " + newFile) # Devuelve el contenido del fichero de la cache else: logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) infile = open( cachedFile ) data = infile.read() infile.close() return data def getCacheFileNames(url): # Obtiene el directorio de la cache para esta url siteCachePath = getSiteCachePath(url) # Obtiene el ID de la cache (md5 de la URL) cacheId = get_md5(url) logger.debug("[scrapertools.py] cacheId="+cacheId) # Timestamp actual nowtimestamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) logger.debug("[scrapertools.py] nowtimestamp="+nowtimestamp) # Nombre del fichero # La cache se almacena en una estructura CACHE + URL ruta = os.path.join( siteCachePath , cacheId[:2] , cacheId[2:] ) newFile = os.path.join( ruta , nowtimestamp + ".cache" ) logger.debug("[scrapertools.py] newFile="+newFile) if not os.path.exists(ruta): os.makedirs( ruta ) # Busca ese fichero en la cache cachedFile = getCachedFile(siteCachePath,cacheId) return cachedFile, newFile # Busca ese fichero en la cache def getCachedFile(siteCachePath,cacheId): mascara = os.path.join(siteCachePath,cacheId[:2],cacheId[2:],"*.cache") logger.debug("[scrapertools.py] mascara="+mascara) import glob ficheros = glob.glob( mascara ) logger.debug("[scrapertools.py] Hay %d ficheros con ese id" % len(ficheros)) cachedFile = "" # Si hay más de uno, los borra (serán pruebas de programación) y descarga de nuevo if len(ficheros)>1: 
logger.debug("[scrapertools.py] Cache inválida") for fichero in ficheros: logger.debug("[scrapertools.py] Borrando "+fichero) os.remove(fichero) cachedFile = "" # Hay uno: fichero cacheado elif len(ficheros)==1: cachedFile = ficheros[0] return cachedFile def getSiteCachePath(url): # Obtiene el dominio principal de la URL dominio = urlparse.urlparse(url)[1] logger.debug("[scrapertools.py] dominio="+dominio) nombres = dominio.split(".") if len(nombres)>1: dominio = nombres[len(nombres)-2]+"."+nombres[len(nombres)-1] else: dominio = nombres[0] logger.debug("[scrapertools.py] dominio="+dominio) # Crea un directorio en la cache para direcciones de ese dominio siteCachePath = os.path.join( CACHE_PATH , dominio ) if not os.path.exists(CACHE_PATH): try: os.mkdir( CACHE_PATH ) except: logger.error("[scrapertools.py] Error al crear directorio "+CACHE_PATH) if not os.path.exists(siteCachePath): try: os.mkdir( siteCachePath ) except: logger.error("[scrapertools.py] Error al crear directorio "+siteCachePath) logger.debug("[scrapertools.py] siteCachePath="+siteCachePath) return siteCachePath def cachePage2(url,headers): logger.info("Descargando " + url) inicio = time.clock() req = urllib2.Request(url) for header in headers: logger.info(header[0]+":"+header[1]) req.add_header(header[0], header[1]) try: response = urllib2.urlopen(req) except: req = urllib2.Request(url.replace(" ","%20")) for header in headers: logger.info(header[0]+":"+header[1]) req.add_header(header[0], header[1]) response = urllib2.urlopen(req) data=response.read() response.close() fin = time.clock() logger.info("Descargado en %d segundos " % (fin-inicio+1)) ''' outfile = open(localFileName,"w") outfile.write(data) outfile.flush() outfile.close() logger.info("Grabado a " + localFileName) ''' return data def cachePagePost(url,post): logger.info("Descargando " + url) inicio = time.clock() req = urllib2.Request(url,post) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) 
Gecko/2008092417 Firefox/3.0.3') try: response = urllib2.urlopen(req) except: req = urllib2.Request(url.replace(" ","%20"),post) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3') response = urllib2.urlopen(req) data=response.read() response.close() fin = time.clock() logger.info("Descargado en %d segundos " % (fin-inicio+1)) ''' outfile = open(localFileName,"w") outfile.write(data) outfile.flush() outfile.close() logger.info("Grabado a " + localFileName) ''' return data class NoRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code infourl.code = code return infourl http_error_300 = http_error_302 http_error_301 = http_error_302 http_error_303 = http_error_302 http_error_307 = http_error_302 def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],follow_redirects=True, timeout=socket.getdefaulttimeout()): logger.info("[scrapertools.py] downloadpage") logger.info("[scrapertools.py] url="+url) if post is not None: logger.info("[scrapertools.py] post="+post) else: logger.info("[scrapertools.py] post=None") # --------------------------------- # Instala las cookies # --------------------------------- # Inicializa la librería de las cookies ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' ) logger.info("[scrapertools.py] ficherocookies="+ficherocookies) cj = None ClientCookie = None cookielib = None # Let's see if cookielib is available try: logger.info("[scrapertools.py] Importando cookielib") import cookielib except ImportError: logger.info("[scrapertools.py] cookielib no disponible") # If importing cookielib fails # let's try ClientCookie try: logger.info("[scrapertools.py] Importando ClientCookie") import ClientCookie except ImportError: 
logger.info("[scrapertools.py] ClientCookie no disponible") # ClientCookie isn't available either urlopen = urllib2.urlopen Request = urllib2.Request else: logger.info("[scrapertools.py] ClientCookie disponible") # imported ClientCookie urlopen = ClientCookie.urlopen Request = ClientCookie.Request cj = ClientCookie.MozillaCookieJar() else: logger.info("[scrapertools.py] cookielib disponible") # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request logger.info("[scrapertools.py] cambio en politicas") #cj = cookielib.LWPCookieJar(ficherocookies,policy=MyCookiePolicy()) #cj = cookielib.MozillaCookieJar(ficherocookies,policy=MyCookiePolicy) #cj = cookielib.FileCookieJar(ficherocookies) try: cj = cookielib.MozillaCookieJar() cj.set_policy(MyCookiePolicy()) except: import traceback logger.info(traceback.format_exc()) if cj is not None: # we successfully imported # one of the two cookie handling modules logger.info("[scrapertools.py] Hay cookies") if os.path.isfile(ficherocookies): logger.info("[scrapertools.py] Leyendo fichero cookies") # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies,ignore_discard=True) except: logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra") os.remove(ficherocookies) # Now we need to get our Cookie Jar # installed in the opener; # for fetching URLs if cookielib is not None: logger.info("[scrapertools.py] opener usando urllib2 (cookielib)") # if we use cookielib # then we get the HTTPCookieProcessor # and install the opener in urllib2 if not follow_redirects: opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler()) else: opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) else: logger.info("[scrapertools.py] opener usando ClientCookie") # if we use ClientCookie # then we 
get the HTTPCookieProcessor # and install the opener in ClientCookie opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)) ClientCookie.install_opener(opener) # ------------------------------------------------- # Cookies instaladas, lanza la petición # ------------------------------------------------- # Contador inicio = time.clock() # Diccionario para las cabeceras txheaders = {} # Construye el request if post is None: logger.info("[scrapertools.py] petición GET") else: logger.info("[scrapertools.py] petición POST") # Añade las cabeceras logger.info("[scrapertools.py] ---------------------------") for header in headers: logger.info("[scrapertools.py] header %s=%s" % (str(header[0]),str(header[1])) ) txheaders[header[0]]=header[1] logger.info("[scrapertools.py] ---------------------------") req = Request(url, post, txheaders) try: if timeout is None: logger.info("[scrapertools.py] Peticion sin timeout") handle=urlopen(req) else: logger.info("[scrapertools.py] Peticion con timeout") #Para todas las versiones: deftimeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) handle=urlopen(req) socket.setdefaulttimeout(deftimeout) logger.info("[scrapertools.py] ...hecha") # Actualiza el almacén de cookies logger.info("[scrapertools.py] Grabando cookies...") cj.save(ficherocookies,ignore_discard=True) # ,ignore_expires=True logger.info("[scrapertools.py] ...hecho") # Lee los datos y cierra if handle.info().get('Content-Encoding') == 'gzip': logger.info("[scrapertools.py] gzipped") fin = inicio import StringIO data=handle.read() compressedstream = StringIO.StringIO(data) import gzip gzipper = gzip.GzipFile(fileobj=compressedstream) data = gzipper.read() gzipper.close() fin = time.clock() else: logger.info("[scrapertools.py] normal") data = handle.read() except urllib2.HTTPError,e: import traceback logger.info(traceback.format_exc()) data = e.read() #logger.info("data="+repr(data)) return data info = handle.info() 
logger.info("[scrapertools.py] Respuesta") logger.info("[scrapertools.py] ---------------------------") for header in info: logger.info("[scrapertools.py] "+header+"="+info[header]) handle.close() logger.info("[scrapertools.py] ---------------------------") ''' # Lanza la petición try: response = urllib2.urlopen(req) # Si falla la repite sustituyendo caracteres especiales except: req = urllib2.Request(url.replace(" ","%20")) # Añade las cabeceras for header in headers: req.add_header(header[0],header[1]) response = urllib2.urlopen(req) ''' # Tiempo transcurrido fin = time.clock() logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1)) return data import cookielib class MyCookiePolicy(cookielib.DefaultCookiePolicy): def set_ok(self, cookie, request): #logger.info("set_ok Cookie "+repr(cookie)+" request "+repr(request)) #cookie.discard = False #cookie. devuelve = cookielib.DefaultCookiePolicy.set_ok(self, cookie, request) #logger.info("set_ok "+repr(devuelve)) return devuelve def return_ok(self, cookie, request): #logger.info("return_ok Cookie "+repr(cookie)+" request "+repr(request)) #cookie.discard = False devuelve = cookielib.DefaultCookiePolicy.return_ok(self, cookie, request) #logger.info("return_ok "+repr(devuelve)) return devuelve def domain_return_ok(self, domain, request): #logger.info("domain_return_ok domain "+repr(domain)+" request "+repr(request)) devuelve = cookielib.DefaultCookiePolicy.domain_return_ok(self, domain, request) #logger.info("domain_return_ok "+repr(devuelve)) return devuelve def path_return_ok(self,path, request): #logger.info("path_return_ok path "+repr(path)+" request "+repr(request)) devuelve = cookielib.DefaultCookiePolicy.path_return_ok(self, path, request) #logger.info("path_return_ok "+repr(devuelve)) return devuelve def downloadpagewithcookies(url): # --------------------------------- # Instala las cookies # --------------------------------- # Inicializa la librería de las cookies ficherocookies = 
os.path.join( config.get_data_path(), 'cookies.dat' ) logger.info("[scrapertools.py] Cookiefile="+ficherocookies) cj = None ClientCookie = None cookielib = None # Let's see if cookielib is available try: import cookielib except ImportError: # If importing cookielib fails # let's try ClientCookie try: import ClientCookie except ImportError: # ClientCookie isn't available either urlopen = urllib2.urlopen Request = urllib2.Request else: # imported ClientCookie urlopen = ClientCookie.urlopen Request = ClientCookie.Request cj = ClientCookie.MozillaCookieJar() else: # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if cj is not None: # we successfully imported # one of the two cookie handling modules if os.path.isfile(ficherocookies): # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra") os.remove(ficherocookies) # Now we need to get our Cookie Jar # installed in the opener; # for fetching URLs if cookielib is not None: # if we use cookielib # then we get the HTTPCookieProcessor # and install the opener in urllib2 opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) else: # if we use ClientCookie # then we get the HTTPCookieProcessor # and install the opener in ClientCookie opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)) ClientCookie.install_opener(opener) #print "-------------------------------------------------------" theurl = url # an example url that sets a cookie, # try different urls here and see the cookie collection you can make ! 
#txheaders = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3', # 'Referer':'http://www.megavideo.com/?s=signup'} txheaders = { 'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3', 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Host':'www.meristation.com', 'Accept-Language':'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3', 'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive':'300', 'Connection':'keep-alive'} # fake a user agent, some websites (like google) don't like automated exploration req = Request(theurl, None, txheaders) handle = urlopen(req) cj.save(ficherocookies) # save the cookies again data=handle.read() handle.close() return data def downloadpageWithoutCookies(url): logger.info("[scrapertools.py] Descargando " + url) inicio = time.clock() req = urllib2.Request(url) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14') req.add_header('X-Requested-With','XMLHttpRequest') try: response = urllib2.urlopen(req) except: req = urllib2.Request(url.replace(" ","%20")) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14') response = urllib2.urlopen(req) data=response.read() response.close() fin = time.clock() logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1)) return data def downloadpageGzip(url): # Inicializa la librería de las cookies ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' ) logger.info("Cookiefile="+ficherocookies) inicio = time.clock() cj = None ClientCookie = None cookielib = None # Let's see if cookielib is available try: import cookielib except ImportError: # If importing cookielib fails # let's try ClientCookie try: import ClientCookie except ImportError: # ClientCookie isn't available either urlopen = urllib2.urlopen 
Request = urllib2.Request else: # imported ClientCookie urlopen = ClientCookie.urlopen Request = ClientCookie.Request cj = ClientCookie.MozillaCookieJar() else: # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods # --------------------------------- # Instala las cookies # --------------------------------- if cj is not None: # we successfully imported # one of the two cookie handling modules if os.path.isfile(ficherocookies): # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra") os.remove(ficherocookies) # Now we need to get our Cookie Jar # installed in the opener; # for fetching URLs if cookielib is not None: # if we use cookielib # then we get the HTTPCookieProcessor # and install the opener in urllib2 opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) else: # if we use ClientCookie # then we get the HTTPCookieProcessor # and install the opener in ClientCookie opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)) ClientCookie.install_opener(opener) #print "-------------------------------------------------------" theurl = url # an example url that sets a cookie, # try different urls here and see the cookie collection you can make ! 
#txheaders = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3', # 'Referer':'http://www.megavideo.com/?s=signup'} import httplib parsedurl = urlparse.urlparse(url) logger.info("parsedurl="+str(parsedurl)) txheaders = { 'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3', 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language':'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3', 'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept-Encoding':'gzip,deflate', 'Keep-Alive':'300', 'Connection':'keep-alive', 'Referer':parsedurl[0]+"://"+parsedurl[1]} logger.info(str(txheaders)) # fake a user agent, some websites (like google) don't like automated exploration req = Request(theurl, None, txheaders) handle = urlopen(req) cj.save(ficherocookies) # save the cookies again data=handle.read() handle.close() fin = time.clock() logger.info("[scrapertools.py] Descargado 'Gzipped data' en %d segundos " % (fin-inicio+1)) # Descomprime el archivo de datos Gzip try: fin = inicio import StringIO compressedstream = StringIO.StringIO(data) import gzip gzipper = gzip.GzipFile(fileobj=compressedstream) data1 = gzipper.read() gzipper.close() fin = time.clock() logger.info("[scrapertools.py] 'Gzipped data' descomprimido en %d segundos " % (fin-inicio+1)) return data1 except: return data def printMatches(matches): i = 0 for match in matches: logger.info("[scrapertools.py] %d %s" % (i , match)) i = i + 1 def get_match(data,patron,index=0): matches = re.findall( patron , data , flags=re.DOTALL ) return matches[index] def find_single_match(data,patron,index=0): try: matches = re.findall( patron , data , flags=re.DOTALL ) return matches[index] except: return "" def entityunescape(cadena): return unescape(cadena) def unescape(text): """Removes HTML or XML character references and entities from a text string. keep &amp;, &gt;, &lt; in the source code. 
from Fredrik Lundh http://effbot.org/zone/re-sub.htm#unescape-html """ def fixup(m): text = m.group(0) if text[:2] == "&#": # character reference try: if text[:3] == "&#x": return unichr(int(text[3:-1], 16)).encode("utf-8") else: return unichr(int(text[2:-1])).encode("utf-8") except ValueError: logger.info("error de valor") pass else: # named entity try: ''' if text[1:-1] == "amp": text = "&amp;amp;" elif text[1:-1] == "gt": text = "&amp;gt;" elif text[1:-1] == "lt": text = "&amp;lt;" else: print text[1:-1] text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8") ''' import htmlentitydefs text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8") except KeyError: logger.info("keyerror") pass except: pass return text # leave as is return re.sub("&#?\w+;", fixup, text) # Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8 def decodeHtmlentities(string): string = entitiesfix(string) entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});") def substitute_entity(match): from htmlentitydefs import name2codepoint as n2cp ent = match.group(2) if match.group(1) == "#": return unichr(int(ent)).encode('utf-8') else: cp = n2cp.get(ent) if cp: return unichr(cp).encode('utf-8') else: return match.group() return entity_re.subn(substitute_entity, string)[0] def entitiesfix(string): # Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ). 
string = string.replace("&aacute","&aacute;") string = string.replace("&eacute","&eacute;") string = string.replace("&iacute","&iacute;") string = string.replace("&oacute","&oacute;") string = string.replace("&uacute","&uacute;") string = string.replace("&Aacute","&Aacute;") string = string.replace("&Eacute","&Eacute;") string = string.replace("&Iacute","&Iacute;") string = string.replace("&Oacute","&Oacute;") string = string.replace("&Uacute","&Uacute;") string = string.replace("&uuml" ,"&uuml;") string = string.replace("&Uuml" ,"&Uuml;") string = string.replace("&ntilde","&ntilde;") string = string.replace("&#191" ,"&#191;") string = string.replace("&#161" ,"&#161;") string = string.replace(";;" ,";") return string def htmlclean(cadena): cadena = cadena.replace("<center>","") cadena = cadena.replace("</center>","") cadena = cadena.replace("<cite>","") cadena = cadena.replace("</cite>","") cadena = cadena.replace("<em>","") cadena = cadena.replace("</em>","") cadena = cadena.replace("<b>","") cadena = cadena.replace("</b>","") cadena = cadena.replace("<u>","") cadena = cadena.replace("</u>","") cadena = cadena.replace("<li>","") cadena = cadena.replace("</li>","") cadena = cadena.replace("<tbody>","") cadena = cadena.replace("</tbody>","") cadena = cadena.replace("<tr>","") cadena = cadena.replace("</tr>","") cadena = cadena.replace("<![CDATA[","") cadena = cadena.replace("<Br />","") cadena = cadena.replace("<BR />","") cadena = cadena.replace("<Br>","") cadena = re.compile("<script.*?</script>",re.DOTALL).sub("",cadena) cadena = re.compile("<option[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</option>","") cadena = re.compile("<i[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</iframe>","") cadena = cadena.replace("</i>","") cadena = re.compile("<table[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</table>","") cadena = re.compile("<td[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</td>","") cadena = 
re.compile("<div[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</div>","") cadena = re.compile("<dd[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</dd>","") cadena = re.compile("<font[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</font>","") cadena = re.compile("<strong[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</strong>","") cadena = re.compile("<small[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</small>","") cadena = re.compile("<span[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</span>","") cadena = re.compile("<a[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</a>","") cadena = re.compile("<p[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</p>","") cadena = re.compile("<ul[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</ul>","") cadena = re.compile("<h1[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</h1>","") cadena = re.compile("<h2[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</h2>","") cadena = re.compile("<h3[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</h3>","") cadena = re.compile("<h4[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</h4>","") cadena = re.compile("<!--[^-]+-->",re.DOTALL).sub("",cadena) cadena = re.compile("<img[^>]*>",re.DOTALL).sub("",cadena) cadena = re.compile("<br[^>]*>",re.DOTALL).sub("",cadena) cadena = re.compile("<object[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</object>","") cadena = re.compile("<param[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</param>","") cadena = re.compile("<embed[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</embed>","") cadena = re.compile("<title[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("</title>","") cadena = re.compile("<link[^>]*>",re.DOTALL).sub("",cadena) cadena = cadena.replace("\t","") cadena = entityunescape(cadena) return cadena def slugify(title): #print title # Sustituye 
acentos y eñes title = title.replace("Á","a") title = title.replace("É","e") title = title.replace("Í","i") title = title.replace("Ó","o") title = title.replace("Ú","u") title = title.replace("á","a") title = title.replace("é","e") title = title.replace("í","i") title = title.replace("ó","o") title = title.replace("ú","u") title = title.replace("À","a") title = title.replace("È","e") title = title.replace("Ì","i") title = title.replace("Ò","o") title = title.replace("Ù","u") title = title.replace("à","a") title = title.replace("è","e") title = title.replace("ì","i") title = title.replace("ò","o") title = title.replace("ù","u") title = title.replace("ç","c") title = title.replace("Ç","C") title = title.replace("Ñ","n") title = title.replace("ñ","n") title = title.replace("/","-") title = title.replace("&amp;","&") # Pasa a minúsculas title = title.lower().strip() # Elimina caracteres no válidos validchars = "abcdefghijklmnopqrstuvwxyz1234567890- " title = ''.join(c for c in title if c in validchars) # Sustituye espacios en blanco duplicados y saltos de línea title = re.compile("\s+",re.DOTALL).sub(" ",title) # Sustituye espacios en blanco por guiones title = re.compile("\s",re.DOTALL).sub("-",title.strip()) # Sustituye espacios en blanco duplicados y saltos de línea title = re.compile("\-+",re.DOTALL).sub("-",title) # Arregla casos especiales if title.startswith("-"): title = title [1:] if title=="": title = "-"+str(time.time()) return title def remove_show_from_title(title,show): #print slugify(title)+" == "+slugify(show) # Quita el nombre del programa del título if slugify(title).startswith(slugify(show)): # Convierte a unicode primero, o el encoding se pierde title = unicode(title,"utf-8","replace") show = unicode(show,"utf-8","replace") title = title[ len(show) : ].strip() if title.startswith("-"): title = title[ 1: ].strip() if title=="": title = str( time.time() ) # Vuelve a utf-8 title = title.encode("utf-8","ignore") show = show.encode("utf-8","ignore") 
return title def getRandom(str): return get_md5(str) def getLocationHeaderFromResponse(url): return get_header_from_response(url,header_to_get="location") def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]): header_to_get = header_to_get.lower() logger.info("[scrapertools.py] get_header_from_response url="+url+", header_to_get="+header_to_get) if post is not None: logger.info("[scrapertools.py] post="+post) else: logger.info("[scrapertools.py] post=None") # Inicializa la librería de las cookies ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' ) logger.info("[scrapertools.py] ficherocookies="+ficherocookies) cj = None ClientCookie = None cookielib = None import cookielib # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if os.path.isfile(ficherocookies): logger.info("[scrapertools.py] Leyendo fichero cookies") # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra") os.remove(ficherocookies) if header_to_get=="location": opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler()) else: opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) # Contador inicio = time.clock() # Diccionario para las cabeceras txheaders = {} # Traza la peticion if post is None: logger.info("[scrapertools.py] petición GET") else: logger.info("[scrapertools.py] petición POST") # Login y password Filenium # 
http://abcd%40gmail.com:mipass@filenium.com/get/Oi8vd3d3/LmZpbGVz/ZXJ2ZS5j/b20vZmls/ZS9kTnBL/dm11/b0/?.zip if "filenium" in url: from servers import filenium url , authorization_header = filenium.extract_authorization_header(url) headers.append( [ "Authorization",authorization_header ] ) # Array de cabeceras logger.info("[scrapertools.py] ---------------------------") for header in headers: logger.info("[scrapertools.py] header=%s" % str(header[0])) txheaders[header[0]]=header[1] logger.info("[scrapertools.py] ---------------------------") # Construye el request req = Request(url, post, txheaders) handle = urlopen(req) # Actualiza el almacén de cookies cj.save(ficherocookies) # Lee los datos y cierra #data=handle.read() info = handle.info() logger.info("[scrapertools.py] Respuesta") logger.info("[scrapertools.py] ---------------------------") location_header="" for header in info: logger.info("[scrapertools.py] "+header+"="+info[header]) if header==header_to_get: location_header=info[header] handle.close() logger.info("[scrapertools.py] ---------------------------") # Tiempo transcurrido fin = time.clock() logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1)) return location_header def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]): return_headers = [] logger.info("[scrapertools.py] get_headers_from_response url="+url) if post is not None: logger.info("[scrapertools.py] post="+post) else: logger.info("[scrapertools.py] post=None") # Inicializa la librería de las cookies ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' ) logger.info("[scrapertools.py] ficherocookies="+ficherocookies) cj = None ClientCookie = None cookielib = None import cookielib # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of 
FileCookieJar # that has useful load and save methods if os.path.isfile(ficherocookies): logger.info("[scrapertools.py] Leyendo fichero cookies") # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra") os.remove(ficherocookies) opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),NoRedirectHandler()) urllib2.install_opener(opener) # Contador inicio = time.clock() # Diccionario para las cabeceras txheaders = {} # Traza la peticion if post is None: logger.info("[scrapertools.py] petición GET") else: logger.info("[scrapertools.py] petición POST") # Array de cabeceras logger.info("[scrapertools.py] ---------------------------") for header in headers: logger.info("[scrapertools.py] header=%s" % str(header[0])) txheaders[header[0]]=header[1] logger.info("[scrapertools.py] ---------------------------") # Construye el request req = Request(url, post, txheaders) handle = urlopen(req) # Actualiza el almacén de cookies cj.save(ficherocookies) # Lee los datos y cierra #data=handle.read() info = handle.info() logger.info("[scrapertools.py] Respuesta") logger.info("[scrapertools.py] ---------------------------") location_header="" for header in info: logger.info("[scrapertools.py] "+header+"="+info[header]) return_headers.append( [header,info[header]] ) handle.close() logger.info("[scrapertools.py] ---------------------------") # Tiempo transcurrido fin = time.clock() logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1)) return return_headers def unseo(cadena): if cadena.upper().startswith("VER GRATIS LA PELICULA "): cadena = cadena[23:] elif cadena.upper().startswith("VER GRATIS PELICULA "): cadena = cadena[20:] elif cadena.upper().startswith("VER ONLINE LA PELICULA "): cadena = cadena[23:] elif cadena.upper().startswith("VER GRATIS "): cadena = cadena[11:] elif cadena.upper().startswith("VER 
ONLINE "): cadena = cadena[11:] elif cadena.upper().startswith("DESCARGA DIRECTA "): cadena = cadena[17:] return cadena #scrapertools.get_filename_from_url(media_url)[-4:] def get_filename_from_url(url): import urlparse parsed_url = urlparse.urlparse(url) try: filename = parsed_url.path except: # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" if len(parsed_url)>=4: filename = parsed_url[2] else: filename = "" return filename def get_domain_from_url(url): import urlparse parsed_url = urlparse.urlparse(url) try: filename = parsed_url.netloc except: # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" if len(parsed_url)>=4: filename = parsed_url[1] else: filename = "" return filename # Parses the title of a tv show episode and returns the season id + episode id in format "1x01" def get_season_and_episode(title): logger.info("get_season_and_episode('"+title+"')") patron ="(\d+)[x|X](\d+)" matches = re.compile(patron).findall(title) logger.info(str(matches)) filename=matches[0][0]+"x"+matches[0][1] logger.info("get_season_and_episode('"+title+"') -> "+filename) return filename def get_sha1(cadena): try: import hashlib devuelve = hashlib.sha1(cadena).hexdigest() except: import sha import binascii devuelve = binascii.hexlify(sha.new(cadena).digest()) return devuelve def get_md5(cadena): try: import hashlib devuelve = hashlib.md5(cadena).hexdigest() except: import md5 import binascii devuelve = binascii.hexlify(md5.new(cadena).digest()) return devuelve def read_body_and_headers(url, post=None, headers=[], follow_redirects=False, timeout=None): logger.info("read_body_and_headers "+url) if post is not None: logger.info("read_body_and_headers post="+post) if len(headers)==0: headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"]) # Start cookie lib ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' ) 
logger.info("read_body_and_headers cookies_file="+ficherocookies) cj = None ClientCookie = None cookielib = None # Let's see if cookielib is available try: logger.info("read_body_and_headers importing cookielib") import cookielib except ImportError: logger.info("read_body_and_headers cookielib no disponible") # If importing cookielib fails # let's try ClientCookie try: logger.info("read_body_and_headers importing ClientCookie") import ClientCookie except ImportError: logger.info("read_body_and_headers ClientCookie not available") # ClientCookie isn't available either urlopen = urllib2.urlopen Request = urllib2.Request else: logger.info("read_body_and_headers ClientCookie available") # imported ClientCookie urlopen = ClientCookie.urlopen Request = ClientCookie.Request cj = ClientCookie.MozillaCookieJar() else: logger.info("read_body_and_headers cookielib available") # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if cj is not None: # we successfully imported # one of the two cookie handling modules logger.info("read_body_and_headers Cookies enabled") if os.path.isfile(ficherocookies): logger.info("read_body_and_headers Reading cookie file") # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("read_body_and_headers Wrong cookie file, deleting...") os.remove(ficherocookies) # Now we need to get our Cookie Jar # installed in the opener; # for fetching URLs if cookielib is not None: logger.info("read_body_and_headers opener using urllib2 (cookielib)") # if we use cookielib # then we get the HTTPCookieProcessor # and install the opener in urllib2 if not follow_redirects: opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler()) else: opener = 
urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) else: logger.info("read_body_and_headers opener using ClientCookie") # if we use ClientCookie # then we get the HTTPCookieProcessor # and install the opener in ClientCookie opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)) ClientCookie.install_opener(opener) # ------------------------------------------------- # Cookies instaladas, lanza la petición # ------------------------------------------------- # Contador inicio = time.clock() # Diccionario para las cabeceras txheaders = {} # Construye el request if post is None: logger.info("read_body_and_headers GET request") else: logger.info("read_body_and_headers POST request") # Añade las cabeceras logger.info("read_body_and_headers ---------------------------") for header in headers: logger.info("read_body_and_headers header %s=%s" % (str(header[0]),str(header[1])) ) txheaders[header[0]]=header[1] logger.info("read_body_and_headers ---------------------------") req = Request(url, post, txheaders) if timeout is None: handle=urlopen(req) else: #Disponible en python 2.6 en adelante --> handle = urlopen(req, timeout=timeout) #Para todas las versiones: try: import socket deftimeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) handle=urlopen(req) socket.setdefaulttimeout(deftimeout) except: import sys for line in sys.exc_info(): logger.info( "%s" % line ) # Actualiza el almacén de cookies cj.save(ficherocookies) # Lee los datos y cierra if handle.info().get('Content-Encoding') == 'gzip': buf = StringIO( handle.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() else: data=handle.read() info = handle.info() logger.info("read_body_and_headers Response") returnheaders=[] logger.info("read_body_and_headers ---------------------------") for header in info: logger.info("read_body_and_headers "+header+"="+info[header]) returnheaders.append([header,info[header]]) 
handle.close() logger.info("read_body_and_headers ---------------------------") ''' # Lanza la petición try: response = urllib2.urlopen(req) # Si falla la repite sustituyendo caracteres especiales except: req = urllib2.Request(url.replace(" ","%20")) # Añade las cabeceras for header in headers: req.add_header(header[0],header[1]) response = urllib2.urlopen(req) ''' # Tiempo transcurrido fin = time.clock() logger.info("read_body_and_headers Downloaded in %d seconds " % (fin-inicio+1)) logger.info("read_body_and_headers body="+data) return data,returnheaders
gpl-3.0
matthewbauer/em-dosbox
src/cpu/extractfun.py
3
14938
import sys import re import hashlib def error(s): print >> sys.stderr, 'ERROR:', s sys.exit(1) # Functions are stored here, removing duplicates. Then they are printed. class FunctionStore: def __init__(self, namer): self.functions = [] self.hash2idx = {} self.namer = namer self.name2idx = {} self.idx2name = [] self.idnum2str = [] def findname(self, fcases): for case in fcases: naming = namer.getname(case) if naming: return naming error('no name found for ' + str(fcases)) NAMINGS = ( lambda self, x : self.idnum2str[x[0]] + x[2][1], lambda self, x : self.idnum2str[x[0]] + x[2][0] + x[2][1], lambda self, x : self.idnum2str[x[0]] + hex(x[1][0]) + x[2][1], lambda self, x : error('cannot fix name collision at ' + str(x[2])) ) def name_function(self, idx): colidx = -1 namingidx = 0 while True: tryname = self.NAMINGS[namingidx](self, self.functions[idx]) if tryname in self.name2idx: # Name collision, use next name format namingidx += 1 colidx = self.name2idx[tryname] if colidx >= 0: # Rename colliding function newname = self.NAMINGS[namingidx](self, self.functions[colidx]) if namingidx == 2: print self.functions[idx] print self.functions[colidx] self.idx2name[colidx] = newname self.name2idx[newname] = colidx # Reserve old name so it can't be used self.name2idx[tryname] = -1; # New function will be renamed in next iteration of loop else: # Use this name self.idx2name[idx] = tryname self.name2idx[tryname] = idx break # Add new function to function store. If function body is identical # to another, only a pointer to that function is stored. def add(self, fset, fcases, ftext): self.idx2name.append(0); ftexthash = hashlib.sha1(ftext).hexdigest() if ftexthash in self.hash2idx: dupeidx = self.hash2idx[ftexthash] dupe = self.functions[dupeidx] if ftext == dupe[3]: # Tuple provides information needed to generate name # for referring to already existing function. 
self.functions.append((fset, fcases, dupeidx)) else: error('hash collision') else: idx = len(self.functions) self.hash2idx[ftexthash] = idx naming = self.findname(fcases) self.functions.append((fset, fcases, naming, ftext)) self.name_function(idx) # Return ID number for ID string def getfileid(self, fileid): self.idnum2str.append(fileid) return len(self.idnum2str) - 1 # Print out all functions def output_fun(self, f): for i in xrange(0, len(self.functions)): if len(self.functions[i]) == 4: # This is a real function, not a reference to another one f.write('static int ' + self.idx2name[i] + '(void) {\n') f.write(self.functions[i][3]) f.write('\n}\n\n') ARRAYLEN = 1024 def populate_arrays(self): # Create empty arrays self.funptrarr = []; for i in range(0, len(self.idnum2str)): self.funptrarr.append([-1] * self.ARRAYLEN) # Initialize them for i in xrange(0, len(self.functions)): if len(self.functions[i]) == 4: # This is a real function with its own name idx = i else: # This is a reference to another function idx = self.functions[i][2] # Point all cases to idx for case in self.functions[i][1]: self.funptrarr[self.functions[i][0]][case] = idx # Replace duplicate arrays with index of earlier matching array def deduplicate_arrays(self): for i in range(0, len(self.idnum2str)): for j in range(0, i - 1): if self.funptrarr[i] == self.funptrarr[j]: self.funptrarr[i] = j def output_array(self, f, defname, i): f.write('\nint (* const ' + self.idnum2str[i] + 'funptr[' + str(self.ARRAYLEN) + '])(void) = {\n') line = '' linelen = 0 defnamel = len(defname) for idx in self.funptrarr[i]: if idx >= 0: # Function has a name funname = self.idx2name[idx] funnamel = len(funname) else: # No function here, output default funname = defname funnamel = defnamel if linelen + funnamel + 3 > self.MAXLEN: # Can't fit on line, output line, start next f.write(line + ',\n') line = funname linelen = funnamel else: # Can fit on line, so add to line if line != '': line += ', ' linelen += 2 line += funname 
linelen += funnamel f.write(line) f.write('\n};\n') MAXLEN = 79 def output_arrays(self, f, defname): self.populate_arrays() self.deduplicate_arrays() for i in range(0, len(self.idnum2str)): if isinstance(self.funptrarr[i], list): self.output_array(f, defname, i) else: f.write('\n/* Omitting ' + self.idnum2str[i] + 'funptr because it matches ' + self.idnum2str[self.funptrarr[i]] + 'funptr */\n') # Reads case statement bodies from a file, transforms them, # and stores them in a FunctionStore. class FunctionReader: def __init__(self, store): self.functionstore = store # This replaces the matched string def replace_match(self, m, r): return (m.string[:m.start()] + r + m.string[m.end():], m.start() + len(r)) # This regular expression identifies relevant strings to search for. # Particular parts are processed using functions below, via REFUNMAP. FUNRE = re.compile('({)|(})|(switch)|(break)|(continue)|' '(goto restart_opcode)|(goto illegal_opcode)|' '(goto decode_end)'); def re_openbrace(self, match): self.bracelevel += 1 return (match.string, match.end()) def re_closebrace(self, match): self.bracelevel -= 1 if self.bracelevel == self.entrylevel: # Back to outermost level, where break statements become returns self.entrylevel = -1 return (match.string, match.end()) def re_startlevel(self, match): # Entering deeper level, where break statements need to remain if self.entrylevel == -1: self.entrylevel = self.bracelevel return (match.string, match.end()) # Only break statements that exit the case that is converted into a # function need to be converted. Other break statements only exit inner # levels and need to remain unaltered. 
def re_break(self, match): if self.entrylevel == -1: return self.replace_match(match, 'return CASERET_BREAK') else: return (match.string, match.end()) REFUNMAP = { 1 : re_openbrace, 2 : re_closebrace, 3 : re_startlevel, 4 : re_break, 5 : lambda self, match : self.replace_match(match, 'return CASERET_CONTINUE'), 6 : lambda self, match : self.replace_match(match, 'return CASERET_RESTART'), 7 : lambda self, match : self.replace_match(match, 'return CASERET_ILLEGAL'), 8 : lambda self, match : self.replace_match(match, 'return CASERET_END') } # Process a line which should contain part of a case statement body def pr_funcline(self, l): loc = 0 while True: match = self.FUNRE.search(l, loc) if match: (l, loc) = self.REFUNMAP[match.lastindex](self, match) elif self.bracelevel >= 0: self.funtext += l return True else: # Negative bracelevel means closing brace of case. # Assume it is on its own line so line can be discarded. return False # Strip leading lines and trailing whitespace. # Don't strip indentation on first line with text. LEADINGBLANK = re.compile('\s*\n') def strip_func(self, funtext): match = self.LEADINGBLANK.match(self.funtext) if match: startidx = match.end() else: startidx = 0 return self.funtext[startidx:].rstrip() # Add function to function store and prepare for next one. def end_func(self): self.funtext = self.strip_func(self.funtext) if len(self.cases) == 0 and self.funtext != '': error('no case for: ' + self.funtext) elif len(self.cases) > 0: if self.funtext == '': # This would be legal C, but should be dealt with elsewhere error('no code for: ' + str(self.cases)) else: self.functionstore.add(self.fileid, self.cases, self.funtext) self.cases = [] self.funtext = '' # Try to parse a line as one or more case statements. # If this returns False, that means this is not a line of case statements, # and the previous case statement body continues. 
CASERE = re.compile('\s*case\s+([^:]+):'); def pr_caseline(self, line): idx = 0 l = len(line) newcases = [] # Loop for multiple case statements on one line while idx < l: match = self.CASERE.match(line, idx) if match: try: # Case value could be a C expression newcases.append(eval(match.group(1))) except: error('evaluating case: ' + match.group(1)) idx = match.end() elif idx == 0: # This could be a function line return False elif line.strip != '': # It was just trailing whitespace break else: error('parsing case: ' + line) if self.funtext.strip() == '': self.cases += newcases else: self.end_func() self.cases = newcases return True # Load functions from a file # The fileid is later used for naming STRIP_DEFAULT = re.compile('\n\s*default:.*', re.DOTALL) def from_file(self, fileid, filename): self.entrylevel = -1 self.bracelevel = 0 self.funtext = '' self.cases = [] self.fileid = self.functionstore.getfileid(fileid) inswitch = False with open(filename) as f: for line in f.readlines(): if line.startswith('#'): continue if not inswitch: # Reading up to start of CPU core switch statement if line.startswith(' switch (core.opcode_index'): inswitch = True elif self.bracelevel != 0 or not self.pr_caseline(line): if not self.pr_funcline(line): self.funtext = self.STRIP_DEFAULT.sub('', self.funtext, 1) self.end_func() break class FunctionNamer: names = {} # Create a name that is a legal function name in C. # Underscore is rejected to avoid having multiple underscores side by side. 
NAMECLEANRE = re.compile('[^0-9A-Za-z]+') def cleanup_name(self, name): n = self.NAMECLEANRE.sub('_', name) nend = len(n) - 1 if n[nend] == '_': return n[:nend] else: return n # Calculate number corresponding to this CASE #define OPCODE_NONE = 0 OPCODE_SIZE = 0x200 OPCODE_0F = 0x100 NAMEMAP = { 'W': OPCODE_NONE, 'D': OPCODE_SIZE, 'B': OPCODE_NONE, # Also OPCODE_SIZE '0F_W': OPCODE_0F|OPCODE_NONE, '0F_D': OPCODE_0F|OPCODE_SIZE, '0F_B': OPCODE_0F|OPCODE_NONE # Also OPCODE_0F|OPCODE_SIZE } def calculate_case(self, opcode_type, idx): return self.NAMEMAP[opcode_type]+int(idx, 16) # Read instruction names from comments in header file NAMINGRE = re.compile('\s*CASE_([^\(]+)\(([^\)]+)\)\s*/\*(.*)\*/\s*') def from_file(self, filename): with open(filename) as f: for line in f.readlines(): match = self.NAMINGRE.match(line) if match: self.names[self.calculate_case(match.group(1), match.group(2).strip())] = ( match.group(1), self.cleanup_name(match.group(3).strip()) ) def getname(self, case): if case in self.names: return self.names[case] else: return False CPP_HEADER = ''' static int x86_illegal(void) { return CASERET_ILLEGAL; } ''' def print_cpp(store, filename): with open(filename, 'w') as f: f.write('/* DOSBox x86 opcode functions file generated by ' + sys.argv[0] + ' */\n') f.write(CPP_HEADER) store.output_fun(f) store.output_arrays(f, 'x86_illegal') namer = FunctionNamer() namer.from_file('core_normal/prefix_none.h') namer.from_file('core_normal/prefix_66.h') namer.from_file('core_normal/prefix_0f.h') namer.from_file('core_normal/prefix_66_0f.h') # TODO: More deduplication is possible by reading all functions into one # single store. However, that has to take into account the differing # environments set up in the core_*.cpp files before the case statement. 
PROCESSINGS = ( ('x86_', 'core_normal_fun.'), ('x86s_', 'core_simple_fun.'), ('x86p_', 'core_prefetch_fun.') ) for p in PROCESSINGS: store = FunctionStore(namer) reader = FunctionReader(store) reader.from_file(p[0], p[1] + 'ii') print_cpp(store, p[1] + 'h')
gpl-2.0
srickardti/openthread
tools/harness-automation/cases_R140/router_5_3_4.py
18
1876
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # from autothreadharness.harness_case import HarnessCase import unittest class Router_5_3_4(HarnessCase): role = HarnessCase.ROLE_ROUTER case = '5 3 4' golden_devices_required = 6 def on_dialog(self, dialog, title): pass if __name__ == '__main__': unittest.main()
bsd-3-clause
Intel-tensorflow/tensorflow
tensorflow/python/platform/resource_loader_test.py
17
1289
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import googletest from tensorflow.python.platform import resource_loader class ResourceLoaderTest(googletest.TestCase): def test_exception(self): with self.assertRaises(IOError): resource_loader.load_resource("/fake/file/path/dne") def test_exists(self): contents = resource_loader.load_resource( "python/platform/resource_loader.py") self.assertIn(b"tensorflow", contents) if __name__ == "__main__": googletest.main()
apache-2.0
matandobr/Mobile-Security-Framework-MobSF
MobSF/urls.py
1
3977
"""URL routing table for the MobSF Django project.

Maps every HTTP endpoint (web UI, static/dynamic analysis, Android device
operations, and the v1 REST API) onto its view callable.
"""
from django.conf.urls import url

from DynamicAnalyzer.views.android import dynamic_analyzer as dz
from DynamicAnalyzer.views.android import (
    operations,
    report,
    tests_common,
    tests_frida)
from MobSF import utils
from MobSF.views import home
from MobSF.views.api import rest_api
from StaticAnalyzer import tests
from StaticAnalyzer.views import shared_func
from StaticAnalyzer.views.android import (
    find,
    generate_downloads,
    manifest_view,
    source_tree,
)
from StaticAnalyzer.views.windows import windows
from StaticAnalyzer.views.android import static_analyzer as android_sa
from StaticAnalyzer.views.ios import static_analyzer as ios_sa
from StaticAnalyzer.views.ios import view_source as io_view_source


urlpatterns = [
    # General
    url(r'^$', home.index, name='home'),
    # BUG FIX: a class-based view must be routed through the *result* of
    # as_view(); passing the unbound method itself made Django call
    # as_view(request), which raises TypeError on every upload request.
    url(r'^upload/$', home.Upload.as_view()),
    url(r'^download/', home.download),
    url(r'^about$', home.about, name='about'),
    url(r'^api_docs$', home.api_docs, name='api_docs'),
    url(r'^recent_scans/$', home.recent_scans, name='recent'),
    url(r'^delete_scan/$', home.delete_scan),
    url(r'^search$', home.search),
    url(r'^error/$', home.error, name='error'),
    url(r'^not_found/$', home.not_found),
    url(r'^zip_format/$', home.zip_format),
    url(r'^mac_only/$', home.mac_only),
    # Static Analysis
    # Android
    url(r'^StaticAnalyzer/$', android_sa.static_analyzer),
    url(r'^Source$', source_tree.run, name='source_view'),
    url(r'^Find/$', find.run, name='find_files'),
    url(r'^generate_downloads/$', generate_downloads.run),
    url(r'^ManifestView/$', manifest_view.run),
    # IOS
    url(r'^StaticAnalyzer_iOS/$', ios_sa.static_analyzer_ios),
    url(r'^ViewFile/$', io_view_source.run),
    # Windows
    url(r'^StaticAnalyzer_Windows/$', windows.staticanalyzer_windows),
    # Shared
    url(r'^PDF/$', shared_func.pdf),
    # App Compare
    url(r'^compare/(?P<hash1>[0-9a-f]{32})/(?P<hash2>[0-9a-f]{32})/$',
        shared_func.compare_apps),
    # Dynamic Analysis
    url(r'^dynamic_analysis/$', dz.dynamic_analysis, name='dynamic'),
    url(r'^android_dynamic/$', dz.dynamic_analyzer,
        name='dynamic_analyzer'),
    url(r'^httptools$', dz.httptools_start, name='httptools'),
    url(r'^logcat/$', dz.logcat),
    # Android Operations
    url(r'^mobsfy/$', operations.mobsfy),
    url(r'^screenshot/$', operations.take_screenshot),
    url(r'^execute_adb/$', operations.execute_adb),
    url(r'^screen_cast/$', operations.screen_cast),
    url(r'^touch_events/$', operations.touch),
    url(r'^get_component/$', operations.get_component),
    url(r'^mobsf_ca/$', operations.mobsf_ca),
    # Dynamic Tests
    url(r'^activity_tester/$', tests_common.activity_tester),
    url(r'^download_data/$', tests_common.download_data),
    url(r'^collect_logs/$', tests_common.collect_logs),
    # Frida
    url(r'^frida_instrument/$', tests_frida.instrument),
    url(r'^live_api/$', tests_frida.live_api),
    url(r'^frida_logs/$', tests_frida.frida_logs),
    url(r'^list_frida_scripts/$', tests_frida.list_frida_scripts),
    url(r'^get_script/$', tests_frida.get_script),
    # Report
    url(r'^dynamic_report/$', report.view_report),
    url(r'^dynamic_view_file/$', report.view_file),
    # REST API
    url(r'^api/v1/upload$', rest_api.api_upload),
    url(r'^api/v1/scan$', rest_api.api_scan),
    url(r'^api/v1/delete_scan$', rest_api.api_delete_scan),
    url(r'^api/v1/download_pdf$', rest_api.api_pdf_report),
    url(r'^api/v1/report_json$', rest_api.api_json_report),
    url(r'^api/v1/view_source$', rest_api.api_view_source,
        name='api_view_source'),
    url(r'^api/v1/scans$', rest_api.api_recent_scans),
    url(r'^api/v1/compare$', rest_api.api_compare),
    # Test
    url(r'^tests/$', tests.start_test),
]

# Side effect kept from the original module: log the MobSF version on import.
utils.print_version()
gpl-3.0
javaarchive/PIDLE
cmdpack.py
3
1424
# Interactive command-line front end for the PIDLE pack/unpack utilities.
# Reads commands from stdin in a loop: pack, unpack, help, exit.
print('''
================================
Welcome to cmd pack
Provided by PIDLE
================================
type help for help
pack-pack files to an file
unpack-unpack files from packed file
exit-to exit
ENJOY
''')
try:
    # pack()/unpack() come from the project-local systools module.
    from systools import *
    import os

    def isfile(file_path):
        # True only for a non-empty path that names an existing regular file.
        if not file_path:
            return False
        elif not os.path.isfile(file_path):
            return False
        else:
            return True
except:
    # Best-effort import guard: the menu still shows, but pack/unpack
    # commands will fail with NameError if systools was unavailable.
    print("unable to use pack or unpack")

# Main read-eval loop; commands are case-insensitive.
while True:
    data=input(">>>").lower()
    if data=="pack":
        # Collect file names until the user submits an empty line.
        c=[]
        while True:
            t=input("File to pack press enter if done")
            if t=="":
                break
            else:
                if isfile(t):
                    c.append(t)
                else:
                    raise Exception(t+" is not a file")
        pack(c,input("finish file: "))
    elif data=="unpack":
        try:
            unpack(input("File to unpack:\t"))
        except:
            raise Exception("Unable to unpack")
    elif data=="exit":
        break
    elif data=="help":
        print('''
================================
Welcome to cmd pack
Provided by PIDLE
================================
type help for help
pack-pack files to an file
unpack-unpack files from packed file
exit-to exit
ENJOY
''')
mit
v1bri/gnuradio
gr-analog/python/analog/qa_pll_refout.py
40
8750
#!/usr/bin/env python # # Copyright 2004,2010,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import math from gnuradio import gr, gr_unittest, analog, blocks class test_pll_refout(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() def tearDown(self): self.tb = None def test_pll_refout(self): expected_result = ((1+0j), (1+6.408735764296125e-10j), (0.9999844431877136+0.005577784031629562j), (0.9998642802238464+0.016474783420562744j), (0.9994739890098572+0.032431427389383316j), (0.9985847473144531+0.05318402871489525j), (0.996917188167572+0.07846084982156754j), (0.9941533207893372+0.10797744989395142j), (0.9899479150772095+0.14143235981464386j), (0.9839394092559814+0.1785029172897339j), (0.9757603406906128+0.2188417762517929j), (0.9650475978851318+0.26207470893859863j), (0.9514514803886414+0.30779871344566345j), (0.9346449971199036+0.35558223724365234j), (0.9143316149711609+0.40496626496315j), (0.8902531862258911+0.4554659426212311j), (0.8621962666511536+0.5065743923187256j), (0.8299974799156189+0.5577671527862549j), (0.7935484647750854+0.6085070967674255j), (0.7527987360954285+0.6582507491111755j), (0.7077582478523254+0.7064547538757324j), (0.6584978699684143+0.7525825500488281j), (0.6051493883132935+0.7961119413375854j), 
(0.547903835773468+0.8365413546562195j), (0.48700881004333496+0.8733970522880554j), (0.42276495695114136+0.90623939037323j), (0.35552138090133667+0.9346681237220764j), (0.2856702208518982+0.9583280086517334j), (0.21364101767539978+0.976912260055542j), (0.13989387452602386+0.9901664853096008j), (0.06491273641586304+0.9978909492492676j), (-0.01080091018229723+0.9999416470527649j), (-0.08673560619354248+0.9962313771247864j), (-0.16237612068653107+0.9867289662361145j), (-0.23721040785312653+0.9714583158493042j), (-0.3107353150844574+0.95049649477005j), (-0.3824624717235565+0.9239710569381714j), (-0.45192304253578186+0.892056941986084j), (-0.5186731219291687+0.8549726009368896j), (-0.5822963714599609+0.812976598739624j), (-0.6424083709716797+0.7663624882698059j), (-0.6986585855484009+0.7154552340507507j), (-0.7507330775260925+0.6606056690216064j), (-0.7983550429344177+0.6021870970726013j), (-0.841286301612854+0.5405898094177246j), (-0.879327654838562+0.47621726989746094j), (-0.912318229675293+0.4094819128513336j), (-0.9401354789733887+0.340800940990448j), (-0.9626938104629517+0.27059316635131836j), (-0.979943573474884+0.1992751508951187j), (-0.9918696284294128+0.12725839018821716j), (-0.9984893202781677+0.054946307092905045j), (-0.9998509287834167-0.017267409712076187j), (-0.9960314631462097-0.08900183439254761j), (-0.9871346950531006-0.1598907858133316j), (-0.9732890129089355-0.2295832633972168j), (-0.9546451568603516-0.29774588346481323j), (-0.9313743710517883-0.3640628457069397j), (-0.9036663174629211-0.42823725938796997j), (-0.8717266321182251-0.48999255895614624j), (-0.8357754945755005-0.5490713119506836j), (-0.7960456013679504-0.6052366495132446j), (-0.7527803182601929-0.658271849155426j), (-0.706232488155365-0.7079799771308899j), (-0.6566619873046875-0.7541850209236145j), (-0.6043350696563721-0.7967302799224854j), (-0.5495226979255676-0.8354787826538086j), (-0.4924990236759186-0.8703129887580872j), (-0.4335414469242096-0.9011335968971252j), 
(-0.3729270100593567-0.927860677242279j), (-0.3109343349933624-0.9504314064979553j), (-0.2478405237197876-0.9688008427619934j), (-0.18392162024974823-0.9829409122467041j), (-0.11945075541734695-0.9928401112556458j), (-0.05469784513115883-0.9985029697418213j), (0.010069688782095909-0.9999492764472961j), (0.07459097355604172-0.9972141981124878j), (0.13860897719860077-0.9903472065925598j), (0.2018725872039795-0.979411780834198j), (0.2641367018222809-0.964485228061676j), (0.32516375184059143-0.9456577301025391j), (0.3847236633300781-0.9230318069458008j), (0.44259318709373474-0.8967224955558777j), (0.49855801463127136-0.8668563365936279j), (0.5524120926856995-0.8335711359977722j), (0.6039596796035767-0.7970148921012878j), (0.6530137062072754-0.7573460936546326j), (0.6993972063064575-0.7147331833839417j), (0.7429447770118713-0.6693527102470398j), (0.7835012078285217-0.6213902235031128j), (0.8209227919578552-0.5710391998291016j), (0.8550769090652466-0.5185011625289917j), (0.8858439326286316-0.46398329734802246j), (0.9131162166595459-0.4076994061470032j), (0.936798632144928-0.3498689830303192j), (0.956809401512146-0.2907160222530365j), (0.9730796813964844-0.23046888411045074j), (0.9855544567108154-0.16935895383358002j), (0.9941920042037964-0.10762103646993637j), (0.9989647269248962-0.045491550117731094j)) sampling_freq = 10e3 freq = sampling_freq / 100 loop_bw = math.pi/100.0 maxf = 1 minf = -1 src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0) pll = analog.pll_refout_cc(loop_bw, maxf, minf) head = blocks.head(gr.sizeof_gr_complex, int (freq)) dst = blocks.vector_sink_c() self.tb.connect(src, pll, head) self.tb.connect(head, dst) self.tb.run() dst_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 4) if __name__ == '__main__': gr_unittest.run(test_pll_refout, "test_pll_refout.xml")
gpl-3.0
wolfiex/DSMACC-testing
begin.py
1
5376
#!/usr/local/anaconda/bin/python #/usr/bin/python import multiprocessing,os,subprocess,pickle,sys,time #import pandas as pd import numpy as np import glob,sys,os # options available_cores = 16 pre_formatted_style=False #slower runsaved = False if '--saved' in sys.argv: runsaved=True #runs model using saved versions when -modelname is provided keepx = False if '--keepx' in sys.argv: keepx=True # does not ignore runs marked with an X in the begining icsonly = False if '--icsonly' in sys.argv: icsonly=True # does not ignore runs marked with an X in the begining obs= 0 if '--obs' in sys.argv: obs=int(tuple(open('include.obs'))[0].strip().replace('!obs:','')) # does not ignore runs marked with an X in the begining print 'running with %s observations, with %s lines'%((obs-1.)/4.,obs) last = False if '--last' in sys.argv: last = -1 if '--run' in sys.argv: for i in sys.argv: try: last=int(i) except: None print 'if obs unconstrain species where obs is used' print sys.argv,last ################## ####read files#### file_list = glob.glob('InitCons/*.csv') file_list.sort(key=os.path.getmtime)#getmtime - modified getctime-created if (not os.path.exists('./model') & runsaved==0): sys.exit('No model file found. Please run "make kpp" followed by "make"') print 'Select file to open: \n\n' for i,f in enumerate(file_list): print i , ' - ', f.replace('InitCons/','').replace('.csv','') if (last) : ic_file = file_list[last] else : ic_file = file_list[int(raw_input('Enter Number \n'))] #run simulations ######################################################################################### ncores= available_cores # - 6 #if 6 used from openmp? start = (time.strftime("%Y%m%d%H%M")) # make ics if ('.csv' not in ic_file): ic_file += '.csv' print ic_file os.system("touch Init_cons.dat") os.system("rm Init_cons.dat") os.system("./InitCons/makeics.pl %s"%ic_file) if icsonly: sys.exit('icsonly flag is on, Initi_cons.dat file made - only. 
Exiting.') import netCDF4 from netCDF4 import Dataset from scipy.io import FortranFile ic_open= tuple(open(ic_file)) numbered = np.array([i for i in enumerate(ic_open[2].strip().split(',')[3:])]) if not keepx: numbered = filter(lambda x: x[1][0] not in 'xX', numbered) n_runs=len(numbered) if (ncores > n_runs): ncores = n_runs def read_fbin(filename): ''' this reads each written binary instance itteratively''' f = FortranFile(filename, 'r') array = [] while True: try: array.append(f.read_reals(dtype=np.float_)) except TypeError: break #array = np.reshape(array, (nspecs,-1)) f.close() return array # run dsmacc def simulate (arg_in): try: #each system call has to be on a new line #os.system("touch Outputs/s_%s.empty"%('run',arg_in[1])) #os.system("rm Outputs/s_%s.*"%('run',arg_in[1])) start = time.strftime("%s") description="%s_%s"%('run',arg_in[1]) model='model' if '-' in description: if runsaved: model='save/exec/%s/model'%(description.split('-')[-1]) else: description = description.split('-')[0] print 'aaaa' linenumber = "%s"%(int(arg_in[0])+1) run ='./%s %s %s %d'%(model, description,int(linenumber),obs) print run ; os.system(run) return int(time.strftime("%s")) - int(start) except Exception as e: return 'Failed on '+ arg_in + e #do runs ######################################################################################### out = multiprocessing.Pool(ncores).map( simulate , numbered ) os.system('rm fort*') #concat resutls ######################################################################################### print '\n Calculations complete! \n Concatenating results. 
\n ' ic_string='' # get a string format of the intial conditions file for line in ic_open[1:]: ic_string+=line filename= ic_file.split('/')[-1].split('.')[0]+'_'+ time.strftime("%y%m%d%H%M")+'.nc' ncfile = Dataset(filename,'w') print 'add to results folder' ncfile.initial_conditions_str = ic_string ncfile.date = time.strftime("Completion time:%A %d %B %Y at %H:%M") ncfile.description = ic_open[0].strip() spec = ncfile.createDimension('spec', None) time = ncfile.createDimension('time', None) rate = ncfile.createDimension('rate', None) #for each simulation l = 0 for group_name in numbered: print group_name if not runsaved: group_name[1] = group_name[1].split('-')[0] group = ncfile.createGroup(group_name[1]) specvar = group.createVariable( 'Spec' , "f8" ,('time','spec',)) ratevar = group.createVariable( 'Rate' , "f8" ,('time','rate',)) specvar[:] = read_fbin('./Outputs/run_%s_.spec'%group_name[1]) ratevar[:] = read_fbin('./Outputs/run_%s_.rate'%group_name[1]) l += len(specvar[:]) print group specvar.head = ''.join(tuple(open('./Outputs/spec.names'))).replace(' ','').replace('\n','') ratevar.head = ''.join(tuple(open('./Outputs/rate.names'))).replace(' ','').replace('\n','') group.WALL_time = out[int(group_name[0])] # close the file. ncfile.close()a if l <= 1 : os.system('rm '+filename) print 'Failed!' else: print '*** SUCCESS writing %s!'%filename os.system('find . -size +100M | cat >> .gitignore')
gpl-3.0
ClimbsRocks/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
161
1380
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
eduNEXT/edunext-platform
cms/djangoapps/contentstore/tests/test_import_draft_order.py
4
2845
""" Tests Draft import order. """ from django.conf import settings from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.xml_importer import import_course_from_xml TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT # This test is in the CMS module because the test configuration to use a draft # modulestore is dependent on django. class DraftReorderTestCase(ModuleStoreTestCase): def test_order(self): """ Verify that drafts are imported in the correct order. """ store = modulestore() course_items = import_course_from_xml( store, self.user.id, TEST_DATA_DIR, ['import_draft_order'], create_if_not_present=True ) course_key = course_items[0].id sequential = store.get_item(course_key.make_usage_key('sequential', '0f4f7649b10141b0bdc9922dcf94515a')) verticals = sequential.children # The order that files are read in from the file system is not guaranteed (cannot rely on # alphabetical ordering, for example). Therefore, I have added a lot of variation in filename and desired # ordering so that the test reliably failed with the bug, at least on Linux. # # 'a', 'b', 'c', 'd', and 'z' are all drafts, with 'index_in_children_list' of # 2 , 4 , 6 , 5 , and 0 respectively. # # '5a05be9d59fc4bb79282c94c9e6b88c7' and 'second' are public verticals. 
self.assertEqual(7, len(verticals)) self.assertEqual(course_key.make_usage_key('vertical', 'z'), verticals[0]) self.assertEqual(course_key.make_usage_key('vertical', '5a05be9d59fc4bb79282c94c9e6b88c7'), verticals[1]) self.assertEqual(course_key.make_usage_key('vertical', 'a'), verticals[2]) self.assertEqual(course_key.make_usage_key('vertical', 'second'), verticals[3]) self.assertEqual(course_key.make_usage_key('vertical', 'b'), verticals[4]) self.assertEqual(course_key.make_usage_key('vertical', 'd'), verticals[5]) self.assertEqual(course_key.make_usage_key('vertical', 'c'), verticals[6]) # Now also test that the verticals in a second sequential are correct. sequential = store.get_item(course_key.make_usage_key('sequential', 'secondseq')) verticals = sequential.children # 'asecond' and 'zsecond' are drafts with 'index_in_children_list' 0 and 2, respectively. # 'secondsubsection' is a public vertical. self.assertEqual(3, len(verticals)) self.assertEqual(course_key.make_usage_key('vertical', 'asecond'), verticals[0]) self.assertEqual(course_key.make_usage_key('vertical', 'secondsubsection'), verticals[1]) self.assertEqual(course_key.make_usage_key('vertical', 'zsecond'), verticals[2])
agpl-3.0
clemensv/qpid-proton
examples/python/reactor/goodbye-world.py
8
1873
#!/usr/bin/python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import print_function from proton.reactor import Reactor # So far the reactive hello-world doesn't look too different from a # regular old non-reactive hello-world. The on_reactor_init method can # be used roughly as a 'main' method would. A program that only uses # that one event, however, isn't going to be very reactive. By using # other events, we can write a fully reactive program. class Program: # As before we handle the reactor init event. def on_reactor_init(self, event): print("Hello, World!") # In addition to an initial event, the reactor also produces an # event when it is about to exit. This may not behave much # differently than just putting the goodbye print statement inside # on_reactor_init, but as we grow our program, this piece of it # will always be what happens last, and will always happen # regardless of what other paths the main logic of our program # might take. def on_reactor_final(self, event): print("Goodbye, World!") r = Reactor(Program()) r.run()
apache-2.0
InfoSec812/vertx-web
vertx-web/src/test/sockjs-protocol/unittest2/test/test_break.py
17
11050
import gc import os import sys import weakref from six.moves import StringIO try: import signal except ImportError: signal = None import unittest2 import unittest2 as unittest class TestBreak(unittest2.TestCase): int_handler = None def setUp(self): self._default_handler = signal.getsignal(signal.SIGINT) if self.int_handler is not None: signal.signal(signal.SIGINT, self.int_handler) def tearDown(self): signal.signal(signal.SIGINT, self._default_handler) unittest2.signals._results = weakref.WeakKeyDictionary() unittest2.signals._interrupt_handler = None def testInstallHandler(self): default_handler = signal.getsignal(signal.SIGINT) unittest2.installHandler() self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(unittest2.signals._interrupt_handler.called) def testRegisterResult(self): result = unittest2.TestResult() unittest2.registerResult(result) for ref in unittest2.signals._results: if ref is result: break elif ref is not result: self.fail("odd object in result set") else: self.fail("result not found") def testInterruptCaught(self): default_handler = signal.getsignal(signal.SIGINT) result = unittest2.TestResult() unittest2.installHandler() unittest2.registerResult(result) self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) result.breakCaught = True self.assertTrue(result.shouldStop) try: test(result) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(result.breakCaught) def testSecondInterrupt(self): # Can't use skipIf decorator because the signal handler may have # been changed after defining this method. 
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN: self.skipTest("test requires SIGINT to not be ignored") result = unittest2.TestResult() unittest2.installHandler() unittest2.registerResult(result) def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) result.breakCaught = True self.assertTrue(result.shouldStop) os.kill(pid, signal.SIGINT) self.fail("Second KeyboardInterrupt not raised") try: test(result) except KeyboardInterrupt: pass else: self.fail("Second KeyboardInterrupt not raised") self.assertTrue(result.breakCaught) def testTwoResults(self): unittest2.installHandler() result = unittest2.TestResult() unittest2.registerResult(result) new_handler = signal.getsignal(signal.SIGINT) result2 = unittest2.TestResult() unittest2.registerResult(result2) self.assertEqual(signal.getsignal(signal.SIGINT), new_handler) result3 = unittest2.TestResult() def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) try: test(result) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(result.shouldStop) self.assertTrue(result2.shouldStop) self.assertFalse(result3.shouldStop) def testHandlerReplacedButCalled(self): # Can't use skipIf decorator because the signal handler may have # been changed after defining this method. 
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN: self.skipTest("test requires SIGINT to not be ignored") # If our handler has been replaced (is no longer installed) but is # called by the *new* handler, then it isn't safe to delay the # SIGINT and we should immediately delegate to the default handler unittest2.installHandler() handler = signal.getsignal(signal.SIGINT) def new_handler(frame, signum): handler(frame, signum) signal.signal(signal.SIGINT, new_handler) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: pass else: self.fail("replaced but delegated handler doesn't raise interrupt") def testRunner(self): # Creating a TextTestRunner with the appropriate argument should # register the TextTestResult it creates runner = unittest2.TextTestRunner(stream=StringIO()) result = runner.run(unittest2.TestSuite()) self.assertIn(result, unittest2.signals._results) def testWeakReferences(self): # Calling registerResult on a result should not keep it alive result = unittest2.TestResult() unittest2.registerResult(result) ref = weakref.ref(result) del result # For non-reference counting implementations gc.collect();gc.collect() self.assertIsNone(ref()) def testRemoveResult(self): result = unittest2.TestResult() unittest2.registerResult(result) unittest2.installHandler() self.assertTrue(unittest2.removeResult(result)) # Should this raise an error instead? 
self.assertFalse(unittest2.removeResult(unittest2.TestResult())) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: pass self.assertFalse(result.shouldStop) def testMainInstallsHandler(self): failfast = object() test = object() verbosity = object() result = object() default_handler = signal.getsignal(signal.SIGINT) class FakeRunner(object): initArgs = [] runArgs = [] def __init__(self, *args, **kwargs): self.initArgs.append((args, kwargs)) def run(self, test): self.runArgs.append(test) return result class Program(unittest2.TestProgram): def __init__(self, catchbreak): self.exit = False self.verbosity = verbosity self.failfast = failfast self.catchbreak = catchbreak self.tb_locals = False self.testRunner = FakeRunner self.test = test self.result = None p = Program(False) p.runTests() self.assertEqual(FakeRunner.initArgs, [((), {'verbosity': verbosity, 'failfast': failfast, 'tb_locals': False, 'buffer': None})]) self.assertEqual(FakeRunner.runArgs, [test]) self.assertEqual(p.result, result) self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) FakeRunner.initArgs = [] FakeRunner.runArgs = [] p = Program(True) p.runTests() self.assertEqual(FakeRunner.initArgs, [((), {'verbosity': verbosity, 'failfast': failfast, 'tb_locals': False, 'buffer': None})]) self.assertEqual(FakeRunner.runArgs, [test]) self.assertEqual(p.result, result) self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) def testRemoveHandler(self): default_handler = signal.getsignal(signal.SIGINT) unittest2.installHandler() unittest2.removeHandler() self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) # check that calling removeHandler multiple times has no ill-effect unittest2.removeHandler() self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) def testRemoveHandlerAsDecorator(self): default_handler = signal.getsignal(signal.SIGINT) unittest2.installHandler() @unittest2.removeHandler def test(): 
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) test() self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreakDefaultIntHandler(TestBreak): int_handler = signal.default_int_handler @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreakSignalIgnored(TestBreak): int_handler = signal.SIG_IGN @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreakSignalDefault(TestBreak): int_handler = signal.SIG_DFL @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreakDefaultIntHandler(TestBreak): int_handler = signal.default_int_handler @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreakSignalIgnored(TestBreak): int_handler = signal.SIG_IGN @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have 
been used") class TestBreakSignalDefault(TestBreak): int_handler = signal.SIG_DFL # Should also skip some tests on Jython skipper = unittest2.skipUnless(hasattr(os, 'kill') and signal is not None, "test uses os.kill(...) and the signal module") skipper2 = unittest2.skipIf(sys.platform == 'win32', "can't run on windows") TestBreak = skipper(skipper2(TestBreak)) if __name__ == '__main__': unittest2.main()
apache-2.0
aviciimaxwell/odoo
addons/fetchmail/res_config.py
437
5234
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class fetchmail_config_settings(osv.osv_memory): """ This wizard can be inherited in conjunction with 'res.config.settings', in order to define fields that configure a fetchmail server. It relies on the following convention on the object:: class my_config_settings(osv.osv_memory): _name = 'my.settings' _inherit = ['res.config.settings', 'fetchmail.config.settings'] _columns = { 'fetchmail_stuff': fields.boolean(..., fetchmail_model='my.stuff', fetchmail_name='Incoming Stuff'), } def configure_fetchmail_stuff(self, cr, uid, ids, context=None): return self.configure_fetchmail(cr, uid, 'fetchmail_stuff', context) and in the form view:: <field name="fetchmail_stuff"/> <button type="object" name="configure_fetchmail_stuff"/> The method ``get_default_fetchmail`` determines the value of all fields that start with 'fetchmail_'. It looks up fetchmail server configurations that match the given model name (``fetchmail_model``) and are active. 
The button action ``configure_fetchmail_stuff`` is caught by the object, and calls automatically the method ``configure_fetchmail``; it opens the fetchmail server configuration form for the corresponding field. """ _name = 'fetchmail.config.settings' def get_default_fetchmail(self, cr, uid, fields, context=None): """ determine the value of all fields like 'fetchmail_XXX' """ ir_model = self.pool.get('ir.model') fetchmail_server = self.pool.get('fetchmail.server') fetchmail_fields = [f for f in fields if f.startswith('fetchmail_')] res = {} for f in fetchmail_fields: model_name = self._columns[f].fetchmail_model model_id = ir_model.search(cr, uid, [('model', '=', model_name)])[0] server_ids = fetchmail_server.search(cr, uid, [('object_id', '=', model_id), ('state', '=', 'done')]) res[f] = bool(server_ids) return res def set_fetchmail(self, cr, uid, ids, context=None): """ deactivate fetchmail servers for all fields like 'fetchmail_XXX' that are False """ config = self.browse(cr, uid, ids[0], context) fetchmail_fields = [f for f in self._columns if f.startswith('fetchmail_')] # determine which models should not have active fetchmail servers, and # deactivate all active servers for those models models = [self._columns[f].fetchmail_model for f in fetchmail_fields if not config[f]] if models: fetchmail_server = self.pool.get('fetchmail.server') server_ids = fetchmail_server.search(cr, uid, [('object_id.model', 'in', models), ('state', '=', 'done')]) fetchmail_server.set_draft(cr, uid, server_ids, context) def configure_fetchmail(self, cr, uid, field, context=None): """ open the form view of the fetchmail.server to configure """ action = { 'type': 'ir.actions.act_window', 'res_model': 'fetchmail.server', 'view_mode': 'form', 'target': 'current', } model_name = self._columns[field].fetchmail_model model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)])[0] server_ids = self.pool.get('fetchmail.server').search(cr, uid, [('object_id', '=', 
model_id)]) if server_ids: action['res_id'] = server_ids[0] else: action['context'] = { 'default_name': self._columns[field].fetchmail_name, 'default_object_id': model_id, } return action def __getattr__(self, name): """ catch calls to 'configure_fetchmail_XXX' """ if name.startswith('configure_fetchmail_'): return (lambda cr, uid, ids, context=None: self.configure_fetchmail(cr, uid, name[10:], context)) return super(fetchmail_config_settings, self).__getattr__(name) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mr-tweaker/sabermod_kernel_cancro
tools/perf/scripts/python/netdev-times.py
11271
15048
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
gpl-2.0
DayanaHudson/GMUExchange
node_modules/google-cloud/node_modules/node-forge/tests/server.py
171
5756
#!/usr/bin/env python """ SSL server for Forge tests. - The server changes to the directory of the server script. - SSL uses "server.key" and "server.crt". - Sever performs basic static file serving. - Starts Flash cross domain policy file server. - Defaults to HTTP/HTTPS port 19400. - Defaults to Flash socket policy port 19945. $ ./server.py [options] If you just need a simple HTTP server, also consider: $ python -m SimpleHTTPServer 19400 """ from multiprocessing import Process from optparse import OptionParser import SimpleHTTPServer import SocketServer import os import sys import time # Try to import special Forge SSL module with session cache support # Using the built directory directly python_version = "python" + sys.version[:3] sys.path.insert(0, os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "dist", "forge_ssl", "lib", python_version, "site-packages")) try: from forge import ssl have_ssl_sessions = True have_ssl = True except ImportError: have_ssl_sessions = False try: import ssl have_ssl = True except ImportError: have_ssl = False # Set address reuse for all TCPServers SocketServer.TCPServer.allow_reuse_address = True # The policy file # NOTE: This format is very strict. Edit with care. policy_file = """\ <?xml version="1.0"?>\ <!DOCTYPE cross-domain-policy\ SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">\ <cross-domain-policy>\ <allow-access-from domain="*" to-ports="*"/>\ </cross-domain-policy>\0""" class PolicyHandler(SocketServer.BaseRequestHandler): """ The RequestHandler class for our server. Returns a policy file when requested. """ def handle(self): # get some data # TODO: make this more robust (while loop, etc) self.data = self.request.recv(1024).rstrip('\0') #print "%s wrote:" % self.client_address[0] #print repr(self.data) # if policy file request, send the file. if self.data == "<policy-file-request/>": print "Policy server request from %s." 
% (self.client_address[0]) self.request.send(policy_file) else: print "Policy server received junk from %s: \"%s\"" % \ (self.client_address[0], repr(self.data)) def create_policy_server(options): """Start a policy server""" print "Policy serving from %d." % (options.policy_port) policyd = SocketServer.TCPServer((options.host, options.policy_port), PolicyHandler) return policyd class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): pass def create_http_server(options, script_dir): """Start a static file server""" # use UTF-8 encoding for javascript files m = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map m['.js'] = 'application/javascript;charset=UTF-8' Handler = SimpleHTTPServer.SimpleHTTPRequestHandler # httpd = SocketServer.TCPServer((options.host, options.port), Handler) httpd = ThreadedTCPServer((options.host, options.port), Handler) if options.tls: if not have_ssl: raise Exception("SSL support from Python 2.7 or later is required.") # setup session args if we session support sess_args = {} if have_ssl_sessions: sess_args["sess_id_ctx"] = "forgetest" else: print "Forge SSL with session cache not available, using standard version." httpd.socket = ssl.wrap_socket( httpd.socket, keyfile="server.key", certfile="server.crt", server_side=True, **sess_args) print "Serving from \"%s\"." 
% (script_dir) print "%s://%s:%d/" % \ (("https" if options.tls else "http"), httpd.server_address[0], options.port) return httpd def serve_forever(server): """Serve until shutdown or keyboard interrupt.""" try: server.serve_forever() except KeyboardInterrupt: return def main(): """Start static file and policy servers""" usage = "Usage: %prog [options]" parser = OptionParser(usage=usage) parser.add_option("", "--host", dest="host", metavar="HOST", default="localhost", help="bind to HOST") parser.add_option("-p", "--port", dest="port", type="int", help="serve on PORT", metavar="PORT", default=19400) parser.add_option("-P", "--policy-port", dest="policy_port", type="int", help="serve policy file on PORT", metavar="PORT", default=19945) parser.add_option("", "--tls", dest="tls", action="store_true", help="serve HTTPS", default=False) (options, args) = parser.parse_args() # Change to script dir so SSL and test files are in current dir. script_dir = os.path.dirname(os.path.realpath(__file__)) os.chdir(script_dir) print "Forge Test Server. Use ctrl-c to exit." # create policy and http servers httpd = create_http_server(options, script_dir) policyd = create_policy_server(options) # start servers server_p = Process(target=serve_forever, args=(httpd,)) policy_p = Process(target=serve_forever, args=(policyd,)) server_p.start() policy_p.start() processes = [server_p, policy_p] while len(processes) > 0: try: for p in processes: if p.is_alive(): p.join(1) else: processes.remove(p) except KeyboardInterrupt: print "\nStopping test server..." # processes each receive interrupt # so no need to shutdown #httpd.shutdown(); #policyd.shutdown(); if __name__ == "__main__": main()
mit
LodewijkSikkel/paparazzi
sw/misc/attitude_reference/att_ref_gui.py
49
12483
#!/usr/bin/env python # # Copyright (C) 2014 Antoine Drouin # # This file is part of paparazzi. # # paparazzi is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # paparazzi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with paparazzi; see the file COPYING. If not, write to # the Free Software Foundation, 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. # """ This is a graphical user interface for playing with reference attitude """ # https://gist.github.com/zed/b966b5a04f2dfc16c98e # https://gist.github.com/nzjrs/51686 # http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/ # http://chimera.labs.oreilly.com/books/1230000000393/ch12.html#_problem_208 <- threads # TODO: # -cancel workers # # # from __future__ import print_function from gi.repository import Gtk, GObject from matplotlib.figure import Figure from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas import matplotlib.font_manager as fm import math, threading, numpy as np, scipy.signal, pdb, copy, logging import pat.utils as pu import pat.algebra as pa import control as ctl import gui class Reference(gui.Worker): def __init__(self, sp, ref_impl=ctl.att_ref_default, omega=6., xi=0.8, max_vel=pu.rad_of_deg(100), max_accel=pu.rad_of_deg(500)): gui.Worker.__init__(self) self.impl = ref_impl() self.sp = sp self.reset_outputs(sp) self.update(sp, ref_impl, omega, xi, max_vel, max_accel) self.do_work = True def reset_outputs(self, sp): self.euler = np.zeros((len(sp.time), pa.e_size)) self.quat = np.zeros((len(sp.time), pa.q_size)) self.vel 
= np.zeros((len(sp.time), pa.r_size)) self.accel = np.zeros((len(sp.time), pa.r_size)) def update_type(self, _type): #print('update_type', _type) self.impl = _type() self.do_work = True #self.recompute() def update_param(self, p, v): #print('update_param', p, v) self.impl.set_param(p, v) self.do_work = True #self.recompute() def update_sp(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None): self.reset_outputs(sp) self.update(sp, ref_impl, omega, xi, max_vel, max_accel) self.do_work = True #self.recompute() def update(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None): self.sp = sp if ref_impl is not None: self.impl = ref_impl() if omega is not None: self.impl.set_param('omega', omega) if xi is not None: self.impl.set_param('xi', xi) if max_vel is not None: self.impl.set_param('max_vel', max_vel) if max_accel is not None: self.impl.set_param('max_accel', max_accel) def recompute(self): #print("recomputing...") self.start((self.sp,)) def _work_init(self, sp): #print('_work_init ', self, self.impl, sp, sp.dt) self.euler = np.zeros((len(sp.time), pa.e_size)) self.quat = np.zeros((len(sp.time), pa.q_size)) self.vel = np.zeros((len(sp.time), pa.r_size)) self.accel = np.zeros((len(sp.time), pa.r_size)) euler0 = [0.3, 0.1, 0.2] self.impl.set_euler(np.array(euler0)) self.quat[0], self.euler[0], self.vel[0], self.accel[0] = self.impl.quat, self.impl.euler, self.impl.vel, self.impl.accel self.n_iter_per_step = float(len(sp.time)) / self.n_step def _work_step(self, i, sp): start, stop = int(i * self.n_iter_per_step), int((i + 1) * self.n_iter_per_step) # print('_work_step of %s: i %i, start %i, stop %i' % (self.impl, i, start, stop)) for j in range(start, stop): self.impl.update_quat(sp.quat[j], sp.dt) self.quat[j], self.vel[j], self.accel[j] = self.impl.quat, self.impl.vel, self.impl.accel self.euler[j] = pa.euler_of_quat(self.quat[j]) class Setpoint(object): t_static, t_step_phi, t_step_theta, t_step_psi, t_step_random, t_nb = 
range(0, 6) t_names = ["constant", "step phi", "step theta", "step psi", "step_random"] def __init__(self, type=t_static, duration=10., step_duration=5., step_ampl=pu.rad_of_deg(10.)): self.dt = 1. / 512 self.update(type, duration, step_duration, step_ampl) def update(self, type, duration, step_duration, step_ampl): self.type = type self.duration, self.step_duration, self.step_ampl = duration, step_duration, step_ampl self.time = np.arange(0., self.duration, self.dt) self.euler = np.zeros((len(self.time), pa.e_size)) try: i = [Setpoint.t_step_phi, Setpoint.t_step_theta, Setpoint.t_step_psi].index(self.type) self.euler[:, i] = step_ampl / 2 * scipy.signal.square(math.pi / step_duration * self.time) except Exception as e: print(e) pass self.quat = np.zeros((len(self.time), pa.q_size)) for i in range(0, len(self.time)): self.quat[i] = pa.quat_of_euler(self.euler[i]) class GUI(object): def __init__(self, sp, refs): self.b = Gtk.Builder() self.b.add_from_file("ressources/att_ref_gui.xml") w = self.b.get_object("window") w.connect("delete-event", Gtk.main_quit) mb = self.b.get_object("main_vbox") self.plot = Plot(sp, refs) mb.pack_start(self.plot, True, True, 0) mb = self.b.get_object("main_hbox") ref_classes = [ctl.att_ref_default, ctl.att_ref_sat_naive, ctl.att_ref_sat_nested, ctl.att_ref_sat_nested2, ctl.AttRefFloatNative, ctl.AttRefIntNative] self.ref_views = [gui.AttRefParamView('<b>Ref {}</b>'.format(i+1), ref_classes=ref_classes, active_impl=r.impl) for i, r in enumerate(refs)] for r in self.ref_views: mb.pack_start(r, True, True, 0) w.show_all() class Plot(Gtk.Frame): def __init__(self, sp, refs): Gtk.Frame.__init__(self) self.f = Figure() self.canvas = FigureCanvas(self.f) self.add(self.canvas) self.set_size_request(1024, 600) self.f.subplots_adjust(left=0.07, right=0.98, bottom=0.05, top=0.95, hspace=0.2, wspace=0.2) # self.buffer = self.canvas.get_snapshot() def decorate(self, axis, title=None, ylab=None, legend=None): # font_prop = 
fm.FontProperties(fname='Humor-Sans-1.0.ttf', size=14) if title is not None: axis.set_title(title) # , fontproperties=font_prop) if ylab is not None: axis.yaxis.set_label_text(ylab) # , fontproperties=font_prop) if legend is not None: axis.legend(legend) # , prop=font_prop) axis.xaxis.grid(color='k', linestyle='-', linewidth=0.2) axis.yaxis.grid(color='k', linestyle='-', linewidth=0.2) def update(self, sp, refs): title = [r'$\phi$', r'$\theta$', r'$\psi$'] legend = ['Ref1', 'Ref2', 'Setpoint'] for i in range(0, 3): axis = self.f.add_subplot(331 + i) axis.clear() for ref in refs: axis.plot(sp.time, pu.deg_of_rad(ref.euler[:, i])) axis.plot(sp.time, pu.deg_of_rad(sp.euler[:, i])) self.decorate(axis, title[i], *(('deg', legend) if i == 0 else (None, None))) title = [r'$p$', r'$q$', r'$r$'] for i in range(0, 3): axis = self.f.add_subplot(334 + i) axis.clear() for ref in refs: axis.plot(sp.time, pu.deg_of_rad(ref.vel[:, i])) self.decorate(axis, title[i], 'deg/s' if i == 0 else None) title = [r'$\dot{p}$', r'$\dot{q}$', r'$\dot{r}$'] for i in range(0, 3): axis = self.f.add_subplot(337 + i) axis.clear() for ref in refs: axis.plot(sp.time, pu.deg_of_rad(ref.accel[:, i])) self.decorate(axis, title[i], 'deg/s2' if i == 0 else None) self.canvas.draw() class Application(object): def __init__(self): self.sp = Setpoint() self.refs = [Reference(self.sp), Reference(self.sp, ref_impl=ctl.AttRefFloatNative)] for nref, r in enumerate(self.refs): r.connect("progress", self.on_ref_update_progress, nref + 1) r.connect("completed", self.on_ref_update_completed, nref + 1) self.gui = GUI(self.sp, self.refs) self.register_gui() self.recompute_sequentially() def on_ref_update_progress(self, ref, v, nref): #print('progress', nref, v) self.gui.ref_views[nref - 1].progress.set_fraction(v) def on_ref_update_completed(self, ref, nref): #print('on_ref_update_completed', ref, nref) self.gui.ref_views[nref - 1].progress.set_fraction(1.0) # recompute remaining refs (if any) 
self.recompute_sequentially() self.gui.plot.update(self.sp, self.refs) def register_gui(self): self.register_setpoint() for i in range(0, 2): self.gui.ref_views[i].connect(self._on_ref_changed, self._on_ref_param_changed, self.refs[i], self.gui.ref_views[i]) self.gui.ref_views[i].update_view(self.refs[i].impl) def register_setpoint(self): b = self.gui.b c_sp_type = b.get_object("combo_sp_type") for n in Setpoint.t_names: c_sp_type.append_text(n) c_sp_type.set_active(self.sp.type) c_sp_type.connect("changed", self.on_sp_changed) names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"] widgets = [b.get_object(name) for name in names] adjs = [Gtk.Adjustment(self.sp.duration, 1, 100, 1, 10, 0), Gtk.Adjustment(self.sp.step_duration, 0.1, 10., 0.1, 1., 0), Gtk.Adjustment(pu.deg_of_rad(self.sp.step_ampl), 0.1, 180., 1, 10., 0)] for i, w in enumerate(widgets): w.set_adjustment(adjs[i]) w.update() w.connect("value-changed", self.on_sp_changed) def recompute_sequentially(self): """ Somehow running two threads to update both references at the same time produces bogus data.. As a workaround we simply run them one after the other. """ for r in self.refs: if r.running: return for r in self.refs: if r.do_work: r.recompute() return def on_sp_changed(self, widget): b = self.gui.b _type = b.get_object("combo_sp_type").get_active() names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"] _duration, _step_duration, _step_amplitude = [b.get_object(name).get_value() for name in names] #print('_on_sp_changed', _type, _duration, _step_duration, _step_amplitude) _step_amplitude = pu.rad_of_deg(_step_amplitude) self.sp.update(_type, _duration, _step_duration, _step_amplitude) # somehow running two threads to update both references at the same time produces bogus data.. 
# as a workaround we simply run them one after the other for r in self.refs: r.update_sp(self.sp) #r.recompute() self.recompute_sequentially() def _on_ref_changed(self, widget, ref, view): #print('_on_ref_changed', widget, ref, view) ref.update_type(view.get_selected_ref_class()) view.update_ref_params(ref.impl) self.recompute_sequentially() def _on_ref_param_changed(self, widget, p, ref, view): #print("_on_ref_param_changed: %s %s=%s" % (ref.impl.name, p, val)) val = view.spin_cfg[p]['d2r'](widget.get_value()) ref.update_param(p, val) self.recompute_sequentially() def run(self): Gtk.main() if __name__ == "__main__": logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) Application().run()
gpl-2.0
admetricks/phantomjs
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_test.py
253
15707
#! /usr/bin/python # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests python protocol buffers against the golden message. Note that the golden messages exercise every known field type, thus this test ends up exercising and verifying nearly all of the parsing and serialization code in the whole library. TODO(kenton): Merge with wire_format_test? 
It doesn't make a whole lot of sense to call this a test of the "message" module, which only declares an abstract interface. """ __author__ = 'gps@google.com (Gregory P. Smith)' import copy import math import unittest from google.protobuf import unittest_import_pb2 from google.protobuf import unittest_pb2 from google.protobuf.internal import test_util # Python pre-2.6 does not have isinf() or isnan() functions, so we have # to provide our own. def isnan(val): # NaN is never equal to itself. return val != val def isinf(val): # Infinity times zero equals NaN. return not isnan(val) and isnan(val * 0) def IsPosInf(val): return isinf(val) and (val > 0) def IsNegInf(val): return isinf(val) and (val < 0) class MessageTest(unittest.TestCase): def testGoldenMessage(self): golden_data = test_util.GoldenFile('golden_message').read() golden_message = unittest_pb2.TestAllTypes() golden_message.ParseFromString(golden_data) test_util.ExpectAllFieldsSet(self, golden_message) self.assertTrue(golden_message.SerializeToString() == golden_data) golden_copy = copy.deepcopy(golden_message) self.assertTrue(golden_copy.SerializeToString() == golden_data) def testGoldenExtensions(self): golden_data = test_util.GoldenFile('golden_message').read() golden_message = unittest_pb2.TestAllExtensions() golden_message.ParseFromString(golden_data) all_set = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(all_set) self.assertEquals(all_set, golden_message) self.assertTrue(golden_message.SerializeToString() == golden_data) golden_copy = copy.deepcopy(golden_message) self.assertTrue(golden_copy.SerializeToString() == golden_data) def testGoldenPackedMessage(self): golden_data = test_util.GoldenFile('golden_packed_fields_message').read() golden_message = unittest_pb2.TestPackedTypes() golden_message.ParseFromString(golden_data) all_set = unittest_pb2.TestPackedTypes() test_util.SetAllPackedFields(all_set) self.assertEquals(all_set, golden_message) self.assertTrue(all_set.SerializeToString() 
== golden_data) golden_copy = copy.deepcopy(golden_message) self.assertTrue(golden_copy.SerializeToString() == golden_data) def testGoldenPackedExtensions(self): golden_data = test_util.GoldenFile('golden_packed_fields_message').read() golden_message = unittest_pb2.TestPackedExtensions() golden_message.ParseFromString(golden_data) all_set = unittest_pb2.TestPackedExtensions() test_util.SetAllPackedExtensions(all_set) self.assertEquals(all_set, golden_message) self.assertTrue(all_set.SerializeToString() == golden_data) golden_copy = copy.deepcopy(golden_message) self.assertTrue(golden_copy.SerializeToString() == golden_data) def testPositiveInfinity(self): golden_data = ('\x5D\x00\x00\x80\x7F' '\x61\x00\x00\x00\x00\x00\x00\xF0\x7F' '\xCD\x02\x00\x00\x80\x7F' '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F') golden_message = unittest_pb2.TestAllTypes() golden_message.ParseFromString(golden_data) self.assertTrue(IsPosInf(golden_message.optional_float)) self.assertTrue(IsPosInf(golden_message.optional_double)) self.assertTrue(IsPosInf(golden_message.repeated_float[0])) self.assertTrue(IsPosInf(golden_message.repeated_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testNegativeInfinity(self): golden_data = ('\x5D\x00\x00\x80\xFF' '\x61\x00\x00\x00\x00\x00\x00\xF0\xFF' '\xCD\x02\x00\x00\x80\xFF' '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF') golden_message = unittest_pb2.TestAllTypes() golden_message.ParseFromString(golden_data) self.assertTrue(IsNegInf(golden_message.optional_float)) self.assertTrue(IsNegInf(golden_message.optional_double)) self.assertTrue(IsNegInf(golden_message.repeated_float[0])) self.assertTrue(IsNegInf(golden_message.repeated_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testNotANumber(self): golden_data = ('\x5D\x00\x00\xC0\x7F' '\x61\x00\x00\x00\x00\x00\x00\xF8\x7F' '\xCD\x02\x00\x00\xC0\x7F' '\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F') golden_message = unittest_pb2.TestAllTypes() 
golden_message.ParseFromString(golden_data) self.assertTrue(isnan(golden_message.optional_float)) self.assertTrue(isnan(golden_message.optional_double)) self.assertTrue(isnan(golden_message.repeated_float[0])) self.assertTrue(isnan(golden_message.repeated_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testPositiveInfinityPacked(self): golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F' '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F') golden_message = unittest_pb2.TestPackedTypes() golden_message.ParseFromString(golden_data) self.assertTrue(IsPosInf(golden_message.packed_float[0])) self.assertTrue(IsPosInf(golden_message.packed_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testNegativeInfinityPacked(self): golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF' '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF') golden_message = unittest_pb2.TestPackedTypes() golden_message.ParseFromString(golden_data) self.assertTrue(IsNegInf(golden_message.packed_float[0])) self.assertTrue(IsNegInf(golden_message.packed_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testNotANumberPacked(self): golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F' '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F') golden_message = unittest_pb2.TestPackedTypes() golden_message.ParseFromString(golden_data) self.assertTrue(isnan(golden_message.packed_float[0])) self.assertTrue(isnan(golden_message.packed_double[0])) self.assertTrue(golden_message.SerializeToString() == golden_data) def testExtremeFloatValues(self): message = unittest_pb2.TestAllTypes() # Most positive exponent, no significand bits set. kMostPosExponentNoSigBits = math.pow(2, 127) message.optional_float = kMostPosExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == kMostPosExponentNoSigBits) # Most positive exponent, one significand bit set. 
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127) message.optional_float = kMostPosExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == kMostPosExponentOneSigBit) # Repeat last two cases with values of same magnitude, but negative. message.optional_float = -kMostPosExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits) message.optional_float = -kMostPosExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit) # Most negative exponent, no significand bits set. kMostNegExponentNoSigBits = math.pow(2, -127) message.optional_float = kMostNegExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == kMostNegExponentNoSigBits) # Most negative exponent, one significand bit set. kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127) message.optional_float = kMostNegExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == kMostNegExponentOneSigBit) # Repeat last two cases with values of the same magnitude, but negative. message.optional_float = -kMostNegExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits) message.optional_float = -kMostNegExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit) def testExtremeFloatValues(self): message = unittest_pb2.TestAllTypes() # Most positive exponent, no significand bits set. 
kMostPosExponentNoSigBits = math.pow(2, 1023) message.optional_double = kMostPosExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == kMostPosExponentNoSigBits) # Most positive exponent, one significand bit set. kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023) message.optional_double = kMostPosExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == kMostPosExponentOneSigBit) # Repeat last two cases with values of same magnitude, but negative. message.optional_double = -kMostPosExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits) message.optional_double = -kMostPosExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit) # Most negative exponent, no significand bits set. kMostNegExponentNoSigBits = math.pow(2, -1023) message.optional_double = kMostNegExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == kMostNegExponentNoSigBits) # Most negative exponent, one significand bit set. kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023) message.optional_double = kMostNegExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == kMostNegExponentOneSigBit) # Repeat last two cases with values of the same magnitude, but negative. 
message.optional_double = -kMostNegExponentNoSigBits message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits) message.optional_double = -kMostNegExponentOneSigBit message.ParseFromString(message.SerializeToString()) self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit) def testSortingRepeatedScalarFieldsDefaultComparator(self): """Check some different types with the default comparator.""" message = unittest_pb2.TestAllTypes() # TODO(mattp): would testing more scalar types strengthen test? message.repeated_int32.append(1) message.repeated_int32.append(3) message.repeated_int32.append(2) message.repeated_int32.sort() self.assertEqual(message.repeated_int32[0], 1) self.assertEqual(message.repeated_int32[1], 2) self.assertEqual(message.repeated_int32[2], 3) message.repeated_float.append(1.1) message.repeated_float.append(1.3) message.repeated_float.append(1.2) message.repeated_float.sort() self.assertAlmostEqual(message.repeated_float[0], 1.1) self.assertAlmostEqual(message.repeated_float[1], 1.2) self.assertAlmostEqual(message.repeated_float[2], 1.3) message.repeated_string.append('a') message.repeated_string.append('c') message.repeated_string.append('b') message.repeated_string.sort() self.assertEqual(message.repeated_string[0], 'a') self.assertEqual(message.repeated_string[1], 'b') self.assertEqual(message.repeated_string[2], 'c') message.repeated_bytes.append('a') message.repeated_bytes.append('c') message.repeated_bytes.append('b') message.repeated_bytes.sort() self.assertEqual(message.repeated_bytes[0], 'a') self.assertEqual(message.repeated_bytes[1], 'b') self.assertEqual(message.repeated_bytes[2], 'c') def testSortingRepeatedScalarFieldsCustomComparator(self): """Check some different types with custom comparator.""" message = unittest_pb2.TestAllTypes() message.repeated_int32.append(-3) message.repeated_int32.append(-2) message.repeated_int32.append(-1) 
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y))) self.assertEqual(message.repeated_int32[0], -1) self.assertEqual(message.repeated_int32[1], -2) self.assertEqual(message.repeated_int32[2], -3) message.repeated_string.append('aaa') message.repeated_string.append('bb') message.repeated_string.append('c') message.repeated_string.sort(lambda x,y: cmp(len(x), len(y))) self.assertEqual(message.repeated_string[0], 'c') self.assertEqual(message.repeated_string[1], 'bb') self.assertEqual(message.repeated_string[2], 'aaa') def testSortingRepeatedCompositeFieldsCustomComparator(self): """Check passing a custom comparator to sort a repeated composite field.""" message = unittest_pb2.TestAllTypes() message.repeated_nested_message.add().bb = 1 message.repeated_nested_message.add().bb = 3 message.repeated_nested_message.add().bb = 2 message.repeated_nested_message.add().bb = 6 message.repeated_nested_message.add().bb = 5 message.repeated_nested_message.add().bb = 4 message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb)) self.assertEqual(message.repeated_nested_message[0].bb, 1) self.assertEqual(message.repeated_nested_message[1].bb, 2) self.assertEqual(message.repeated_nested_message[2].bb, 3) self.assertEqual(message.repeated_nested_message[3].bb, 4) self.assertEqual(message.repeated_nested_message[4].bb, 5) self.assertEqual(message.repeated_nested_message[5].bb, 6) if __name__ == '__main__': unittest.main()
bsd-3-clause
willingc/pythondotorg
companies/migrations/0001_initial.py
14
1528
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import markupfield.fields class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Company', fields=[ ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ('slug', models.SlugField(unique=True)), ('about', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('about_markup_type', models.CharField(max_length=30, choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='restructuredtext', blank=True)), ('contact', models.CharField(max_length=100, blank=True, null=True)), ('_about_rendered', models.TextField(editable=False)), ('email', models.EmailField(max_length=75, blank=True, null=True)), ('url', models.URLField(verbose_name='URL', blank=True, null=True)), ('logo', models.ImageField(upload_to='companies/logos/', blank=True, null=True)), ], options={ 'verbose_name': 'company', 'verbose_name_plural': 'companies', 'ordering': ('name',), }, bases=(models.Model,), ), ]
apache-2.0
acsone/odoo
openerp/tools/view_validation.py
367
2303
""" View validation code (using assertions, not the RNG schema). """ import logging _logger = logging.getLogger(__name__) def valid_page_in_book(arch): """A `page` node must be below a `book` node.""" return not arch.xpath('//page[not(ancestor::notebook)]') def valid_field_in_graph(arch): """ Children of ``graph`` can only be ``field`` """ for child in arch.xpath('/graph/child::*'): if child.tag != 'field': return False return True def valid_field_in_tree(arch): """ Children of ``tree`` view must be ``field`` or ``button``.""" for child in arch.xpath('/tree/child::*'): if child.tag not in ('field', 'button'): return False return True def valid_att_in_field(arch): """ ``field`` nodes must all have a ``@name`` """ return not arch.xpath('//field[not(@name)]') def valid_att_in_label(arch): """ ``label`` nodes must have a ``@for`` or a ``@string`` """ return not arch.xpath('//label[not(@for or @string)]') def valid_att_in_form(arch): return True def valid_type_in_colspan(arch): """A `colspan` attribute must be an `integer` type.""" for attrib in arch.xpath('//*/@colspan'): try: int(attrib) except: return False return True def valid_type_in_col(arch): """A `col` attribute must be an `integer` type.""" for attrib in arch.xpath('//*/@col'): try: int(attrib) except: return False return True def valid_view(arch): if arch.tag == 'form': for pred in [valid_page_in_book, valid_att_in_form, valid_type_in_colspan, valid_type_in_col, valid_att_in_field, valid_att_in_label]: if not pred(arch): _logger.error('Invalid XML: %s', pred.__doc__) return False elif arch.tag == 'graph': for pred in [valid_field_in_graph, valid_att_in_field]: if not pred(arch): _logger.error('Invalid XML: %s', pred.__doc__) return False elif arch.tag == 'tree': for pred in [valid_field_in_tree, valid_att_in_field]: if not pred(arch): _logger.error('Invalid XML: %s', pred.__doc__) return False return True
agpl-3.0