repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
skritchz/android_kernel_motorola_surnia | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
# Run the event watcher when executed as a script.
if __name__ == '__main__':
    main()
| gpl-2.0 |
SOKP/external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/netlog_profiler.py | 48 | 1557 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tempfile
from telemetry.core.platform import profiler
class NetLogProfiler(profiler.Profiler):
_NET_LOG_ARG = '--log-net-log='
@classmethod
def name(cls):
return 'netlog'
@classmethod
def is_supported(cls, browser_type):
return not browser_type.startswith('cros')
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
if browser_type.startswith('android'):
dump_file = '/sdcard/net-internals-profile.json'
else:
dump_file = tempfile.mkstemp()[1]
options.AppendExtraBrowserArgs([cls._NET_LOG_ARG + dump_file])
def CollectProfile(self):
# Find output filename from browser argument.
for i in self._browser_backend.browser_options.extra_browser_args:
if i.startswith(self._NET_LOG_ARG):
output_file = i[len(self._NET_LOG_ARG):]
assert output_file
# On Android pull the output file to the host.
if self._platform_backend.GetOSName() == 'android':
host_output_file = '%s.json' % self._output_path
self._browser_backend.adb.device().old_interface.Adb().Pull(
output_file, host_output_file)
# Clean the device
self._browser_backend.adb.device().RunShellCommand('rm %s' % output_file)
output_file = host_output_file
print 'Net-internals log saved as %s' % output_file
print 'To view, open in chrome://net-internals'
return [output_file]
| bsd-3-clause |
shahar-stratoscale/nova | doc/source/conf.py | 1 | 9654 | # -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Makes the nova package and the local sphinx extensions importable.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'ext.nova_todo',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.ifconfig',
              'sphinx.ext.graphviz',
              'oslo.sphinx',
              ]

# Render todo directives (consumed by the ext.nova_todo extension above).
todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: imported here rather than at the top so the sys.path entries added
# above are already in place when nova is resolved.
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
unused_docs = [
    'api_ext/rst_extension_template',
    'vmwareapi_readme',
    'installer',
]

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']
# -- Options for man page output ----------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
#
# Every nova man page shares the same title, author and manual section, so
# the tuples are generated from the list of binary names instead of being
# hand-repeated twenty times (the source file is always 'man/<target>').
_man_page_names = [
    'nova-all',
    'nova-api-ec2',
    'nova-api-metadata',
    'nova-api-os-compute',
    'nova-api',
    'nova-cert',
    'nova-compute',
    'nova-console',
    'nova-consoleauth',
    'nova-dhcpbridge',
    'nova-manage',
    'nova-network',
    'nova-novncproxy',
    'nova-spicehtml5proxy',
    'nova-objectstore',
    'nova-rootwrap',
    'nova-rpc-zmq-receiver',
    'nova-scheduler',
    'nova-xvpvncproxy',
    'nova-conductor',
]

man_pages = [('man/%s' % name, name, u'Cloud controller fabric',
              [u'OpenStack'], 1)
             for name in _man_page_names]
# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
# NOTE(review): os.popen is deprecated in favour of subprocess and silently
# yields '' when git is missing -- presumably acceptable for doc builds;
# confirm before hardening.
html_last_updated_fmt = os.popen(git_cmd).read()

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'

# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'Nova.tex', u'Nova Documentation',
     u'OpenStack Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
                       'swift': ('http://swift.openstack.org', None)}
| apache-2.0 |
GongYiLiao/Python_Daily | 2013/Dec/29/test_pyopencl.py | 1 | 1540 | """ ============================================================================
File name:
test_pyopencl.py
Author:
gong-yi@GongTop0
Created on:
2013/12/29 20:45:43
Purpose:
To show
Copyright:
BSD / Apache
---------------------------------------------------------------------------- """
import pyopencl as cl
import numpy
import numpy.linalg as la
a = numpy.random.rand(50000).astype(numpy.float32)
b = numpy.random.rand(50000).astype(numpy.float32)
print(cl.get_platforms()[0].get_devices())
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
b_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)
dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, b.nbytes)
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a,
__global const float *b,
__global float *c) {
int gid = get_global_id(0);
c[gid] = sin(lgamma(a[gid])) + cos(lgamma(b[gid]));
}
""").build()
prg.sum(queue, a.shape, None, a_buf, b_buf, dest_buf)
a_plus_b = numpy.empty_like(a)
cl.enqueue_copy(queue, a_plus_b, dest_buf)
print(la.norm(a_plus_b - (a+b)), la.norm(a_plus_b))
""" ----------------------------------------------------------------------------
End note:
(end note starts here)
============================================================================ """
| mit |
jbfuzier/dionaea | modules/python/scripts/tftp.py | 11 | 42174 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2009 Paul Baecher & Markus Koetter
#* Copyright (c) 2006-2009 Michael P. Soulier
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
#* The whole logic is taken from tftpy
#* http://tftpy.sourceforge.net/
#* tftpy is licensed using CNRI Python License
#* which is claimed to be incompatible with the gpl
#* http://www.gnu.org/philosophy/license-list.html
#
#* Nevertheless, the tftpy author Michael P. Soulier
#* gave us a non exclusive permission to use his code in
#* our gpl project
#*******************************************************************************
from dionaea.core import connection, ihandler, g_dionaea, incident
import tempfile
import struct
import logging
import os
# TFTP block-size bounds for the RFC 2348 blksize option; DEF_BLKSIZE is
# the RFC 1350 default.  (The original assigned DEF_BLKSIZE = 512 twice;
# the duplicate is removed.)
MIN_BLKSIZE = 8
DEF_BLKSIZE = 512
MAX_BLKSIZE = 65536

logger = logging.getLogger('tftp')
logger.setLevel(logging.INFO)
def tftpassert(condition, msg):
    """Raise TftpException(msg) unless *condition* is truthy.

    A one-line guard used throughout the module to keep protocol
    validation checks compact."""
    if condition:
        return
    raise TftpException(msg)
class TftpException(Exception):
    """Root of all exceptions raised while handling the TFTP protocol."""
    pass
class TftpErrors(object):
    """Readable names for the TFTP error codes of RFC 1350, plus the
    option-negotiation failure code introduced by RFC 2347."""
    NotDefined = 0         # see the attached error message, if any
    FileNotFound = 1
    AccessViolation = 2
    DiskFull = 3           # or allocation exceeded
    IllegalTftpOp = 4
    UnknownTID = 5
    FileAlreadyExists = 6
    NoSuchUser = 7
    FailedNegotiation = 8  # RFC 2347 option negotiation failed
class TftpState(object):
    """This class represents a particular state for a TFTP Session. It
    encapsulates a state, kind of like an enum. The states mean the following:
    nil  - Client/Server - Session not yet established
    rrq  - Client - Just sent RRQ in a download, waiting for response
           Server - Just received an RRQ
    wrq  - Client - Just sent WRQ in an upload, waiting for response
           Server - Just received a WRQ
    dat  - Client/Server - Transferring data
    oack - Client - Just received oack
           Server - Just sent OACK
    ack  - Client - Acknowledged oack, awaiting response
           Server - Just received ACK to OACK
    err  - Client/Server - Fatal problems, giving up
    fin  - Client/Server - Transfer completed
    """
    # The complete set of legal state names.
    states = ['nil',
              'rrq',
              'wrq',
              'dat',
              'oack',
              'ack',
              'err',
              'fin']

    def __init__(self, state='nil'):
        self.state = state

    def getState(self):
        return self.__state

    def setState(self, state):
        # BUG FIX: the original silently ignored unknown values, leaving
        # __state unset and surfacing later as a confusing AttributeError
        # in getState().  Fail fast with a clear error instead.
        if state not in TftpState.states:
            raise ValueError("invalid TFTP session state: %r" % (state,))
        self.__state = state

    state = property(getState, setState)
class TftpSession(connection):
    """Common base for the tftp client and server sides.  Holds the
    session state machine plus shared bookkeeping and the error-reply
    helper."""

    def __init__(self):
        """Initialise session bookkeeping.  The ``state`` attribute is
        always a TftpState instance."""
        self.options = None
        self.state = TftpState()
        # Counters for duplicate packets and protocol errors seen so far.
        self.dups = 0
        self.errors = 0
        connection.__init__(self, 'udp')

    def senderror(self, errorcode):
        """Compose a TFTP ERROR packet carrying *errorcode* and send it
        to the connected peer."""
        logger.debug("In senderror, being asked to send error %d to %s:%i"
                     % (errorcode, self.remote.host, self.remote.port))
        reply = TftpPacketERR()
        reply.errorcode = errorcode
        self.send(reply.encode().buffer)
class TftpPacketWithOptions(object):
    """This class exists to permit some TftpPacket subclasses to share code
    regarding options handling. It does not inherit from TftpPacket, as the
    goal is just to share code here, and not cause diamond inheritance."""

    def __init__(self):
        # Backing store for the ``options`` property (dict once set).
        self.options = []

    def setoptions(self, options):
        """Coerce every option key and value to str and store the result."""
        logger.debug("in TftpPacketWithOptions.setoptions")
        logger.debug("options: " + str(options))
        myoptions = {}
        for key in options:
            newkey = str(key)
            myoptions[newkey] = str(options[key])
            logger.debug("populated myoptions with %s = %s"
                         % (newkey, myoptions[newkey]))
        logger.debug("setting options hash to: " + str(myoptions))
        self._options = myoptions

    def getoptions(self):
        logger.debug("in TftpPacketWithOptions.getoptions")
        return self._options

    # Set up getter and setter on options to ensure that they are the proper
    # type. They should always be strings, but we don't need to force the
    # client to necessarily enter strings if we can avoid it.
    options = property(getoptions, setoptions)

    def decode_options(self, buffer):
        """This method decodes the section of the buffer that contains an
        unknown number of options. It returns a dictionary of option names and
        values.

        The buffer is a sequence of NUL-terminated strings; a struct format
        of the shape "!<n>sx<m>sx..." is built from the NUL positions, then
        unpacked into alternating name/value pairs."""
        nulls = 0
        format = "!"
        options = {}
        logger.debug("decode_options: buffer is: " + repr(buffer))
        logger.debug("size of buffer is %d bytes" % len(buffer))
        if len(buffer) == 0:
            logger.debug("size of buffer is zero, returning empty hash")
            return {}
        # Count the nulls in the buffer. Each one terminates a string.
        logger.debug("about to iterate options buffer counting nulls")
        length = 0
        for c in buffer:
            #logger.debug("iterating this byte: " + repr(c))
            # Works for both bytes iteration (ints) and str iteration.
            if c == 0 or c == '\x00':
                logger.debug("found a null at length %d" % length)
                if length > 0:
                    format += "%dsx" % length
                    # -1 because the null itself is counted below.
                    length = -1
                else:
                    # A zero-length name/value (two adjacent NULs) is invalid.
                    raise TftpException("Invalid options in buffer")
            length += 1
        logger.debug("about to unpack, format is: %s" % format)
        mystruct = struct.unpack(format, buffer)
        tftpassert(len(mystruct) % 2 == 0,
                   "packet with odd number of option/value pairs")
        for i in range(0, len(mystruct), 2):
            logger.debug("setting option %s to %s" % (mystruct[i], mystruct[i+1]))
            options[mystruct[i].decode()] = mystruct[i+1].decode()
        return options
class TftpPacket(object):
    """Abstract parent of every tftp packet class.  Defines the
    encode/decode interface and must not be instantiated directly."""

    def __init__(self):
        # Numeric TFTP opcode; concrete subclasses override this.
        self.opcode = 0
        # Raw wire bytes, produced by encode() or supplied before decode().
        self.buffer = None

    def encode(self):
        """Pack the instance variables (specific to the packet type) into
        ``self.buffer`` in network-byte order, ready to send on the wire.

        This is an abstract method."""
        raise NotImplementedError("Abstract method")

    def decode(self):
        """Populate the instance variables from ``self.buffer``, a whole
        datagram taken off the wire in network-byte order.  May only be
        called once the leading 2-byte opcode has been identified.

        This is an abstract method."""
        raise NotImplementedError("Abstract method")
class TftpPacketInitial(TftpPacket, TftpPacketWithOptions):
    """This class is a common parent class for the RRQ and WRQ packets, as
    they share quite a bit of code (filename + mode + option pairs)."""

    def __init__(self):
        TftpPacket.__init__(self)
        TftpPacketWithOptions.__init__(self)
        # Requested file name and transfer mode; only 'octet' is supported.
        self.filename = None
        self.mode = None

    def encode(self):
        """Encode the packet's buffer from the instance variables."""
        tftpassert(self.filename, "filename required in initial packet")
        tftpassert(self.mode, "mode required in initial packet")
        ptype = None
        if self.opcode == 1:
            ptype = "RRQ"
        else:
            ptype = "WRQ"
        logger.debug("Encoding %s packet, filename = %s, mode = %s"
                     % (ptype, self.filename, self.mode))
        for key in self.options:
            logger.debug(" Option %s = %s" % (key, self.options[key]))
        format = "!H"
        format += "%dsx" % len(self.filename)
        if self.mode == "octet":
            format += "5sx"
        else:
            # BUG FIX: the original referenced the undefined local ``mode``
            # here, so this error path raised NameError instead of the
            # intended AssertionError.
            raise AssertionError("Unsupported mode: %s" % self.mode)
        # Add options.
        options_list = []
        if len(self.options.keys()) > 0:
            logger.debug("there are options to encode")
            for key in self.options:
                # Populate the option name
                format += "%dsx" % len(key)
                options_list.append(key.encode("utf-8"))
                # Populate the option value
                format += "%dsx" % len(str(self.options[key]))
                options_list.append(str(self.options[key]).encode("utf-8"))
        logger.debug("format is %s" % format)
        logger.debug("options_list is %s" % options_list)
        logger.debug("size of struct is %d" % struct.calcsize(format))
        self.buffer = struct.pack(format,
                                  self.opcode,
                                  self.filename.encode('utf-8'),
                                  self.mode.encode('utf-8'),
                                  *options_list)
        logger.debug("buffer is " + repr(self.buffer))
        return self

    def decode(self):
        """Decode filename, mode and any trailing options from self.buffer."""
        tftpassert(self.buffer, "Can't decode, buffer is empty")
        # FIXME - this shares a lot of code with decode_options
        nulls = 0
        format = ""
        nulls = length = tlength = 0
        logger.debug("in decode: about to iterate buffer counting nulls")
        subbuf = self.buffer[2:]
        for c in subbuf:
            logger.debug("iterating this byte: " + repr(c))
            if c == 0 or c == '\x00':
                nulls += 1
                logger.debug("found a null at length %d, now have %d"
                             % (length, nulls))
                format += "%dsx" % length
                length = -1
                # At 2 nulls, we want to mark that position for decoding.
                if nulls == 2:
                    break
            length += 1
            tlength += 1
        logger.debug("hopefully found end of mode at length %d" % tlength)
        # length should now be the end of the mode.
        tftpassert(nulls == 2, "malformed packet")
        shortbuf = subbuf[:tlength+1]
        logger.debug("about to unpack buffer with format: %s" % format)
        logger.debug("unpacking buffer: " + repr(shortbuf))
        mystruct = struct.unpack(format, shortbuf)
        tftpassert(len(mystruct) == 2, "malformed packet")
        try:
            logger.debug("setting filename to %s" % mystruct[0])
            self.filename = mystruct[0].decode()
            logger.debug("setting mode to %s" % mystruct[1])
            self.mode = mystruct[1].decode()
        # Narrowed from a bare ``except:`` -- only a failed bytes.decode()
        # is the malformed-packet case we want to translate here.
        except UnicodeDecodeError:
            tftpassert(0, "malformed packet, filename is not decodeable")
        self.options = self.decode_options(subbuf[tlength+1:])
        return self
class TftpPacketRRQ(TftpPacketInitial):
    """Read request (opcode 1).

        2 bytes    string   1 byte     string   1 byte
        -----------------------------------------------
   RRQ/ | 01/02 |  Filename  |   0  |    Mode    |   0  |
   WRQ  -----------------------------------------------
    """

    def __init__(self):
        TftpPacketInitial.__init__(self)
        self.opcode = 1

    def __str__(self):
        pieces = ['RRQ packet: filename = %s' % self.filename,
                  ' mode = %s' % self.mode]
        if self.options:
            pieces.append('\n options = %s' % self.options)
        return ''.join(pieces)
class TftpPacketWRQ(TftpPacketInitial):
    """Write request (opcode 2).

        2 bytes    string   1 byte     string   1 byte
        -----------------------------------------------
   RRQ/ | 01/02 |  Filename  |   0  |    Mode    |   0  |
   WRQ  -----------------------------------------------
    """

    def __init__(self):
        TftpPacketInitial.__init__(self)
        self.opcode = 2

    def __str__(self):
        pieces = ['WRQ packet: filename = %s' % self.filename,
                  ' mode = %s' % self.mode]
        if self.options:
            pieces.append('\n options = %s' % self.options)
        return ''.join(pieces)
class TftpPacketDAT(TftpPacket):
    """Data packet (opcode 3).

        2 bytes    2 bytes       n bytes
        ---------------------------------
  DATA  | 03    |   Block #  |    Data    |
        ---------------------------------
    """

    def __init__(self):
        TftpPacket.__init__(self)
        self.opcode = 3
        self.blocknumber = 0
        self.data = None

    def __str__(self):
        out = 'DAT packet: block %s' % self.blocknumber
        if self.data:
            out += '\n data: %d bytes' % len(self.data)
        return out

    def encode(self):
        """Pack opcode, block number and payload into self.buffer; return
        self for easy method chaining."""
        if len(self.data) == 0:
            logger.debug("Encoding an empty DAT packet")
        wire_format = "!HH%ds" % len(self.data)
        self.buffer = struct.pack(wire_format,
                                  self.opcode, self.blocknumber, self.data)
        return self

    def decode(self):
        """Unpack block number and payload from self.buffer; return self
        for easy method chaining."""
        # Bytes 0-1 hold the (already-known) opcode; bytes 2-3 the block
        # number; the remainder is the payload.
        (self.blocknumber,) = struct.unpack("!H", self.buffer[2:4])
        logger.debug("decoding DAT packet, block number %d" % self.blocknumber)
        logger.debug("should be %d bytes in the packet total"
                     % len(self.buffer))
        self.data = self.buffer[4:]
        logger.debug("found %d bytes of data"
                     % len(self.data))
        return self
class TftpPacketACK(TftpPacket):
    """Acknowledgement packet (opcode 4).

        2 bytes    2 bytes
        -------------------
   ACK  | 04    |   Block #  |
        --------------------
    """

    def __init__(self):
        TftpPacket.__init__(self)
        self.opcode = 4
        self.blocknumber = 0

    def __str__(self):
        return 'ACK packet: block %d' % self.blocknumber

    def encode(self):
        """Pack opcode and block number into self.buffer; return self."""
        logger.debug("encoding ACK: opcode = %d, block = %d"
                     % (self.opcode, self.blocknumber))
        wire = struct.pack("!HH", self.opcode, self.blocknumber)
        self.buffer = wire
        return self

    def decode(self):
        """Unpack opcode and block number from self.buffer; return self."""
        self.opcode, self.blocknumber = struct.unpack("!HH", self.buffer)
        logger.debug("decoded ACK packet: opcode = %d, block = %d"
                     % (self.opcode, self.blocknumber))
        return self
class TftpPacketERR(TftpPacket):
    """Error packet (opcode 5).

        2 bytes  2 bytes        string    1 byte
        ----------------------------------------
  ERROR | 05    |  ErrorCode |   ErrMsg   |   0  |
        ----------------------------------------
    Error Codes
    Value     Meaning
    0         Not defined, see error message (if any).
    1         File not found.
    2         Access violation.
    3         Disk full or allocation exceeded.
    4         Illegal TFTP operation.
    5         Unknown transfer ID.
    6         File already exists.
    7         No such user.
    8         Failed to negotiate options
    """

    def __init__(self):
        TftpPacket.__init__(self)
        self.opcode = 5
        self.errorcode = 0
        self.errmsg = None
        # FIXME - integrate in TftpErrors references?
        # Default human-readable message per error code, sent on encode().
        self.errmsgs = {
            1: "File not found",
            2: "Access violation",
            3: "Disk full or allocation exceeded",
            4: "Illegal TFTP operation",
            5: "Unknown transfer ID",
            6: "File already exists",
            7: "No such user",
            8: "Failed to negotiate options"
        }

    def __str__(self):
        s = 'ERR packet: errorcode = %d' % self.errorcode
        s += '\n msg = %s' % self.errmsgs.get(self.errorcode, '')
        return s

    def encode(self):
        """Encode the ERR packet based on instance variables, populating
        self.buffer, returning self."""
        # NOTE: encoding uses the canned message for self.errorcode; a
        # KeyError here means the code has no entry in self.errmsgs.
        format = "!HH%dsx" % len(self.errmsgs[self.errorcode])
        logger.debug("encoding ERR packet with format %s" % format)
        self.buffer = struct.pack(format,
                                  self.opcode,
                                  self.errorcode,
                                  self.errmsgs[self.errorcode].encode("utf-8"))
        return self

    def decode(self):
        """Decode self.buffer, populating instance variables and return self."""
        tftpassert(len(self.buffer) > 4, "malformed ERR packet, too short")
        logger.debug("Decoding ERR packet, length %s bytes" %
                     len(self.buffer))
        # NOTE(review): the "- 5" assumes the packet ends with exactly one
        # trailing NUL after the message; a packet without that terminator
        # will make struct.unpack raise -- confirm callers treat that as a
        # malformed packet.
        format = "!HH%dsx" % (len(self.buffer) - 5)
        logger.debug("Decoding ERR packet with format: %s" % format)
        self.opcode, self.errorcode, self.errmsg = struct.unpack(format,
                                                                 self.buffer)
        logger.warn("ERR packet - errorcode: %d, message: %s"
                    % (self.errorcode, self.errmsg))
        return self
class TftpPacketOACK(TftpPacket, TftpPacketWithOptions):
    """Option acknowledgement packet, opcode 6 (RFC 2347).

    # +-------+---~~---+---+---~~---+---+---~~---+---+---~~---+---+
    # | opc | opt1 | 0 | value1 | 0 | optN | 0 | valueN | 0 |
    # +-------+---~~---+---+---~~---+---+---~~---+---+---~~---+---+
    """

    def __init__(self):
        TftpPacket.__init__(self)
        TftpPacketWithOptions.__init__(self)
        self.opcode = 6

    def __str__(self):
        return 'OACK packet:\n options = %s' % self.options

    def encode(self):
        """Pack the option name/value pairs (NUL-terminated) after the
        opcode into self.buffer; return self."""
        format = "!H" # opcode
        options_list = []
        logger.debug("in TftpPacketOACK.encode")
        for key in self.options:
            logger.debug("looping on option key %s" % key)
            logger.debug("value is %s" % self.options[key])
            format += "%dsx" % len(key)
            format += "%dsx" % len(self.options[key])
            options_list.append(key.encode("utf-8"))
            options_list.append(self.options[key].encode("utf-8"))
        self.buffer = struct.pack(format, self.opcode, *options_list)
        return self

    def decode(self):
        """Decode the option pairs from self.buffer; return self."""
        self.options = self.decode_options(self.buffer[2:])
        return self

    def match_options(self, options):
        """This method takes a set of options, and tries to match them with
        its own. It can accept some changes in those options from the server as
        part of a negotiation. Changed or unchanged, it will return a dict of
        the options so that the session can update itself to the negotiated
        options.

        NOTE(review): *options* is mutated in place (blksize becomes an
        int); an out-of-range blksize is silently left unchanged rather
        than rejected, and any option other than blksize raises -- confirm
        both behaviours are intended before relying on them."""
        for name in self.options:
            if name in options:
                if name == 'blksize':
                    # We can accept anything between the min and max values.
                    size = int(self.options[name])
                    if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                        logger.debug("negotiated blksize of %d bytes" % size)
                        options[name] = size
                else:
                    raise TftpException("Unsupported option: %s" % name)
        return True
class TftpPacketFactory(object):
    """This class generates TftpPacket objects. It is responsible for parsing
    raw buffers off of the wire and returning objects representing them, via
    the parse() method."""

    def __init__(self):
        # Maps TFTP opcode -> packet class used by __create().
        self.classes = {
            1: TftpPacketRRQ,
            2: TftpPacketWRQ,
            3: TftpPacketDAT,
            4: TftpPacketACK,
            5: TftpPacketERR,
            6: TftpPacketOACK
        }

    def parse(self, buffer):
        """This method is used to parse an existing datagram into its
        corresponding TftpPacket object. The buffer is the raw bytes off of
        the network."""
        logger.debug("parsing a %d byte packet" % len(buffer))
        (opcode,) = struct.unpack("!H", buffer[:2])
        logger.debug("opcode is %d" % opcode)
        packet = self.__create(opcode)
        packet.buffer = buffer
        return packet.decode()

    def __create(self, opcode):
        """This method returns the appropriate class object corresponding to
        the passed opcode.

        CLEANUP: the original duplicated self.classes in a six-branch
        if/elif chain (with the dict lookup left commented out); dispatch
        through the dict instead."""
        tftpassert( opcode in self.classes,
                   "Unsupported opcode: %d" % opcode)
        packet = self.classes[opcode]()
        logger.debug("packet is %s" % packet)
        return packet
class TftpServerHandler(TftpSession):
    """Per-transfer handler for a single TFTP read request (RRQ).

    One instance is spawned by TftpServer for each incoming RRQ; it binds a
    fresh ephemeral local port (as the TFTP protocol requires for data
    transfers) and drives the DAT/ACK download state machine for one client.
    """
    def __init__ (self, state, root, localhost, remotehost, remoteport, packet):
        TftpSession.__init__(self)
        # Data transfers run on a new ephemeral port, not the listen port.
        self.bind(localhost,0)
        self.connect(remotehost, remoteport)
        self.packet = packet                      # shared TftpPacketFactory
        self.state = state                        # TftpState, 'rrq' initially
        self.root = root                          # tftproot directory to serve from
        self.mode = None
        self.filename = None
        self.options = { 'blksize': DEF_BLKSIZE }
        self.blocknumber = 0                      # number of the last DAT sent
        self.buffer = None                        # payload of last DAT, kept for resends
        self.fileobj = None
        self.timeouts.idle = 3
        self.timeouts.sustain = 120

    def handle_timeout_idle(self):
        # Returning False drops the session on idle timeout.
        return False

    def handle_timeout_sustain(self):
        # Returning False drops the session when the overall time limit hits.
        return False

    def handle_io_in(self, data):
        """This method informs a handler instance that it has data waiting on
        its socket that it must read and process."""
        recvpkt = self.packet.parse(data)
        # FIXME - refactor into another method, this is too big
        if isinstance(recvpkt, TftpPacketRRQ):
            logger.debug("Handler %s received RRQ packet" % self)
            logger.debug("Requested file is %s, mode is %s" % (recvpkt.filename, recvpkt.mode))
            if recvpkt.mode != 'octet':
                # Only binary ('octet') transfers are supported.
                self.senderror(TftpErrors.IllegalTftpOp)
                #raise TftpException("Unsupported mode: %s" % recvpkt.mode)
                logger.warn("Unsupported mode: %s" % recvpkt.mode)
                self.close()
            if self.state.state == 'rrq':
                logger.debug("Received RRQ. Composing response.")
                self.filename = self.root + os.sep + recvpkt.filename
                logger.debug("The path to the desired file is %s" %
                             self.filename)
                self.filename = os.path.abspath(self.filename)
                logger.debug("The absolute path is %s" % self.filename)
                # Security check. Make sure it's prefixed by the tftproot.
                if self.filename.startswith(os.path.abspath(self.root)):
                    logger.debug("The path appears to be safe: %s" %
                                 self.filename)
                else:
                    # Path escapes the tftproot (e.g. via '..') - reject it.
                    self.errors += 1
                    self.senderror(TftpErrors.AccessViolation)
                    # raise TftpException("Insecure path: %s" % self.filename)
                    logger.warn("Insecure path: %s" % self.filename)
                    self.close()
                    return len(data)
                # Does the file exist?
                if os.path.exists(self.filename):
                    logger.debug("File %s exists." % self.filename)
                    # Check options. Currently we only support the blksize
                    # option.
                    if 'blksize' in recvpkt.options:
                        logger.debug("RRQ includes a blksize option")
                        blksize = int(recvpkt.options['blksize'])
                        # Delete the option now that it's handled.
                        del recvpkt.options['blksize']
                        if blksize >= MIN_BLKSIZE and blksize <= MAX_BLKSIZE:
                            logger.info("Client requested blksize = %d"
                                        % blksize)
                            self.options['blksize'] = blksize
                        else:
                            logger.warning("Client %s requested invalid "
                                           "blocksize %d, responding with default"
                                           % (self.remote.host, blksize))
                            self.options['blksize'] = DEF_BLKSIZE
                    if 'tsize' in recvpkt.options:
                        # tsize option: report the file size to the client.
                        logger.info('RRQ includes tsize option')
                        self.options['tsize'] = os.stat(self.filename).st_size
                        # Delete the option now that it's handled.
                        del recvpkt.options['tsize']
                    if len(list(recvpkt.options.keys())) > 0:
                        logger.warning("Client %s requested unsupported options: %s"
                                       % (self.remote.host, recvpkt.options))
                    if self.options['blksize'] != DEF_BLKSIZE or 'tsize' in self.options:
                        # Non-default options require an OACK handshake first.
                        logger.info("Options requested, sending OACK")
                        self.send_oack()
                    else:
                        logger.debug("Client %s requested no options."
                                     % self.remote.host)
                        self.start_download()
                else:
                    logger.warn("Requested file %s does not exist." %
                                self.filename)
                    self.senderror(TftpErrors.FileNotFound)
                    # raise TftpException("Requested file not found: %s" % self.filename)
                    logger.warn("Requested file not found: %s" % self.filename)
                    self.close()
                    return len(data)
            else:
                # We're receiving an RRQ when we're not expecting one.
                logger.warn("Received an RRQ in handler %s "
                            "but we're in state %s" % (self.remote.host, self.state))
                self.errors += 1
        # Next packet type
        elif isinstance(recvpkt, TftpPacketACK):
            logger.debug("Received an ACK from the client.")
            if recvpkt.blocknumber == 0 and self.state.state == 'oack':
                # An ACK of block 0 acknowledges our OACK; begin the transfer.
                logger.debug("Received ACK with 0 blocknumber, starting download")
                self.start_download()
            else:
                if self.state.state == 'dat' or self.state.state == 'fin':
                    if self.blocknumber == recvpkt.blocknumber:
                        logger.debug("Received ACK for block %d"
                                     % recvpkt.blocknumber)
                        if self.state.state == 'fin':
                            # Final DAT acknowledged: the transfer succeeded.
                            # raise TftpException, "Successful transfer."
                            self.close()
                        else:
                            self.send_dat()
                    elif recvpkt.blocknumber < self.blocknumber:
                        # Don't resend a DAT due to an old ACK. Fixes the
                        # sorceror's apprentice problem.
                        logger.warn("Received old ACK for block number %d"
                                    % recvpkt.blocknumber)
                    else:
                        logger.warn("Received ACK for block number "
                                    "%d, apparently from the future"
                                    % recvpkt.blocknumber)
                else:
                    logger.warn("Received ACK with block number %d "
                                "while in state %s"
                                % (recvpkt.blocknumber,
                                   self.state.state))
        elif isinstance(recvpkt, TftpPacketERR):
            logger.warn("Received error packet from client: %s" % recvpkt)
            self.state.state = 'err'
            logger.warn("Received error from client")
            # raise TftpException("Received error from client")
            self.close()
            return len(data)
        # Handle other packet types.
        else:
            logger.warn("Received packet %s while handling a download"
                        % recvpkt)
            self.senderror(TftpErrors.IllegalTftpOp)
            # raise TftpException("Invalid packet received during download")
            logger.warn("Invalid packet received during download")
            self.close()
            return len(data)
        return len(data)

    def start_download(self):
        """This method opens self.filename, stores the resulting file object
        in self.fileobj, and calls send_dat()."""
        self.state.state = 'dat'
        self.fileobj = open(self.filename, "rb")
        self.send_dat()

    def send_dat(self, resend=False):
        """This method reads sends a DAT packet based on what is in self.buffer."""
        if not resend:
            blksize = int(self.options['blksize'])
            self.buffer = self.fileobj.read(blksize)
            logger.debug("Read %d bytes into buffer" % len(self.buffer))
            if len(self.buffer) < blksize:
                # A short read marks the final DAT of the transfer.
                logger.info("Reached EOF on file %s" % self.filename)
                self.state.state = 'fin'
            self.blocknumber += 1
            if self.blocknumber > 65535:
                # Block numbers are 16 bit and wrap around for large files.
                logger.debug("Blocknumber rolled over to zero")
                self.blocknumber = 0
        else:
            logger.warn("Resending block number %d" % self.blocknumber)
        dat = TftpPacketDAT()
        dat.data = self.buffer
        dat.blocknumber = self.blocknumber
        logger.debug("Sending DAT packet %d" % self.blocknumber)
        self.send(dat.encode().buffer)

    # FIXME - should these be factored-out into the session class?
    def send_oack(self):
        """This method sends an OACK packet based on current params."""
        logger.debug("Composing and sending OACK packet")
        oack = TftpPacketOACK()
        oack.options = self.options
        self.send(oack.encode().buffer)
        self.state.state = 'oack'
        logger.debug("state %s" % self.state.state)
class TftpServer(TftpSession):
    """Listener session for the TFTP server socket.

    Parses each incoming datagram; read requests (RRQ) are handed off to a
    fresh TftpServerHandler, write requests (WRQ) are rejected.
    """

    def __init__(self):
        TftpSession.__init__(self)
        self.packet = TftpPacketFactory()
        self.root = ''

    def handle_io_in(self, data):
        """Dispatch one datagram received on the main server socket."""
        logger.debug("Data ready on our main socket")
        raw = data
        logger.debug("Read %d bytes" % len(raw))
        try:
            pkt = self.packet.parse(raw)
        except TftpException as e:
            print(e)
            return len(data)
        if isinstance(pkt, TftpPacketRRQ):
            logger.debug("RRQ packet from %s:%i" % (self.remote.host, self.remote.port))
            # Each transfer gets its own handler session on a new port.
            handler = TftpServerHandler(
                TftpState('rrq'), self.root,
                self.local.host, self.remote.host, self.remote.port,
                self.packet)
            handler.handle_io_in(data)
        elif isinstance(pkt, TftpPacketWRQ):
            logger.warn("Write requests not implemented at this time.")
            self.senderror(TftpErrors.IllegalTftpOp)
        return len(data)

    def chroot(self, r):
        """Restrict served files to the directory *r*."""
        self.root = r
class TftpClient(TftpSession):
    """This class is an implementation of a tftp client. Once instantiated, a
    download can be initiated via the download() method."""
    def __init__(self):
        TftpSession.__init__(self)
        self.timeouts.idle=5
        self.timeouts.sustain = 120
        self.options = {}
        self.packet = TftpPacketFactory()
        self.expected_block = 0     # block number we expect to receive next
        self.curblock = 0           # last block number successfully received
        self.bytes = 0              # total payload bytes written so far
        self.filename = None
        self.port = 0
        self.connected = False      # True once we locked onto the server's data port
        self.idlecount = 0          # consecutive idle timeouts (retry counter)

    def __del__(self):
        # Drop our reference on the parent connection, if any.
        if self.con != None:
            self.con.unref()
            self.con = None

    def download(self, con, host, port, filename, url):
        """Start downloading *filename* from *host*:*port*.

        *con* is the (optional) parent connection that triggered the
        download; *url* is kept for the final download.complete incident.
        """
        logger.info("Connecting to %s to download" % host)
        logger.info("    filename -> %s" % filename)
        if 'blksize' in self.options:
            size = self.options['blksize']
            if size < MIN_BLKSIZE or size > MAX_BLKSIZE:
                raise TftpException("Invalid blksize: %d" % size)
        else:
            self.options['blksize'] = DEF_BLKSIZE
        self.filename = filename
        self.port = port
        self.con = con
        self.url = url
        if con != None:
            # Bind on the same local address as the triggering connection.
            self.bind(con.local.host, 0)
            self.con.ref()
        self.connect(host,0)
        if con != None:
            # Link this download to the connection that requested it.
            i = incident("dionaea.connection.link")
            i.parent = con
            i.child = self
            i.report()

    def handle_established(self):
        # Socket is up: send the RRQ that starts the transfer and open the
        # temporary file the payload will be written into.
        logger.info("connection to %s established" % self.remote.host)
        logger.info("port %i established" % self.port)
        self.remote.port = self.port
        pkt = TftpPacketRRQ()
        pkt.filename = self.filename
        pkt.mode = "octet" # FIXME - shouldn't hardcode this
        pkt.options = self.options
        # Remember the encoded packet so idle timeouts can retransmit it.
        self.last_packet = pkt.encode().buffer
        self.send(self.last_packet)
        self.state.state = 'rrq'
        self.fileobj = tempfile.NamedTemporaryFile(delete=False, prefix='tftp-', suffix=g_dionaea.config()['downloads']['tmp-suffix'], dir=g_dionaea.config()['downloads']['dir'])

    # def handle_disconnect(self):
    #     if self.con:
    #         self.con.unref()
    #     return False

    def handle_io_in(self, data):
        """Process one datagram from the server (DAT/OACK/ACK/ERR/WRQ)."""
        logger.debug('Received packet from server %s:%i' % (self.remote.host, self.remote.port))
        if self.connected == False:
            # The server answers from a fresh ephemeral port; lock onto it.
            self.connect(self.remote.host, self.remote.port)
            self.connected = True
            if self.con != None:
                i = incident("dionaea.connection.link")
                i.parent = self.con
                i.child = self
                i.report()
        recvpkt = self.packet.parse(data)
        if isinstance(recvpkt, TftpPacketDAT):
            logger.debug("recvpkt.blocknumber = %d" % recvpkt.blocknumber)
            logger.debug("curblock = %d" % self.curblock)
            if self.state.state == 'rrq' and self.options:
                # Server sent data straight away without an OACK.
                logger.info("no OACK, our options were ignored")
                self.options = { 'blksize': DEF_BLKSIZE }
                self.state.state = 'ack'
            self.expected_block = self.curblock + 1
            if self.expected_block > 65535:
                # Block numbers are 16 bit and wrap around for large files.
                logger.debug("block number rollover to 0 again")
                self.expected_block = 0
            if recvpkt.blocknumber == self.expected_block:
                logger.debug("good, received block %d in sequence"
                             % recvpkt.blocknumber)
                self.curblock = self.expected_block
                # ACK the packet, and save the data.
                logger.info("sending ACK to block %d" % self.curblock)
                logger.debug("ip = %s, port = %i" % (self.remote.host, self.remote.port))
                ackpkt = TftpPacketACK()
                ackpkt.blocknumber = self.curblock
                self.last_packet = ackpkt.encode().buffer
                self.send(self.last_packet)
                logger.debug("writing %d bytes to output file"
                             % len(recvpkt.data))
                self.fileobj.write(recvpkt.data)
                self.bytes += len(recvpkt.data)
                # Check for end-of-file, any less than full data packet.
                if len(recvpkt.data) < int(self.options['blksize']):
                    logger.info("end of file detected")
                    self.fileobj.close()
                    # Report the completed download, then remove the temp file.
                    icd = incident("dionaea.download.complete")
                    icd.url = self.url
                    icd.path = self.fileobj.name
                    icd.con = self.con
                    icd.report()
                    self.close()
                    self.fileobj.unlink(self.fileobj.name)
            elif recvpkt.blocknumber == self.curblock:
                logger.warn("dropping duplicate block %d" % self.curblock)
                logger.debug("ACKing block %d again, just in case" % self.curblock)
                ackpkt = TftpPacketACK()
                ackpkt.blocknumber = self.curblock
                self.send(ackpkt.encode().buffer)
            else:
                # Out-of-order block that is neither next nor a duplicate.
                msg = "Whoa! Received block %d but expected %d" % (recvpkt.blocknumber,
                                                                   self.curblock+1)
                logger.warn(msg)
        # Check other packet types.
        elif isinstance(recvpkt, TftpPacketOACK):
            if not self.state.state == 'rrq':
                self.errors += 1
                logger.warn("Received OACK in state %s" % self.state.state)
                # continue
            self.state.state = 'oack'
            logger.info("Received OACK from server.")
            if len(recvpkt.options.keys()) > 0:
                if recvpkt.match_options(self.options):
                    logger.info("Successful negotiation of options")
                    # Set options to OACK options
                    self.options = recvpkt.options
                    for key in self.options:
                        logger.info("    %s = %s" % (key, self.options[key]))
                    logger.debug("sending ACK to OACK")
                    # ACK with block number 0 accepts the negotiated options.
                    ackpkt = TftpPacketACK()
                    ackpkt.blocknumber = 0
                    self.last_packet = ackpkt.encode().buffer
                    self.send(self.last_packet)
                    self.state.state = 'ack'
                else:
                    logger.warn("failed to negotiate options")
                    self.senderror(TftpErrors.FailedNegotiation)
                    self.state.state = 'err'
                    # raise TftpException("Failed to negotiate options")
                    self.fail()
        elif isinstance(recvpkt, TftpPacketACK):
            # Umm, we ACK, the server doesn't.
            self.state.state = 'err'
            # self.senderror(TftpErrors.IllegalTftpOp)
            logger.warn("Received ACK from server while in download")
            # tftpassert(False, "Received ACK from server while in download")
            self.fail()
        elif isinstance(recvpkt, TftpPacketERR):
            self.state.state = 'err'
            # self.senderror(TftpErrors.IllegalTftpOp)
            logger.warn("Received ERR from server: " + str(recvpkt))
            self.fail()
        elif isinstance(recvpkt, TftpPacketWRQ):
            self.state.state = 'err'
            # self.senderror(TftpErrors.IllegalTftpOp)
            # tftpassert(False, "Received WRQ from server: " + str(recvpkt))
            logger.warn("Received WRQ from server: " + str(recvpkt))
            self.fail()
        else:
            self.state.state = 'err'
            # self.senderror(TftpErrors.IllegalTftpOp)
            # tftpassert(False, "Received unknown packet type from server: " + str(recvpkt))
            logger.warn("Received unknown packet type from server: " + str(recvpkt))
            self.fail()
        return len(data)

    def handle_error(self, err):
        # Errors are handled via timeouts / fail(); nothing to do here.
        pass

    def handle_timeout_sustain(self):
        # Overall time limit exceeded: give up the transfer.
        logger.debug("tftp sustain timeout!")
        self.fail()
        return False

    def handle_timeout_idle(self):
        # Retransmit the last packet on idle timeout, up to 10 retries.
        logger.debug("tftp idle timeout!")
        if self.idlecount > 10:
            self.fail()
            return False
        self.idlecount+=1
        self.send(self.last_packet)
        return True

    def fail(self):
        """Abort the transfer: discard the partial temp file and close."""
        if self.fileobj:
            self.fileobj.close()
            self.fileobj.unlink(self.fileobj.name)
        self.close()
from urllib import parse
class tftpdownloadhandler(ihandler):
    """Incident handler that services download requests for tftp:// URLs
    by spawning a TftpClient."""
    def __init__(self, path):
        logger.debug("%s ready!" % (self.__class__.__name__))
        ihandler.__init__(self, path)

    def handle_incident(self, icd):
        """Start a TFTP download for incidents carrying a tftp:// url."""
        url = icd.get("url")
        if url.startswith('tftp://'):
            # python fails parsing tftp://, ftp:// works, so ...
            # dropping the first character turns 'tftp://' into 'ftp://',
            # which urlsplit understands; netloc/path are unaffected.
            logger.info("do download")
            x = parse.urlsplit(url[1:])
            if x.netloc == '0.0.0.0':
                logger.info("Discarding download from INADDR_ANY")
                return
            try:
                con = icd.con
            except AttributeError:
                # No triggering connection attached to this incident.
                con = None
            t=TftpClient()
            # x.path starts with '/', which is not part of the tftp filename.
            t.download(con, x.netloc, 69, x.path[1:], url)
| gpl-2.0 |
nicoboss/Floatmotion | OpenGL/raw/GL/VERSION/GL_1_2.py | 9 | 3758 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_1_2'
def _f( function ):
    # Wrap a generated stub in a platform GL entry point with error checking;
    # the decorated 'pass' body is replaced by the real ctypes call.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_1_2',error_checker=_errors._error_checker)
GL_ALIASED_LINE_WIDTH_RANGE=_C('GL_ALIASED_LINE_WIDTH_RANGE',0x846E)
GL_ALIASED_POINT_SIZE_RANGE=_C('GL_ALIASED_POINT_SIZE_RANGE',0x846D)
GL_BGR=_C('GL_BGR',0x80E0)
GL_BGRA=_C('GL_BGRA',0x80E1)
GL_CLAMP_TO_EDGE=_C('GL_CLAMP_TO_EDGE',0x812F)
GL_LIGHT_MODEL_COLOR_CONTROL=_C('GL_LIGHT_MODEL_COLOR_CONTROL',0x81F8)
GL_MAX_3D_TEXTURE_SIZE=_C('GL_MAX_3D_TEXTURE_SIZE',0x8073)
GL_MAX_ELEMENTS_INDICES=_C('GL_MAX_ELEMENTS_INDICES',0x80E9)
GL_MAX_ELEMENTS_VERTICES=_C('GL_MAX_ELEMENTS_VERTICES',0x80E8)
GL_PACK_IMAGE_HEIGHT=_C('GL_PACK_IMAGE_HEIGHT',0x806C)
GL_PACK_SKIP_IMAGES=_C('GL_PACK_SKIP_IMAGES',0x806B)
GL_PROXY_TEXTURE_3D=_C('GL_PROXY_TEXTURE_3D',0x8070)
GL_RESCALE_NORMAL=_C('GL_RESCALE_NORMAL',0x803A)
GL_SEPARATE_SPECULAR_COLOR=_C('GL_SEPARATE_SPECULAR_COLOR',0x81FA)
GL_SINGLE_COLOR=_C('GL_SINGLE_COLOR',0x81F9)
GL_SMOOTH_LINE_WIDTH_GRANULARITY=_C('GL_SMOOTH_LINE_WIDTH_GRANULARITY',0x0B23)
GL_SMOOTH_LINE_WIDTH_RANGE=_C('GL_SMOOTH_LINE_WIDTH_RANGE',0x0B22)
GL_SMOOTH_POINT_SIZE_GRANULARITY=_C('GL_SMOOTH_POINT_SIZE_GRANULARITY',0x0B13)
GL_SMOOTH_POINT_SIZE_RANGE=_C('GL_SMOOTH_POINT_SIZE_RANGE',0x0B12)
GL_TEXTURE_3D=_C('GL_TEXTURE_3D',0x806F)
GL_TEXTURE_BASE_LEVEL=_C('GL_TEXTURE_BASE_LEVEL',0x813C)
GL_TEXTURE_BINDING_3D=_C('GL_TEXTURE_BINDING_3D',0x806A)
GL_TEXTURE_DEPTH=_C('GL_TEXTURE_DEPTH',0x8071)
GL_TEXTURE_MAX_LEVEL=_C('GL_TEXTURE_MAX_LEVEL',0x813D)
GL_TEXTURE_MAX_LOD=_C('GL_TEXTURE_MAX_LOD',0x813B)
GL_TEXTURE_MIN_LOD=_C('GL_TEXTURE_MIN_LOD',0x813A)
GL_TEXTURE_WRAP_R=_C('GL_TEXTURE_WRAP_R',0x8072)
GL_UNPACK_IMAGE_HEIGHT=_C('GL_UNPACK_IMAGE_HEIGHT',0x806E)
GL_UNPACK_SKIP_IMAGES=_C('GL_UNPACK_SKIP_IMAGES',0x806D)
GL_UNSIGNED_BYTE_2_3_3_REV=_C('GL_UNSIGNED_BYTE_2_3_3_REV',0x8362)
GL_UNSIGNED_BYTE_3_3_2=_C('GL_UNSIGNED_BYTE_3_3_2',0x8032)
GL_UNSIGNED_INT_10_10_10_2=_C('GL_UNSIGNED_INT_10_10_10_2',0x8036)
GL_UNSIGNED_INT_2_10_10_10_REV=_C('GL_UNSIGNED_INT_2_10_10_10_REV',0x8368)
GL_UNSIGNED_INT_8_8_8_8=_C('GL_UNSIGNED_INT_8_8_8_8',0x8035)
GL_UNSIGNED_INT_8_8_8_8_REV=_C('GL_UNSIGNED_INT_8_8_8_8_REV',0x8367)
GL_UNSIGNED_SHORT_1_5_5_5_REV=_C('GL_UNSIGNED_SHORT_1_5_5_5_REV',0x8366)
GL_UNSIGNED_SHORT_4_4_4_4=_C('GL_UNSIGNED_SHORT_4_4_4_4',0x8033)
GL_UNSIGNED_SHORT_4_4_4_4_REV=_C('GL_UNSIGNED_SHORT_4_4_4_4_REV',0x8365)
GL_UNSIGNED_SHORT_5_5_5_1=_C('GL_UNSIGNED_SHORT_5_5_5_1',0x8034)
GL_UNSIGNED_SHORT_5_6_5=_C('GL_UNSIGNED_SHORT_5_6_5',0x8363)
GL_UNSIGNED_SHORT_5_6_5_REV=_C('GL_UNSIGNED_SHORT_5_6_5_REV',0x8364)
# Auto-generated ctypes stubs for the OpenGL 1.2 entry points. The @_p.types
# decorator records the C signature (return type first, then arguments) and
# @_f replaces each stub with the real platform GL function.
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glCopyTexSubImage3D(target,level,xoffset,yoffset,zoffset,x,y,width,height):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLsizei,_cs.GLenum,ctypes.c_void_p)
def glDrawRangeElements(mode,start,end,count,type,indices):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLint,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glTexImage3D(target,level,internalformat,width,height,depth,border,format,type,pixels):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glTexSubImage3D(target,level,xoffset,yoffset,zoffset,width,height,depth,format,type,pixels):pass
| agpl-3.0 |
Theer108/invenio | invenio/modules/textminer/testsuite/test_textminer_references_api.py | 19 | 1302 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The Refextract task tests suite for tasks
It requires a fully functional invenio installation.
"""
# Note: unit tests were moved to the regression test suite. Keeping
# this file here with empty test case set in order to overwrite any
# previously installed file. Also, keeping TEST_SUITE empty so that
# `inveniocfg --run-unit-tests' would not complain.
from invenio.testsuite import make_test_suite, run_test_suite
# Intentionally empty suite: per the note above, the unit tests were moved to
# the regression test suite; an empty TEST_SUITE keeps the runner satisfied.
TEST_SUITE = make_test_suite()

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.4/tests/regressiontests/comment_tests/tests/comment_form_tests.py | 97 | 3007 | from __future__ import absolute_import
import time
from django.conf import settings
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article
class CommentFormTests(CommentTestCase):
    """Tests for CommentForm, focused on the anti-spam security fields
    (honeypot, timestamp, security hash) and the profanity settings."""

    def testInit(self):
        # The form pre-fills the target object's identity and security fields.
        f = CommentForm(Article.objects.get(pk=1))
        self.assertEqual(f.initial['content_type'], str(Article._meta))
        self.assertEqual(f.initial['object_pk'], "1")
        self.assertNotEqual(f.initial['security_hash'], None)
        self.assertNotEqual(f.initial['timestamp'], None)

    def testValidPost(self):
        a = Article.objects.get(pk=1)
        f = CommentForm(a, data=self.getValidData(a))
        self.assertTrue(f.is_valid(), f.errors)
        # Returned so testGetCommentObject can reuse the validated form.
        return f

    def tamperWithForm(self, **kwargs):
        """Build valid form data, overwrite fields with *kwargs*, and assert
        the tampered form no longer validates."""
        a = Article.objects.get(pk=1)
        d = self.getValidData(a)
        d.update(kwargs)
        f = CommentForm(Article.objects.get(pk=1), data=d)
        self.assertFalse(f.is_valid())
        return f

    def testHoneypotTampering(self):
        # Any non-empty honeypot value must invalidate the form.
        self.tamperWithForm(honeypot="I am a robot")

    def testTimestampTampering(self):
        # A timestamp 28800s (8h) in the past must be rejected as stale.
        self.tamperWithForm(timestamp=str(time.time() - 28800))

    def testSecurityHashTampering(self):
        self.tamperWithForm(security_hash="Nobody expects the Spanish Inquisition!")

    def testContentTypeTampering(self):
        self.tamperWithForm(content_type="auth.user")

    def testObjectPKTampering(self):
        self.tamperWithForm(object_pk="3")

    def testSecurityErrors(self):
        f = self.tamperWithForm(honeypot="I am a robot")
        self.assertTrue("honeypot" in f.security_errors())

    def testGetCommentObject(self):
        f = self.testValidPost()
        c = f.get_comment_object()
        self.assertTrue(isinstance(c, Comment))
        self.assertEqual(c.content_object, Article.objects.get(pk=1))
        self.assertEqual(c.comment, "This is my comment")
        c.save()
        self.assertEqual(Comment.objects.count(), 1)

    def testProfanities(self):
        """Test COMMENTS_ALLOW_PROFANITIES and PROFANITIES_LIST settings"""
        a = Article.objects.get(pk=1)
        d = self.getValidData(a)
        # Save settings in case other tests need 'em
        saved = settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES
        # Don't wanna swear in the unit tests if we don't have to...
        settings.PROFANITIES_LIST = ["rooster"]
        # Try with COMMENTS_ALLOW_PROFANITIES off
        settings.COMMENTS_ALLOW_PROFANITIES = False
        f = CommentForm(a, data=dict(d, comment="What a rooster!"))
        self.assertFalse(f.is_valid())
        # Now with COMMENTS_ALLOW_PROFANITIES on
        settings.COMMENTS_ALLOW_PROFANITIES = True
        f = CommentForm(a, data=dict(d, comment="What a rooster!"))
        self.assertTrue(f.is_valid())
        # Restore settings
        settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES = saved
| bsd-3-clause |
sserrot/champion_relationships | venv/Lib/site-packages/parso/cache.py | 2 | 8668 | import time
import os
import sys
import hashlib
import gc
import shutil
import platform
import errno
import logging
import warnings
try:
import cPickle as pickle
except:
import pickle
from parso._compatibility import FileNotFoundError, PermissionError, scandir
from parso.file_io import FileIO
LOG = logging.getLogger(__name__)
_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10 # 10 minutes
"""
Cached files should survive at least a few minutes.
"""
_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30
"""
Maximum time for a cached file to survive if it is not
accessed within.
"""
_CACHED_SIZE_TRIGGER = 600
"""
This setting limits the amount of cached files. It's basically a way to start
garbage collection.
The reasoning for this limit being as big as it is, is the following:
Numpy, Pandas, Matplotlib and Tensorflow together use about 500 files. This
makes Jedi use ~500mb of memory. Since we might want a bit more than those few
libraries, we just increase it a bit.
"""
_PICKLE_VERSION = 33
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
the parser tree classes. For example, the following changes
are regarded as incompatible.
- A class name is changed.
- A class is moved to another module.
- A __slot__ of a class is changed.
"""
_VERSION_TAG = '%s-%s%s-%s' % (
platform.python_implementation(),
sys.version_info[0],
sys.version_info[1],
_PICKLE_VERSION
)
"""
Short name for distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python2
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
"""
def _get_default_cache_path():
if platform.system().lower() == 'windows':
dir_ = os.path.join(os.getenv('LOCALAPPDATA')
or os.path.expanduser('~'), 'Parso', 'Parso')
elif platform.system().lower() == 'darwin':
dir_ = os.path.join('~', 'Library', 'Caches', 'Parso')
else:
dir_ = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso')
return os.path.expanduser(dir_)
_default_cache_path = _get_default_cache_path()
"""
The path where the cache is stored.
On Linux, this defaults to ``~/.cache/parso/``, on OS X to
``~/Library/Caches/Parso/`` and on Windows to ``%LOCALAPPDATA%\\Parso\\Parso\\``.
On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
``$XDG_CACHE_HOME/parso`` is used instead of the default one.
"""
_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
def _get_cache_clear_lock(cache_path = None):
    """
    The path where the cache lock is stored.

    Cache lock will prevent continous cache clearing and only allow garbage
    collection once a day (can be configured in _CACHE_CLEAR_THRESHOLD).
    """
    # `or` also replaces an empty-string cache_path with the default.
    cache_path = cache_path or _get_default_cache_path()
    return FileIO(os.path.join(cache_path, "PARSO-CACHE-LOCK"))
parser_cache = {}
class _NodeCacheItem(object):
def __init__(self, node, lines, change_time=None):
self.node = node
self.lines = lines
if change_time is None:
change_time = time.time()
self.change_time = change_time
self.last_used = change_time
def load_module(hashed_grammar, file_io, cache_path=None):
    """
    Returns a module or None, if it fails.
    """
    # Without a modification time we cannot judge cache freshness.
    p_time = file_io.get_last_modified()
    if p_time is None:
        return None

    try:
        module_cache_item = parser_cache[hashed_grammar][file_io.path]
        if p_time <= module_cache_item.change_time:
            # Fresh in-memory hit: bump the LRU timestamp and return it.
            module_cache_item.last_used = time.time()
            return module_cache_item.node
        # Present in memory but stale: fall through, implicitly returning
        # None so the caller reparses the file.
    except KeyError:
        # Not in the memory cache; try the pickled on-disk cache.
        return _load_from_file_system(
            hashed_grammar,
            file_io.path,
            p_time,
            cache_path=cache_path
        )
def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
    """Load a pickled cache item from disk; None if missing or outdated."""
    cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
    try:
        try:
            if p_time > os.path.getmtime(cache_path):
                # Cache is outdated
                return None
        except OSError as e:
            if e.errno == errno.ENOENT:
                # In Python 2 instead of an IOError here we get an OSError.
                raise FileNotFoundError
            else:
                raise

        with open(cache_path, 'rb') as f:
            # GC is paused while unpickling — presumably a load-time
            # optimization for large parse trees.
            gc.disable()
            try:
                module_cache_item = pickle.load(f)
            finally:
                gc.enable()
    except FileNotFoundError:
        return None
    else:
        # Promote the loaded item into the in-memory cache as well.
        _set_cache_item(hashed_grammar, path, module_cache_item)
        LOG.debug('pickle loaded: %s', path)
        return module_cache_item.node
def _set_cache_item(hashed_grammar, path, module_cache_item):
    """Store *module_cache_item* in the in-memory cache, evicting stale
    entries first when the cache has grown past _CACHED_SIZE_TRIGGER."""
    if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER:
        # Garbage collection of old cache files.
        # We are basically throwing everything away that hasn't been accessed
        # in 10 minutes.
        cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
        for key, path_to_item_map in parser_cache.items():
            parser_cache[key] = {
                path: node_item
                for path, node_item in path_to_item_map.items()
                if node_item.last_used > cutoff_time
            }

    parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
    """Store a parsed module in the in-memory cache and, when possible,
    pickle it to the file-system cache as well.

    Pickling is skipped when the file has no usable path or its modification
    time cannot be read; permission errors while writing are downgraded to a
    warning because the in-memory cache still holds the item.
    """
    path = file_io.path
    try:
        p_time = None if path is None else file_io.get_last_modified()
    except OSError:
        p_time = None
        pickling = False

    item = _NodeCacheItem(module, lines, p_time)
    _set_cache_item(hashed_grammar, path, item)
    if pickling and path is not None:
        try:
            _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
        except PermissionError:
            # It's not really a big issue if the cache cannot be saved to the
            # file system. It's still in RAM in that case. However we should
            # still warn the user that this is happening.
            # Bug fix: the original passed the raw '%s' format string to
            # warnings.warn() without interpolating the path, so the literal
            # '%s' appeared in the warning text.
            warnings.warn(
                'Tried to save a file to %s, but got permission denied.' % path,
                Warning
            )
        else:
            # Successful save: opportunistically garbage-collect old files.
            _remove_cache_and_update_lock(cache_path=cache_path)
def _save_to_file_system(hashed_grammar, path, item, cache_path=None):
    """Pickle *item* to its hashed location on disk."""
    target = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
    with open(target, 'wb') as f:
        pickle.dump(item, f, pickle.HIGHEST_PROTOCOL)
def clear_cache(cache_path=None):
    """Remove the whole on-disk cache tree and empty the in-memory cache."""
    if cache_path is None:
        cache_path = _default_cache_path
    shutil.rmtree(cache_path)
    parser_cache.clear()
def clear_inactive_cache(
    cache_path=None,
    inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL,
):
    """Delete cached pickle files whose last access time is older than
    *inactivity_threshold* seconds.

    Returns False when the cache directory does not exist, True otherwise.
    """
    if cache_path is None:
        cache_path = _get_default_cache_path()
    if not os.path.exists(cache_path):
        return False
    for version_path in os.listdir(cache_path):
        version_path = os.path.join(cache_path, version_path)
        if not os.path.isdir(version_path):
            # Ignore stray files (e.g. the cache-clear lock file).
            continue
        for file in scandir(version_path):
            # Bug fix: honor the inactivity_threshold parameter. The
            # original compared against the hard-coded module constant,
            # silently ignoring the argument.
            if (
                file.stat().st_atime + inactivity_threshold
                <= time.time()
            ):
                try:
                    os.remove(file.path)
                except OSError:  # silently ignore all failures
                    continue
    # The original used a for/else clause that always ran (the loop contains
    # no break); a plain return keeps the identical result more clearly.
    return True
def _remove_cache_and_update_lock(cache_path = None):
    """Garbage-collect inactive cache files, at most once per
    _CACHE_CLEAR_THRESHOLD seconds (tracked via the lock file's mtime)."""
    lock = _get_cache_clear_lock(cache_path=cache_path)
    clear_lock_time = lock.get_last_modified()
    if (
        clear_lock_time is None  # first time
        or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
    ):
        if not lock._touch():
            # First make sure that as few as possible other cleanup jobs also
            # get started. There is still a race condition but it's probably
            # not a big problem.
            return False

        clear_inactive_cache(cache_path = cache_path)
def _get_hashed_path(hashed_grammar, path, cache_path=None):
    """Build the pickle file path for a (grammar, source path) pair."""
    # Ensure the version-tagged cache directory exists before hashing.
    directory = _get_cache_directory_path(cache_path=cache_path)
    digest = hashlib.sha256(path.encode("utf-8")).hexdigest()
    filename = '%s-%s.pkl' % (hashed_grammar, digest)
    return os.path.join(directory, filename)
def _get_cache_directory_path(cache_path=None):
    """Return the version-tagged cache directory, creating it if needed."""
    if cache_path is None:
        cache_path = _default_cache_path
    directory = os.path.join(cache_path, _VERSION_TAG)
    try:
        # Create unconditionally instead of exists()+makedirs(): this avoids
        # the TOCTOU race when several processes initialize the cache at
        # once. (os.makedirs(..., exist_ok=True) is Python-3-only and this
        # module still supports Python 2, see parso._compatibility above.)
        os.makedirs(directory)
    except OSError:
        if not os.path.isdir(directory):
            raise
    return directory
| mit |
molobrakos/home-assistant | homeassistant/components/deconz/climate.py | 5 | 3402 | """Support for deCONZ climate devices."""
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
SUPPORT_ON_OFF, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_TEMPERATURE, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import ATTR_OFFSET, ATTR_VALVE, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the deCONZ climate devices.

    Thermostats are based on the same device class as sensors in deCONZ.
    """
    gateway = get_gateway_from_config_entry(hass, config_entry)

    @callback
    def async_add_climate(sensors):
        """Add climate devices from deCONZ."""
        from pydeconz.sensor import THERMOSTAT
        entities = []
        for sensor in sensors:
            # Skip CLIP (virtual) sensors unless the user allowed them.
            if sensor.type in THERMOSTAT and \
               not (not gateway.allow_clip_sensor and
                    sensor.type.startswith('CLIP')):
                entities.append(DeconzThermostat(sensor, gateway))
        async_add_entities(entities, True)

    # Re-run discovery whenever the gateway signals newly added sensors.
    gateway.listeners.append(async_dispatcher_connect(
        hass, gateway.async_event_new_device(NEW_SENSOR), async_add_climate))

    # Process the sensors already known to the gateway.
    async_add_climate(gateway.api.sensors.values())
class DeconzThermostat(DeconzDevice, ClimateDevice):
    """Representation of a deCONZ thermostat."""

    def __init__(self, device, gateway):
        """Set up thermostat device."""
        super().__init__(device, gateway)

        # Every thermostat supports on/off plus a target temperature.
        self._features = SUPPORT_ON_OFF
        self._features |= SUPPORT_TARGET_TEMPERATURE

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._features

    @property
    def is_on(self):
        """Return true if on."""
        return self._device.on

    async def async_turn_on(self):
        """Turn on switch."""
        # "On" is modeled as the thermostat running in auto mode.
        data = {'mode': 'auto'}
        await self._device.async_set_config(data)

    async def async_turn_off(self):
        """Turn off switch."""
        data = {'mode': 'off'}
        await self._device.async_set_config(data)

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._device.temperature

    @property
    def target_temperature(self):
        """Return the target temperature."""
        return self._device.heatsetpoint

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        data = {}

        if ATTR_TEMPERATURE in kwargs:
            # deCONZ expects the setpoint scaled by 100 (e.g. 21.5 -> 2150).
            data['heatsetpoint'] = kwargs[ATTR_TEMPERATURE] * 100

        await self._device.async_set_config(data)

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the state attributes of the thermostat."""
        attr = {}

        # Only expose attributes the device actually reports.
        if self._device.battery:
            attr[ATTR_BATTERY_LEVEL] = self._device.battery

        if self._device.offset:
            attr[ATTR_OFFSET] = self._device.offset

        if self._device.valve is not None:
            attr[ATTR_VALVE] = self._device.valve

        return attr
| apache-2.0 |
Jeff-Tian/mybnb | Python27/Lib/site-packages/setuptools/site-patch.py | 720 | 2389 | def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| apache-2.0 |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/admindocs/urls.py | 336 | 1089 | from django.conf.urls.defaults import *
from django.contrib.admindocs import views
# URL routes for the admindocs browsing views.  The route names below are
# referenced by reverse()/{% url %} lookups elsewhere in
# django.contrib.admindocs, so neither names nor patterns may change.
urlpatterns = patterns('',
    # Documentation landing page.
    url('^$',
        views.doc_index,
        name='django-admindocs-docroot'
    ),
    url('^bookmarklets/$',
        views.bookmarklets,
        name='django-admindocs-bookmarklets'
    ),
    # Template tag / filter reference pages.
    url('^tags/$',
        views.template_tag_index,
        name='django-admindocs-tags'
    ),
    url('^filters/$',
        views.template_filter_index,
        name='django-admindocs-filters'
    ),
    # View index and per-view detail (view is the dotted path).
    url('^views/$',
        views.view_index,
        name='django-admindocs-views-index'
    ),
    url('^views/(?P<view>[^/]+)/$',
        views.view_detail,
        name='django-admindocs-views-detail'
    ),
    # Model index and per-model detail, addressed as app_label.model_name.
    url('^models/$',
        views.model_index,
        name='django-admindocs-models-index'
    ),
    url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
        views.model_detail,
        name='django-admindocs-models-detail'
    ),
    # Template detail by template path.
    url('^templates/(?P<template>.*)/$',
        views.template_detail,
        name='django-admindocs-templates'
    ),
)
| mit |
PerceptumNL/ReadMore | readmore/content/migrations/0002_auto__chg_field_category_image.py | 1 | 4637 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated).

    Adds the M2M join table for ``Article.categories`` and relaxes
    ``Category.image`` from a URLField to a plain CharField.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding M2M table for field categories on 'Article'
        m2m_table_name = db.shorten_name(u'content_article_categories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('article', models.ForeignKey(orm[u'content.article'], null=False)),
            ('category', models.ForeignKey(orm[u'content.category'], null=False))
        ))
        db.create_unique(m2m_table_name, ['article_id', 'category_id'])

        # Changing field 'Category.image'
        db.alter_column(u'content_category', 'image', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))

    def backwards(self, orm):
        """Revert the migration."""
        # Removing M2M table for field categories on 'Article'
        db.delete_table(db.shorten_name(u'content_article_categories'))

        # Changing field 'Category.image'
        db.alter_column(u'content_category', 'image', self.gf('django.db.models.fields.URLField')(max_length=255, null=True))

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'content.article': {
            'Meta': {'object_name': 'Article'},
            'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'articles'", 'symmetrical': 'False', 'to': u"orm['content.Category']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['content.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_content.article_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'content.category': {
            'Meta': {'object_name': 'Category'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['content.Category']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_content.category_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'content.wikiarticle': {
            'Meta': {'object_name': 'WikiArticle', '_ormbases': [u'content.Article']},
            u'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['content.Article']", 'unique': 'True', 'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'identifier_type': ('django.db.models.fields.CharField', [], {'default': "'title'", 'max_length': '6', 'blank': 'True'})
        },
        u'content.wikicategory': {
            'Meta': {'object_name': 'WikiCategory', '_ormbases': [u'content.Category']},
            u'category_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['content.Category']", 'unique': 'True', 'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'identifier_type': ('django.db.models.fields.CharField', [], {'default': "'title'", 'max_length': '6', 'blank': 'True'}),
            'wiki_type': ('django.db.models.fields.CharField', [], {'default': "'14'", 'max_length': '3'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['content']
LandRegistry/casework-frontend-alpha | migrations/versions/4a0b77571480_.py | 1 | 1499 | """empty message
Revision ID: 4a0b77571480
Revises: None
Create Date: 2014-07-28 14:50:21.815680
"""
# revision identifiers, used by Alembic.
# Alembic revision identifiers for this migration step.
revision = '4a0b77571480'
down_revision = None  # first migration in the chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the users/role/roles_users auth tables.

    Parent tables are created before ``roles_users`` so its foreign keys
    can be established.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('password', sa.String(length=255), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('confirmed_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('role',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Join table linking users to roles (many-to-many).
    op.create_table('roles_users',
    sa.Column('users_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
    sa.ForeignKeyConstraint(['users_id'], ['users.id'], )
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the auth tables created by :func:`upgrade`.

    The join table goes first so its foreign keys are removed before the
    tables they reference.
    """
    for table_name in ('roles_users', 'role', 'users'):
        op.drop_table(table_name)
| mit |
stackforge/senlin | senlin/tests/unit/profiles/test_nova_server.py | 1 | 82391 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import mock
from oslo_utils import encodeutils
import six
from senlin.common import exception as exc
from senlin.objects import node as node_ob
from senlin.profiles import base as profiles_base
from senlin.profiles.os.nova import server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestNovaServerBasic(base.SenlinTestCase):
def setUp(self):
    """Build the fully-populated server profile spec shared by all tests."""
    super(TestNovaServerBasic, self).setUp()

    self.context = utils.dummy_context()
    # Every supported property is set so individual tests can check that
    # each one is forwarded to Nova on creation.
    self.spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'context': {},
            'admin_pass': 'adminpass',
            'auto_disk_config': True,
            'availability_zone': 'FAKE_AZ',
            'config_drive': False,
            'flavor': 'FLAV',
            'image': 'FAKE_IMAGE',
            'key_name': 'FAKE_KEYNAME',
            "metadata": {"meta var": "meta val"},
            'name': 'FAKE_SERVER_NAME',
            'networks': [{
                'fixed_ip': 'FAKE_IP',
                'network': 'FAKE_NET',
                'floating_network': 'FAKE_PUBLIC_NET',
            }],
            'personality': [{
                'path': '/etc/motd',
                'contents': 'foo',
            }],
            'scheduler_hints': {
                'same_host': 'HOST_ID',
            },
            'security_groups': ['HIGH_SECURITY_GROUP'],
            'user_data': 'FAKE_USER_DATA',
        }
    }
def test_init(self):
    """A freshly constructed profile has no server bound to it yet."""
    new_profile = server.ServerProfile('t', self.spec)
    self.assertIsNone(new_profile.server_id)
def test_build_metadata(self):
    """An orphan node records only its own node id in server metadata."""
    node = mock.Mock(id='NODE_ID', cluster_id='')
    prof = server.ServerProfile('t', self.spec)

    metadata = prof._build_metadata(node, None)

    self.assertEqual({'cluster_node_id': 'NODE_ID'}, metadata)
def test_build_metadata_with_inputs(self):
    """Caller-supplied metadata is merged with the node id entry."""
    node = mock.Mock(id='NODE_ID', cluster_id='')
    prof = server.ServerProfile('t', self.spec)

    metadata = prof._build_metadata(node, {'foo': 'bar'})

    expected = {'cluster_node_id': 'NODE_ID', 'foo': 'bar'}
    self.assertEqual(expected, metadata)
def test_build_metadata_for_cluster_node(self):
    """Cluster members additionally record cluster id and node index."""
    node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123)
    prof = server.ServerProfile('t', self.spec)

    metadata = prof._build_metadata(node, None)

    expected = {
        'cluster_id': 'CLUSTER_ID',
        'cluster_node_id': 'NODE_ID',
        # index is stringified by the profile
        'cluster_node_index': '123',
    }
    self.assertEqual(expected, metadata)
def _stubout_profile(self, profile, mock_image=False, mock_flavor=False,
                     mock_keypair=False, mock_net=False):
    """Patch the profile's validation helpers with canned results.

    Each flag stubs one ``_validate_*`` helper so do_create tests can run
    without real OpenStack lookups; ``mock_net`` also stubs port creation.
    """
    if mock_image:
        image = mock.Mock(id='FAKE_IMAGE_ID')
        self.patchobject(profile, '_validate_image', return_value=image)
    if mock_flavor:
        flavor = mock.Mock(id='FAKE_FLAVOR_ID')
        self.patchobject(profile, '_validate_flavor', return_value=flavor)
    if mock_keypair:
        keypair = mock.Mock()
        # 'name' cannot be set via Mock(name=...), so assign it separately.
        keypair.name = 'FAKE_KEYNAME'
        self.patchobject(profile, '_validate_keypair',
                         return_value=keypair)
    if mock_net:
        fake_net = {
            'fixed_ip': 'FAKE_IP',
            'port': 'FAKE_PORT',
            'uuid': 'FAKE_NETWORK_ID',
            'floating_network': 'FAKE_PUBLIC_NET_ID',
        }
        self.patchobject(profile, '_validate_network',
                         return_value=fake_net)
        fake_ports = [{
            'id': 'FAKE_PORT'
        }]
        self.patchobject(profile, '_create_ports_from_properties',
                         return_value=fake_ports)
def test_do_create(self):
    """Successful creation forwards every spec property to Nova.

    Placement data on the node (zone, server group) must override the
    spec's availability zone and be injected into scheduler hints.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    node_obj = mock.Mock(id='FAKE_NODE_ID', index=123,
                         cluster_id='FAKE_CLUSTER_ID',
                         data={
                             'placement': {
                                 'zone': 'AZ1',
                                 'servergroup': 'SERVER_GROUP_1'
                             }
                         })
    node_obj.name = 'TEST_SERVER'
    fake_server = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = fake_server
    cc.server_get.return_value = fake_server

    # do it
    server_id = profile.do_create(node_obj)

    # assertion
    attrs = dict(
        adminPass='adminpass',
        availability_zone='AZ1',
        config_drive=False,
        flavorRef='FAKE_FLAVOR_ID',
        imageRef='FAKE_IMAGE_ID',
        key_name='FAKE_KEYNAME',
        metadata={
            'cluster_id': 'FAKE_CLUSTER_ID',
            'cluster_node_id': 'FAKE_NODE_ID',
            'cluster_node_index': '123',
            'meta var': 'meta val'
        },
        name='FAKE_SERVER_NAME',
        networks=[{
            'port': 'FAKE_PORT',
        }],
        personality=[{
            'path': '/etc/motd',
            'contents': 'foo'
        }],
        scheduler_hints={
            'same_host': 'HOST_ID',
            'group': 'SERVER_GROUP_1',
        },
        security_groups=[{'name': 'HIGH_SECURITY_GROUP'}],
        user_data='FAKE_USER_DATA',
    )
    # user_data is base64 encoded before being sent to Nova.
    ud = encodeutils.safe_encode('FAKE_USER_DATA')
    attrs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))
    attrs['OS-DCF:diskConfig'] = 'AUTO'

    cc.server_create.assert_called_once_with(**attrs)
    cc.server_get.assert_called_once_with('FAKE_ID')
    mock_zone_info.assert_called_once_with(node_obj, fake_server)
    self.assertEqual('FAKE_ID', server_id)
def test_do_create_invalid_image(self):
    """An image validation failure is surfaced as EResourceCreation."""
    prof = server.ServerProfile('s2', self.spec)
    failure = exc.EResourceCreation(type='server', message='boom')
    mock_image = self.patchobject(prof, '_validate_image',
                                  side_effect=failure)
    node = mock.Mock()

    self.assertRaises(exc.EResourceCreation, prof.do_create, node)

    mock_image.assert_called_once_with(node, 'FAKE_IMAGE', 'create')
def test_do_create_bdm_invalid_image(self):
    """A failing image-sourced BDM entry aborts creation.

    The BDM list must first be canonicalized (all optional keys filled
    with None) before being passed to _resolve_bdm.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123,
                         cluster_id='FAKE_CLUSTER_ID')
    bdm_v2 = [
        {
            'volume_size': 1,
            'uuid': '6ce0be68',
            'source_type': 'image',
            'destination_type': 'volume',
            'boot_index': 0,
        },
    ]
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'name': 'FAKE_SERVER_NAME',
            'security_groups': ['HIGH_SECURITY_GROUP'],
            'block_device_mapping_v2': bdm_v2,
        }
    }
    profile = server.ServerProfile('s2', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True)
    err = exc.EResourceCreation(type='server', message='FOO')
    mock_volume = self.patchobject(profile, '_resolve_bdm',
                                   side_effect=err)

    self.assertRaises(exc.EResourceCreation,
                      profile.do_create,
                      node_obj)

    # Canonical form: every optional BDM key present, defaulted to None.
    expected_volume = [{
        'guest_format': None,
        'boot_index': 0,
        'uuid': '6ce0be68',
        'volume_size': 1,
        'device_name': None,
        'disk_bus': None,
        'source_type': 'image',
        'device_type': None,
        'destination_type': 'volume',
        'delete_on_termination': None
    }]
    mock_volume.assert_called_once_with(
        node_obj, expected_volume, 'create')
def test_do_create_bdm_invalid_volume(self):
    """A failing volume-sourced BDM entry aborts creation.

    Mirrors test_do_create_bdm_invalid_image with source_type 'volume'.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123,
                         cluster_id='FAKE_CLUSTER_ID')
    bdm_v2 = [
        {
            'volume_size': 1,
            'uuid': '6ce0be68',
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
        },
    ]
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'name': 'FAKE_SERVER_NAME',
            'security_groups': ['HIGH_SECURITY_GROUP'],
            'block_device_mapping_v2': bdm_v2,
        }
    }
    profile = server.ServerProfile('s2', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True)
    err = exc.EResourceCreation(type='server', message='FOO')
    mock_volume = self.patchobject(profile, '_resolve_bdm',
                                   side_effect=err)

    self.assertRaises(exc.EResourceCreation,
                      profile.do_create,
                      node_obj)

    # Canonical form: every optional BDM key present, defaulted to None.
    expected_volume = [{
        'guest_format': None,
        'boot_index': 0,
        'uuid': '6ce0be68',
        'volume_size': 1,
        'device_name': None,
        'disk_bus': None,
        'source_type': 'volume',
        'device_type': None,
        'destination_type': 'volume',
        'delete_on_termination': None
    }]
    mock_volume.assert_called_once_with(
        node_obj, expected_volume, 'create')
def test_do_create_invalid_flavor(self):
    """A flavor validation failure is surfaced as EResourceCreation."""
    prof = server.ServerProfile('s2', self.spec)
    self._stubout_profile(prof, mock_image=True)
    failure = exc.EResourceCreation(type='server', message='boom')
    mock_flavor = self.patchobject(prof, '_validate_flavor',
                                   side_effect=failure)
    node = mock.Mock()

    self.assertRaises(exc.EResourceCreation, prof.do_create, node)

    mock_flavor.assert_called_once_with(node, 'FLAV', 'create')
def test_do_create_invalid_keypair(self):
    """A keypair validation failure is surfaced as EResourceCreation."""
    prof = server.ServerProfile('s2', self.spec)
    self._stubout_profile(prof, mock_image=True, mock_flavor=True)
    failure = exc.EResourceCreation(type='server', message='boom')
    mock_kp = self.patchobject(prof, '_validate_keypair',
                               side_effect=failure)
    node = mock.Mock()

    self.assertRaises(exc.EResourceCreation, prof.do_create, node)

    mock_kp.assert_called_once_with(node, 'FAKE_KEYNAME', 'create')
def test_do_create_invalid_network(self):
    """A network validation failure aborts creation.

    The single network entry from the spec must reach _validate_network
    in canonical form (all optional keys present, defaulted to None).
    """
    cc = mock.Mock()
    nc = mock.Mock()
    node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123,
                         cluster_id='FAKE_CLUSTER_ID')
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'image': 'FAKE_IMAGE',
            'key_name': 'FAKE_KEYNAME',
            'name': 'FAKE_SERVER_NAME',
            'networks': [{
                'network': 'FAKE_NET'
            }]
        }
    }
    profile = server.ServerProfile('s2', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True)
    err = exc.EResourceCreation(type='server', message='FOO')
    mock_net = self.patchobject(profile, '_validate_network',
                                side_effect=err)

    self.assertRaises(exc.EResourceCreation,
                      profile.do_create,
                      node_obj)

    expect_params = {
        'floating_network': None,
        'network': 'FAKE_NET',
        'fixed_ip': None,
        'floating_ip': None,
        'port': None,
        'security_groups': None
    }
    mock_net.assert_called_once_with(
        node_obj, expect_params, 'create')
def test_do_create_server_attrs_not_defined(self):
    """Optional spec properties are simply omitted from the Nova call."""
    cc = mock.Mock()
    nc = mock.Mock()
    node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123,
                         cluster_id='FAKE_CLUSTER_ID')

    # Assume image/scheduler_hints/user_data were not defined in spec file
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'name': 'FAKE_SERVER_NAME',
            'security_groups': ['HIGH_SECURITY_GROUP'],
        }
    }
    profile = server.ServerProfile('t', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    fake_server = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = fake_server
    cc.server_get.return_value = fake_server

    # do it
    server_id = profile.do_create(node_obj)

    # assertions: only the defined properties plus node metadata appear.
    attrs = {
        'OS-DCF:diskConfig': 'AUTO',
        'flavorRef': 'FAKE_FLAVOR_ID',
        'name': 'FAKE_SERVER_NAME',
        'metadata': {
            'cluster_id': 'FAKE_CLUSTER_ID',
            'cluster_node_id': 'FAKE_NODE_ID',
            'cluster_node_index': '123',
        },
        'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}]
    }
    cc.server_create.assert_called_once_with(**attrs)
    cc.server_get.assert_called_once_with('FAKE_ID')
    mock_zone_info.assert_called_once_with(node_obj, fake_server)
    self.assertEqual('FAKE_ID', server_id)
def test_do_create_obj_name_cluster_id_is_none(self):
    """An orphan node (no cluster, no name) still creates cleanly.

    Metadata then carries only the node id and the spec name is used.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'name': 'FAKE_SERVER_NAME',
            'security_groups': ['HIGH_SECURITY_GROUP'],
        }
    }

    profile = server.ServerProfile('t', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    node_obj = mock.Mock(id='FAKE_NODE_ID', cluster_id='', data={},
                         index=None)
    node_obj.name = None
    fake_server = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = fake_server
    cc.server_get.return_value = fake_server

    server_id = profile.do_create(node_obj)

    attrs = {
        'OS-DCF:diskConfig': 'AUTO',
        'flavorRef': 'FAKE_FLAVOR_ID',
        'name': 'FAKE_SERVER_NAME',
        'metadata': {'cluster_node_id': 'FAKE_NODE_ID'},
        'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}]
    }
    cc.server_create.assert_called_once_with(**attrs)
    cc.server_get.assert_called_once_with('FAKE_ID')
    mock_zone_info.assert_called_once_with(node_obj, fake_server)
    self.assertEqual('FAKE_ID', server_id)
def test_do_create_name_property_is_not_defined(self):
    """Without a spec name, the node's own name is used for the server."""
    cc = mock.Mock()
    nc = mock.Mock()
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'security_groups': ['HIGH_SECURITY_GROUP'],
        }
    }

    profile = server.ServerProfile('t', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={})
    node_obj.name = 'TEST-SERVER'
    fake_server = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = fake_server
    cc.server_get.return_value = fake_server

    # do it
    server_id = profile.do_create(node_obj)

    # assertions: name falls back to the node name.
    attrs = {
        'OS-DCF:diskConfig': 'AUTO',
        'flavorRef': 'FAKE_FLAVOR_ID',
        'name': 'TEST-SERVER',
        'metadata': {'cluster_node_id': 'NODE_ID'},
        'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}]
    }
    cc.server_create.assert_called_once_with(**attrs)
    cc.server_get.assert_called_once_with('FAKE_ID')
    mock_zone_info.assert_called_once_with(node_obj, fake_server)
    self.assertEqual('FAKE_ID', server_id)
def test_do_create_bdm_v2(self):
    """block_device_mapping_v2 entries are canonicalized and forwarded.

    The image-sourced entry's uuid must also be validated as an image.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    bdm_v2 = [
        {
            'volume_size': 1,
            'uuid': '6ce0be68',
            'source_type': 'image',
            'destination_type': 'volume',
            'boot_index': 0,
        },
        {
            'volume_size': 2,
            'source_type': 'blank',
            'destination_type': 'volume',
        }
    ]
    spec = {
        'type': 'os.nova.server',
        'version': '1.0',
        'properties': {
            'flavor': 'FLAV',
            'name': 'FAKE_SERVER_NAME',
            'security_groups': ['HIGH_SECURITY_GROUP'],
            'block_device_mapping_v2': bdm_v2,
        }
    }
    profile = server.ServerProfile('t', spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={})
    node_obj.name = None
    fake_server = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = fake_server
    cc.server_get.return_value = fake_server

    # do it
    server_id = profile.do_create(node_obj)

    # assertions: spec parsing fills in all optional BDM keys with None.
    expected_volume = {
        'guest_format': None,
        'boot_index': 0,
        'uuid': '6ce0be68',
        'volume_size': 1,
        'device_name': None,
        'disk_bus': None,
        'source_type': 'image',
        'device_type': None,
        'destination_type': 'volume',
        'delete_on_termination': None
    }
    self.assertEqual(expected_volume,
                     profile.properties['block_device_mapping_v2'][0])
    attrs = {
        'OS-DCF:diskConfig': 'AUTO',
        'flavorRef': 'FAKE_FLAVOR_ID',
        'name': 'FAKE_SERVER_NAME',
        'metadata': {'cluster_node_id': 'NODE_ID'},
        'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}],
        'block_device_mapping_v2': bdm_v2
    }
    cc.server_create.assert_called_once_with(**attrs)
    cc.server_get.assert_called_once_with('FAKE_ID')
    profile._validate_image.assert_called_once_with(
        node_obj, expected_volume['uuid'], 'create')
    mock_zone_info.assert_called_once_with(node_obj, fake_server)
    self.assertEqual('FAKE_ID', server_id)
@mock.patch.object(node_ob.Node, 'update')
def test_do_create_wait_server_timeout(self, mock_node_obj):
    """A wait timeout raises EResourceCreation carrying the server id.

    Even though creation started, the error keeps the physical id so the
    caller can clean up; node data is persisted before failing.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    node_obj = mock.Mock(id='FAKE_NODE_ID', index=123,
                         cluster_id='FAKE_CLUSTER_ID',
                         data={
                             'placement': {
                                 'zone': 'AZ1',
                                 'servergroup': 'SERVER_GROUP_1'
                             }
                         })
    node_obj.name = 'TEST_SERVER'
    server_obj = mock.Mock(id='FAKE_ID')
    cc.server_create.return_value = server_obj
    err = exc.InternalError(code=500, message='TIMEOUT')
    cc.wait_for_server.side_effect = err

    ex = self.assertRaises(exc.EResourceCreation, profile.do_create,
                           node_obj)

    self.assertEqual('FAKE_ID', ex.resource_id)
    self.assertEqual('Failed in creating server: TIMEOUT.',
                     six.text_type(ex))
    mock_node_obj.assert_called_once_with(mock.ANY, node_obj.id,
                                          {'data': node_obj.data})
    cc.wait_for_server.assert_called_once_with('FAKE_ID')
@mock.patch.object(node_ob.Node, 'update')
def test_do_create_failed(self, mock_node_obj):
    """A failed server_create raises EResourceCreation with no resource id.

    Since no server was created, neither wait_for_server nor zone info
    update may be attempted.
    """
    cc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    mock_zone_info = self.patchobject(profile, '_update_zone_info')
    node_obj = mock.Mock(id='FAKE_NODE_ID', index=123,
                         cluster_id='FAKE_CLUSTER_ID',
                         data={
                             'placement': {
                                 'zone': 'AZ1',
                                 'servergroup': 'SERVER_GROUP_1'
                             }
                         })
    node_obj.name = 'TEST_SERVER'
    cc.server_create.side_effect = exc.InternalError(
        code=500, message="creation failed.")

    # do it
    ex = self.assertRaises(exc.EResourceCreation, profile.do_create,
                           node_obj)

    # assertions
    mock_node_obj.assert_called_once_with(mock.ANY, node_obj.id,
                                          {'data': node_obj.data})
    self.assertEqual('Failed in creating server: creation failed.',
                     six.text_type(ex))
    self.assertIsNone(ex.resource_id)
    self.assertEqual(0, cc.wait_for_server.call_count)
    self.assertEqual(0, mock_zone_info.call_count)
def test_do_delete_ok(self):
    """A plain delete destroys the server and waits for completion."""
    prof = server.ServerProfile('t', self.spec)

    compute = mock.Mock()
    compute.server_delete.return_value = None
    prof._computeclient = compute

    node = mock.Mock(physical_id='FAKE_ID')
    node.data = {}

    self.assertTrue(prof.do_delete(node))

    compute.server_delete.assert_called_once_with('FAKE_ID', True)
    compute.wait_for_server_delete.assert_called_once_with('FAKE_ID', None)
def test_do_delete_no_physical_id(self):
    """A node without a backing server deletes without any compute call."""
    prof = server.ServerProfile('t', self.spec)
    compute = mock.Mock()
    prof._computeclient = compute

    node = mock.Mock(physical_id=None)
    node.data = {}

    # do it
    outcome = prof.do_delete(node)

    # assertions
    self.assertTrue(outcome)
    self.assertFalse(compute.server_delete.called)
    self.assertFalse(compute.wait_for_server_delete.called)
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_no_physical_id_with_internal_ports(self, mock_node_obj):
    """Internal ports are cleaned up even when no server exists.

    The node's internal_ports data must be emptied and persisted.
    """
    profile = server.ServerProfile('t', self.spec)
    cc = mock.Mock()
    nc = mock.Mock()
    nc.port_delete.return_value = None
    nc.floatingip_delete.return_value = None
    profile._computeclient = cc
    profile._networkclient = nc
    test_server = mock.Mock(physical_id=None)
    test_server.data = {'internal_ports': [{
        'floating': {
            'remove': True,
            'id': 'FAKE_FLOATING_ID',
        },
        'id': 'FAKE_PORT_ID',
        'remove': True
    }]}

    # do it
    res = profile.do_delete(test_server)

    # assertions
    self.assertTrue(res)
    mock_node_obj.assert_called_once_with(
        mock.ANY, test_server.id, {'data': {'internal_ports': []}})
    self.assertFalse(cc.server_delete.called)
    self.assertFalse(cc.wait_for_server_delete.called)
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_ports_ok(self, mock_node_obj):
    """A delete removes floating IP, port and server, then waits."""
    profile = server.ServerProfile('t', self.spec)
    cc = mock.Mock()
    cc.server_delete.return_value = None
    nc = mock.Mock()
    nc.port_delete.return_value = None
    nc.floatingip_delete.return_value = None
    profile._computeclient = cc
    profile._networkclient = nc

    test_server = mock.Mock(physical_id='FAKE_ID')
    test_server.Node = mock.Mock()
    test_server.data = {'internal_ports': [{
        'floating': {
            'remove': True,
            'id': 'FAKE_FLOATING_ID',
        },
        'id': 'FAKE_PORT_ID',
        'remove': True
    }]}

    res = profile.do_delete(test_server)

    self.assertTrue(res)
    # Port data is cleared and persisted on the node.
    mock_node_obj.assert_called_once_with(
        mock.ANY, test_server.id, {'data': {'internal_ports': []}})
    nc.floatingip_delete.assert_called_once_with('FAKE_FLOATING_ID')
    nc.port_delete.assert_called_once_with('FAKE_PORT_ID')
    cc.server_delete.assert_called_once_with('FAKE_ID', True)
    cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', None)
def test_do_delete_ignore_missing_force(self):
    """force=True routes through server_force_delete with ignore_missing."""
    prof = server.ServerProfile('t', self.spec)
    compute = mock.Mock()
    prof._computeclient = compute

    node = mock.Mock(physical_id='FAKE_ID')
    node.data = {}

    outcome = prof.do_delete(node, ignore_missing=False, force=True)

    self.assertTrue(outcome)
    compute.server_force_delete.assert_called_once_with('FAKE_ID', False)
    compute.wait_for_server_delete.assert_called_once_with('FAKE_ID', None)
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_with_delete_failure(self, mock_node_obj):
    """A failing server_delete raises EResourceDeletion after port cleanup.

    Ports are deleted first, so port_delete must have been called even
    though the server deletion itself failed, and waiting is skipped.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc

    err = exc.InternalError(code=500, message='Nova Error')
    cc.server_delete.side_effect = err
    obj = mock.Mock(physical_id='FAKE_ID')
    obj.data = {'internal_ports': [{
        'floating': {
            'remove': True,
            'id': 'FAKE_FLOATING_ID',
        },
        'id': 'FAKE_PORT_ID',
        'remove': True
    }]}

    # do it
    ex = self.assertRaises(exc.EResourceDeletion,
                           profile.do_delete, obj)

    mock_node_obj.assert_called_once_with(mock.ANY, obj.id,
                                          {'data': obj.data})
    self.assertEqual("Failed in deleting server 'FAKE_ID': "
                     "Nova Error.", six.text_type(ex))
    cc.server_delete.assert_called_once_with('FAKE_ID', True)
    self.assertEqual(0, cc.wait_for_server_delete.call_count)
    nc.port_delete.assert_called_once_with('FAKE_PORT_ID')
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_with_force_delete_failure(self, mock_node_obj):
    """A failing server_force_delete raises EResourceDeletion.

    With no internal ports in node data, neither node update nor
    port deletion may be attempted.
    """
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc

    err = exc.InternalError(code=500, message='Nova Error')
    cc.server_force_delete.side_effect = err
    obj = mock.Mock(physical_id='FAKE_ID')
    obj.data = {}

    # do it
    ex = self.assertRaises(exc.EResourceDeletion,
                           profile.do_delete, obj, force=True)

    mock_node_obj.assert_not_called()
    self.assertEqual("Failed in deleting server 'FAKE_ID': "
                     "Nova Error.", six.text_type(ex))
    cc.server_force_delete.assert_called_once_with('FAKE_ID', True)
    self.assertEqual(0, cc.wait_for_server_delete.call_count)
    nc.port_delete.assert_not_called()
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_wait_for_server_timeout(self, mock_node_obj):
    """A wait timeout raises EResourceDeletion; ports are still cleaned."""
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc

    err = exc.InternalError(code=500, message='TIMEOUT')
    cc.wait_for_server_delete.side_effect = err
    obj = mock.Mock(physical_id='FAKE_ID')
    obj.data = {'internal_ports': [{
        'floating': {
            'remove': True,
            'id': 'FAKE_FLOATING_ID',
        },
        'id': 'FAKE_PORT_ID',
        'remove': True
    }]}

    # do it
    ex = self.assertRaises(exc.EResourceDeletion,
                           profile.do_delete, obj, timeout=20)

    mock_node_obj.assert_called_once_with(mock.ANY, obj.id,
                                          {'data': obj.data})
    self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.",
                     six.text_type(ex))
    cc.server_delete.assert_called_once_with('FAKE_ID', True)
    # The caller-supplied timeout must be forwarded to the waiter.
    cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', 20)
    nc.port_delete.assert_called_once_with('FAKE_PORT_ID')
@mock.patch.object(node_ob.Node, 'update')
def test_do_delete_wait_for_server_timeout_delete_ports(
        self, mock_node_obj):
    """On wait timeout, port cleanup still runs and node data is cleared."""
    cc = mock.Mock()
    nc = mock.Mock()
    nc.port_delete.return_value = None
    nc.floatingip_delete.return_value = None
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc

    test_server = mock.Mock(physical_id='FAKE_ID')
    test_server.Node = mock.Mock()
    test_server.data = {'internal_ports': [{
        'floating': {
            'remove': True,
            'id': 'FAKE_FLOATING_ID',
        },
        'id': 'FAKE_PORT_ID',
        'remove': True
    }]}
    err = exc.InternalError(code=500, message='TIMEOUT')
    cc.wait_for_server_delete.side_effect = err

    # do it
    ex = self.assertRaises(exc.EResourceDeletion,
                           profile.do_delete, test_server, timeout=20)

    self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.",
                     six.text_type(ex))
    # internal_ports must be emptied and persisted despite the failure.
    mock_node_obj.assert_called_once_with(
        mock.ANY, test_server.id, {'data': {'internal_ports': []}})
    cc.server_delete.assert_called_once_with('FAKE_ID', True)
    cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', 20)
    nc.port_delete.assert_called_once_with('FAKE_PORT_ID')
    @mock.patch.object(node_ob.Node, 'update')
    def test_do_delete_wait_for_server_timeout_no_internal_ports(
            self, mock_node_obj):
        """Deletion timeout with empty node data touches no ports or node.

        With no 'internal_ports' recorded, the timeout is still wrapped
        in EResourceDeletion, but neither Node.update nor port_delete is
        invoked.
        """
        cc = mock.Mock()
        nc = mock.Mock()
        nc.port_delete.return_value = None
        nc.floatingip_delete.return_value = None
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        profile._networkclient = nc
        test_server = mock.Mock(physical_id='FAKE_ID')
        test_server.Node = mock.Mock()
        test_server.data = {}
        err = exc.InternalError(code=500, message='TIMEOUT')
        cc.wait_for_server_delete.side_effect = err
        # do it
        ex = self.assertRaises(exc.EResourceDeletion,
                               profile.do_delete, test_server, timeout=20)
        self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.",
                         six.text_type(ex))
        mock_node_obj.assert_not_called()
        cc.server_delete.assert_called_once_with('FAKE_ID', True)
        cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', 20)
        nc.port_delete.assert_not_called()
    def test_do_get_details(self):
        """do_get_details flattens nova's raw server dict into a summary.

        Verifies the flattening rules visible in the expected dict:
        flavor/image collapse to their ids, attached_volumes to a list of
        ids, security_groups to a plain name, None timestamps/states to
        '-', while addresses are passed through unchanged.
        """
        cc = mock.Mock()
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        # Test normal path
        nova_server = mock.Mock()
        nova_server.to_dict.return_value = {
            'OS-DCF:diskConfig': 'MANUAL',
            'OS-EXT-AZ:availability_zone': 'nova',
            'OS-EXT-STS:power_state': 1,
            'OS-EXT-STS:task_state': None,
            'OS-EXT-STS:vm_state': 'active',
            'OS-SRV-USG:launched_at': 'TIMESTAMP1',
            'OS-SRV-USG:terminated_at': None,
            'accessIPv4': 'FAKE_IPV4',
            'accessIPv6': 'FAKE_IPV6',
            'addresses': {
                'private': [{
                    'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5e:00:81',
                    'version': 4,
                    'addr': '10.0.0.3',
                    'OS-EXT-IPS:type': 'fixed'
                }]
            },
            'config_drive': True,
            'created': 'CREATED_TIMESTAMP',
            'flavor': {
                'id': '1',
                'links': [{
                    'href': 'http://url_flavor',
                    'rel': 'bookmark'
                }]
            },
            'hostId': 'FAKE_HOST_ID',
            'id': 'FAKE_ID',
            'image': {
                'id': 'FAKE_IMAGE',
                'links': [{
                    'href': 'http://url_image',
                    'rel': 'bookmark'
                }],
            },
            'attached_volumes': [{
                'id': 'FAKE_VOLUME',
            }],
            'key_name': 'FAKE_KEY',
            'links': [{
                'href': 'http://url1',
                'rel': 'self'
            }, {
                'href': 'http://url2',
                'rel': 'bookmark'
            }],
            'metadata': {},
            'name': 'FAKE_NAME',
            'progress': 0,
            'security_groups': [{'name': 'default'}],
            'status': 'FAKE_STATUS',
            'tenant_id': 'FAKE_TENANT',
            'updated': 'UPDATE_TIMESTAMP',
            'user_id': 'FAKE_USER_ID',
        }
        cc.server_get.return_value = nova_server
        res = profile.do_get_details(node_obj)
        expected = {
            'OS-DCF:diskConfig': 'MANUAL',
            'OS-EXT-AZ:availability_zone': 'nova',
            'OS-EXT-STS:power_state': 1,
            'OS-EXT-STS:task_state': '-',
            'OS-EXT-STS:vm_state': 'active',
            'OS-SRV-USG:launched_at': 'TIMESTAMP1',
            'OS-SRV-USG:terminated_at': '-',
            'accessIPv4': 'FAKE_IPV4',
            'accessIPv6': 'FAKE_IPV6',
            'config_drive': True,
            'created': 'CREATED_TIMESTAMP',
            'flavor': '1',
            'hostId': 'FAKE_HOST_ID',
            'id': 'FAKE_ID',
            'image': 'FAKE_IMAGE',
            'attached_volumes': ['FAKE_VOLUME'],
            'key_name': 'FAKE_KEY',
            'metadata': {},
            'name': 'FAKE_NAME',
            'addresses': {
                'private': [{
                    'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5e:00:81',
                    'version': 4,
                    'addr': '10.0.0.3',
                    'OS-EXT-IPS:type': 'fixed'
                }]
            },
            'progress': 0,
            'security_groups': 'default',
            'updated': 'UPDATE_TIMESTAMP',
            'status': 'FAKE_STATUS',
        }
        self.assertEqual(expected, res)
        cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_get_details_with_no_network_or_sg(self):
        """Empty addresses and security groups collapse to empty values."""
        cc = mock.Mock()
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        # Test normal path
        nova_server = mock.Mock()
        nova_server.to_dict.return_value = {
            'addresses': {},
            'flavor': {
                'id': 'FAKE_FLAVOR',
            },
            'id': 'FAKE_ID',
            'image': {
                'id': 'FAKE_IMAGE',
            },
            'attached_volumes': [{
                'id': 'FAKE_VOLUME',
            }],
            'security_groups': [],
        }
        cc.server_get.return_value = nova_server
        res = profile.do_get_details(node_obj)
        expected = {
            'flavor': 'FAKE_FLAVOR',
            'id': 'FAKE_ID',
            'image': 'FAKE_IMAGE',
            'attached_volumes': ['FAKE_VOLUME'],
            'addresses': {},
            # an empty security group list flattens to an empty string
            'security_groups': '',
        }
        self.assertEqual(expected, res)
        cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_get_details_image_no_id_key(self):
        """An image dict without an 'id' key is passed through unflattened."""
        cc = mock.Mock()
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        # Test normal path
        nova_server = mock.Mock()
        nova_server.to_dict.return_value = {
            'addresses': {
                'private': [{
                    'version': 4,
                    'addr': '10.0.0.3',
                }]
            },
            'flavor': {
                'id': 'FAKE_FLAVOR',
            },
            'id': 'FAKE_ID',
            'image': {},
            'attached_volumes': [{
                'id': 'FAKE_VOLUME',
            }],
            'security_groups': [{'name': 'default'}],
        }
        cc.server_get.return_value = nova_server
        res = profile.do_get_details(node_obj)
        expected = {
            'flavor': 'FAKE_FLAVOR',
            'id': 'FAKE_ID',
            # no 'id' key to flatten on, so the raw dict is kept
            'image': {},
            'attached_volumes': ['FAKE_VOLUME'],
            'addresses': {
                'private': [{
                    'version': 4,
                    'addr': '10.0.0.3',
                }]
            },
            'security_groups': 'default',
        }
        self.assertEqual(expected, res)
        cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_get_details_bdm_no_id_key(self):
        """An empty attached_volumes list yields an empty list in details."""
        cc = mock.Mock()
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        # Test normal path
        nova_server = mock.Mock()
        nova_server.to_dict.return_value = {
            'addresses': {
                'private': [{
                    'version': 4,
                    'addr': '10.0.0.3',
                }]
            },
            'flavor': {
                'id': 'FAKE_FLAVOR',
            },
            'id': 'FAKE_ID',
            'image': {},
            'attached_volumes': [],
            'security_groups': [{'name': 'default'}],
        }
        cc.server_get.return_value = nova_server
        res = profile.do_get_details(node_obj)
        expected = {
            'flavor': 'FAKE_FLAVOR',
            'id': 'FAKE_ID',
            'image': {},
            'attached_volumes': [],
            'addresses': {
                'private': [{
                    'version': 4,
                    'addr': '10.0.0.3',
                }]
            },
            'security_groups': 'default',
        }
        self.assertEqual(expected, res)
        cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_get_details_with_more_network_or_sg(self):
        """Multiple networks and security groups are all represented.

        With more than one entry, addresses keep every network key and
        security_groups keeps every group name (order not significant,
        hence the set comparisons).
        """
        cc = mock.Mock()
        profile = server.ServerProfile('t', self.spec)
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        # Test normal path
        nova_server = mock.Mock()
        data = {
            'addresses': {
                'private': [{
                    'version': 4,
                    'addr': '10.0.0.3',
                }, {
                    'version': 4,
                    'addr': '192.168.43.3'
                }],
                'public': [{
                    'version': 4,
                    'addr': '172.16.5.3',
                }]
            },
            'flavor': {
                'id': 'FAKE_FLAVOR',
            },
            'id': 'FAKE_ID',
            'image': {
                'id': 'FAKE_IMAGE',
            },
            'attached_volumes': [{
                'id': 'FAKE_VOLUME',
            }],
            'security_groups': [{
                'name': 'default',
            }, {
                'name': 'webserver',
            }],
        }
        nova_server.to_dict.return_value = data
        cc.server_get.return_value = nova_server
        res = profile.do_get_details(node_obj)
        self.assertEqual(set(data['addresses']), set(res['addresses']))
        self.assertEqual(set(['default', 'webserver']),
                         set(res['security_groups']))
        cc.server_get.assert_called_once_with('FAKE_ID')
def test_do_get_details_no_physical_id(self):
# Test path for server not created
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id='')
self.assertEqual({}, profile.do_get_details(node_obj))
node_obj.physical_id = None
self.assertEqual({}, profile.do_get_details(node_obj))
def test_do_get_details_server_not_found(self):
# Test path for server not created
cc = mock.Mock()
err = exc.InternalError(code=404, message='No Server found for ID')
cc.server_get.side_effect = err
profile = server.ServerProfile('t', self.spec)
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_get_details(node_obj)
expected = {
'Error': {
'message': 'No Server found for ID',
'code': 404
}
}
self.assertEqual(expected, res)
cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_adopt(self):
        """do_adopt builds a profile spec from a live server.

        The senlin-internal metadata keys (cluster_id, cluster_node_id,
        cluster_node_index) must be stripped, networks reduced to their
        names, and security groups to a list of names.
        """
        profile = server.ServerProfile('t', self.spec)
        x_server = mock.Mock(
            disk_config="",
            availability_zone="AZ01",
            block_device_mapping={"foo": "bar"},
            has_config_drive=False,
            flavor={"id": "FLAVOR_ID"},
            image={"id": "IMAGE_ID"},
            key_name="FAKE_KEY",
            metadata={
                "mkey": "mvalue",
                "cluster_id": "CLUSTER_ID",
                "cluster_node_id": "NODE_ID",
                "cluster_node_index": 123
            },
            addresses={
                "NET1": [{
                    "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:09:6f:d8",
                    "OS-EXT-IPS:type": "fixed",
                    "addr": "ADDR1_IPv4",
                    "version": 4
                }, {
                    "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:09:6f:d8",
                    "OS-EXT-IPS:type": "fixed",
                    "addr": "ADDR1_IPv6",
                    "version": 6
                }],
                "NET2": [{
                    "OS-EXT-IPS-MAC:mac_addr": "aa:e6:3e:09:6f:db",
                    "OS-EXT-IPS:type": "fixed",
                    "addr": "ADDR2_IPv4",
                    "version": 4
                }, {
                    "OS-EXT-IPS-MAC:mac_addr": "aa:e6:3e:09:6f:db",
                    "OS-EXT-IPS:type": "fixed",
                    "addr": "ADDR2_IPv6",
                    "version": 6
                }],
            },
            security_groups=[{'name': 'GROUP1'}, {'name': 'GROUP2'}]
        )
        # 'name' is a reserved Mock constructor kwarg, so set it afterwards
        x_server.name = "FAKE_NAME"
        cc = mock.Mock()
        cc.server_get.return_value = x_server
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        res = profile.do_adopt(node_obj)

        self.assertEqual(False, res['auto_disk_config'])
        self.assertEqual('AZ01', res['availability_zone'])
        self.assertEqual({'foo': 'bar'}, res['block_device_mapping_v2'])
        self.assertFalse(res['config_drive'])
        self.assertEqual('FLAVOR_ID', res['flavor'])
        self.assertEqual('IMAGE_ID', res['image'])
        self.assertEqual('FAKE_KEY', res['key_name'])
        self.assertEqual({'mkey': 'mvalue'}, res['metadata'])
        self.assertEqual(2, len(res['networks']))
        self.assertIn({'network': 'NET1'}, res['networks'])
        self.assertIn({'network': 'NET2'}, res['networks'])
        self.assertIn('GROUP1', res['security_groups'])
        self.assertIn('GROUP2', res['security_groups'])
        cc.server_get.assert_called_once_with('FAKE_ID')
def test_do_adopt_failed_get(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
err = exc.InternalError(code=404, message='No Server found for ID')
cc.server_get.side_effect = err
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_adopt(node_obj)
expected = {
'Error': {
'code': 404,
'message': 'No Server found for ID',
}
}
self.assertEqual(expected, res)
cc.server_get.assert_called_once_with('FAKE_ID')
    def test_do_adopt_with_overrides(self):
        """Overrides replace the corresponding adopted spec entries.

        Here the 'networks' override supersedes the networks discovered
        from the live server; other keys are adopted as usual.
        """
        profile = server.ServerProfile('t', self.spec)
        x_server = mock.Mock(
            disk_config="",
            availability_zone="AZ01",
            block_device_mapping={"foo": "bar"},
            has_config_drive=False,
            flavor={"id": "FLAVOR_ID"},
            image={"id": "IMAGE_ID"},
            key_name="FAKE_KEY",
            metadata={
                "mkey": "mvalue",
                "cluster_id": "CLUSTER_ID",
                "cluster_node_id": "NODE_ID",
                "cluster_node_index": 123
            },
            addresses={
                "NET1": [{
                    "OS-EXT-IPS:type": "fixed",
                }],
                "NET2": [{
                    "OS-EXT-IPS:type": "fixed",
                }],
            },
            security_groups=[{'name': 'GROUP1'}, {'name': 'GROUP2'}]
        )
        # 'name' is a reserved Mock constructor kwarg, so set it afterwards
        x_server.name = "FAKE_NAME"
        cc = mock.Mock()
        cc.server_get.return_value = x_server
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')
        overrides = {
            'networks': [{"network": "NET3"}]
        }

        res = profile.do_adopt(node_obj, overrides=overrides)

        self.assertEqual(False, res['auto_disk_config'])
        self.assertEqual('AZ01', res['availability_zone'])
        self.assertEqual({'foo': 'bar'}, res['block_device_mapping_v2'])
        self.assertFalse(res['config_drive'])
        self.assertEqual('FLAVOR_ID', res['flavor'])
        self.assertEqual('IMAGE_ID', res['image'])
        self.assertEqual('FAKE_KEY', res['key_name'])
        self.assertEqual({'mkey': 'mvalue'}, res['metadata'])
        self.assertIn({'network': 'NET3'}, res['networks'])
        self.assertNotIn({'network': 'NET1'}, res['networks'])
        self.assertNotIn({'network': 'NET2'}, res['networks'])
        self.assertIn('GROUP1', res['security_groups'])
        self.assertIn('GROUP2', res['security_groups'])
        cc.server_get.assert_called_once_with('FAKE_ID')
def test_do_join_successful(self):
cc = mock.Mock()
profile = server.ServerProfile('t', self.spec)
metadata = {}
cc.server_metadata_get.return_value = metadata
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID', index='123')
res = profile.do_join(node_obj, 'FAKE_CLUSTER_ID')
self.assertTrue(res)
meta = {'cluster_id': 'FAKE_CLUSTER_ID',
'cluster_node_index': '123'}
cc.server_metadata_update.assert_called_once_with(
'FAKE_ID', meta)
def test_do_join_server_not_created(self):
# Test path where server not specified
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id=None)
res = profile.do_join(node_obj, 'FAKE_CLUSTER_ID')
self.assertFalse(res)
def test_do_leave_successful(self):
# Test normal path
cc = mock.Mock()
profile = server.ServerProfile('t', self.spec)
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_leave(node_obj)
self.assertTrue(res)
cc.server_metadata_delete.assert_called_once_with(
'FAKE_ID', ['cluster_id', 'cluster_node_index'])
def test_do_leave_no_physical_id(self):
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id=None)
res = profile.do_leave(node_obj)
self.assertFalse(res)
def test_do_check(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_get.return_value = None
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_check(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertFalse(res)
return_server = mock.Mock()
return_server.status = 'ACTIVE'
cc.server_get.return_value = return_server
res = profile.do_check(test_server)
cc.server_get.assert_called_with('FAKE_ID')
self.assertTrue(res)
def test_do_check_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.do_check(obj)
self.assertFalse(res)
def test_do_check_no_server(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
err = exc.InternalError(code=404, message='No Server found')
cc.server_get.side_effect = err
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EServerNotFound,
profile.do_check,
node_obj)
self.assertEqual("Failed in found server 'FAKE_ID': "
"No Server found.",
six.text_type(ex))
cc.server_get.assert_called_once_with('FAKE_ID')
def test_do_healthcheck_active(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_get.return_value = mock.Mock(status='ACTIVE')
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_healthcheck(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertTrue(res)
def test_do_healthcheck_empty_server_obj(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_get.return_value = None
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_healthcheck(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertTrue(res)
def test_do_healthcheck_exception(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=503, message='Error')
cc.server_get.side_effect = ex
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_healthcheck(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertTrue(res)
def test_do_healthcheck_error(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_get.return_value = mock.Mock(status='ERROR')
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_healthcheck(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertFalse(res)
def test_do_healthcheck_server_not_found(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=404, message='No Server found')
cc.server_get.side_effect = ex
profile._computeclient = cc
test_server = mock.Mock(physical_id='FAKE_ID')
res = profile.do_healthcheck(test_server)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertFalse(res)
    @mock.patch.object(server.ServerProfile, 'do_delete')
    @mock.patch.object(server.ServerProfile, 'do_create')
    def test_do_recover_operation_is_none(self, mock_create, mock_delete):
        """With no operation given, recovery defaults to delete-then-create."""
        profile = server.ServerProfile('t', self.spec)
        node_obj = mock.Mock(physical_id='FAKE_ID')
        mock_delete.return_value = None
        mock_create.return_value = True

        res = profile.do_recover(node_obj, operation=None)

        self.assertTrue(res)
        mock_delete.assert_called_once_with(node_obj, force=False,
                                            timeout=None)
        mock_create.assert_called_once_with(node_obj)
@mock.patch.object(server.ServerProfile, 'handle_rebuild')
def test_do_recover_rebuild(self, mock_rebuild):
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_recover(node_obj, operation='REBUILD')
self.assertEqual(mock_rebuild.return_value, res)
mock_rebuild.assert_called_once_with(node_obj)
    @mock.patch.object(server.ServerProfile, 'handle_rebuild')
    def test_do_recover_with_list(self, mock_rebuild):
        # NOTE(review): despite the name, this test is byte-identical to
        # test_do_recover_rebuild and never passes a list-typed operation.
        # Confirm whether it should exercise e.g. operation=['REBUILD'].
        profile = server.ServerProfile('t', self.spec)
        node_obj = mock.Mock(physical_id='FAKE_ID')

        res = profile.do_recover(node_obj, operation='REBUILD')

        self.assertEqual(mock_rebuild.return_value, res)
        mock_rebuild.assert_called_once_with(node_obj)
@mock.patch.object(server.ServerProfile, 'handle_reboot')
def test_do_recover_reboot(self, mock_reboot):
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_recover(node_obj, operation='REBOOT')
self.assertTrue(res)
self.assertEqual(mock_reboot.return_value, res)
mock_reboot.assert_called_once_with(node_obj, type='HARD')
@mock.patch.object(profiles_base.Profile, 'do_recover')
def test_do_recover_bad_operation(self, mock_base_recover):
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id='FAKE_ID')
res, status = profile.do_recover(node_obj,
operation='BLAHBLAH')
self.assertFalse(status)
@mock.patch.object(profiles_base.Profile, 'do_recover')
def test_do_recover_fallback(self, mock_base_recover):
profile = server.ServerProfile('t', self.spec)
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.do_recover(node_obj, operation='RECREATE')
self.assertEqual(mock_base_recover.return_value, res)
mock_base_recover.assert_called_once_with(
node_obj, operation='RECREATE')
def test_handle_reboot(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_reboot = mock.Mock()
cc.wait_for_server = mock.Mock()
profile._computeclient = cc
# do it
res = profile.handle_reboot(obj, type='SOFT')
self.assertTrue(res)
cc.server_reboot.assert_called_once_with('FAKE_ID', 'SOFT')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_reboot_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res, status = profile.handle_reboot(obj, type='SOFT')
self.assertFalse(status)
def test_handle_reboot_default_type(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_reboot = mock.Mock()
cc.wait_for_server = mock.Mock()
profile._computeclient = cc
# do it
res = profile.handle_reboot(obj)
self.assertTrue(res)
cc.server_reboot.assert_called_once_with('FAKE_ID', 'SOFT')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_reboot_bad_type(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res, status = profile.handle_reboot(obj, type=['foo'])
self.assertFalse(status)
res, status = profile.handle_reboot(obj, type='foo')
self.assertFalse(status)
def test_handle_rebuild_with_image(self):
profile = server.ServerProfile('t', self.spec)
x_image = {'id': '123'}
x_server = mock.Mock(image=x_image)
cc = mock.Mock()
cc.server_get.return_value = x_server
cc.server_rebuild.return_value = True
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
res = profile.handle_rebuild(node_obj)
self.assertTrue(res)
cc.server_get.assert_called_with('FAKE_ID')
cc.server_rebuild.assert_called_once_with('FAKE_ID', '123',
'FAKE_SERVER_NAME',
'adminpass')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
    def test_handle_rebuild_with_bdm(self):
        """Rebuild behaves the same when the profile defines a BDM v2.

        The block_device_mapping_v2 entry in the spec must not change the
        rebuild call, which still uses the server's current image.
        """
        bdm_v2 = [
            {
                'volume_size': 1,
                'uuid': '123',
                'source_type': 'image',
                'destination_type': 'volume',
                'boot_index': 0,
            }
        ]
        spec = {
            'type': 'os.nova.server',
            'version': '1.0',
            'properties': {
                'flavor': 'FLAV',
                'admin_pass': 'adminpass',
                'name': 'FAKE_SERVER_NAME',
                'security_groups': ['HIGH_SECURITY_GROUP'],
                'block_device_mapping_v2': bdm_v2,
            }
        }
        profile = server.ServerProfile('t', spec)
        x_image = {'id': '123'}
        x_server = mock.Mock(image=x_image)
        cc = mock.Mock()
        cc.server_get.return_value = x_server
        cc.server_rebuild.return_value = True
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')

        res = profile.handle_rebuild(node_obj)

        self.assertTrue(res)
        cc.server_get.assert_called_with('FAKE_ID')
        cc.server_rebuild.assert_called_once_with('FAKE_ID', '123',
                                                  'FAKE_SERVER_NAME',
                                                  'adminpass')
        cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_rebuild_server_not_found(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
err = exc.InternalError(code=404, message='FAKE_ID not found')
cc.server_get.side_effect = err
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_rebuild,
node_obj)
self.assertEqual("Failed in rebuilding server 'FAKE_ID': "
"FAKE_ID not found.",
six.text_type(ex))
cc.server_get.assert_called_once_with('FAKE_ID')
def test_handle_rebuild_failed_rebuild(self):
profile = server.ServerProfile('t', self.spec)
x_image = {'id': '123'}
x_server = mock.Mock(image=x_image)
cc = mock.Mock()
cc.server_get.return_value = x_server
ex = exc.InternalError(code=500, message='cannot rebuild')
cc.server_rebuild.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_rebuild,
node_obj)
self.assertEqual("Failed in rebuilding server 'FAKE_ID': "
"cannot rebuild.",
six.text_type(ex))
cc.server_get.assert_called_once_with('FAKE_ID')
cc.server_rebuild.assert_called_once_with('FAKE_ID', '123',
'FAKE_SERVER_NAME',
'adminpass')
self.assertEqual(0, cc.wait_for_server.call_count)
def test_handle_rebuild_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
x_image = {'id': '123'}
x_server = mock.Mock(image=x_image)
cc = mock.Mock()
cc.server_get.return_value = x_server
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_rebuild,
node_obj)
self.assertEqual("Failed in rebuilding server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_get.assert_called_once_with('FAKE_ID')
cc.server_rebuild.assert_called_once_with('FAKE_ID', '123',
'FAKE_SERVER_NAME',
'adminpass')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_rebuild_failed_retrieving_server(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_get.return_value = None
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
res, status = profile.handle_rebuild(node_obj)
self.assertFalse(status)
cc.server_get.assert_called_once_with('FAKE_ID')
self.assertEqual(0, cc.server_rebuild.call_count)
self.assertEqual(0, cc.wait_for_server.call_count)
def test_handle_rebuild_no_physical_id(self):
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
test_server = mock.Mock()
test_server.physical_id = None
res, status = profile.handle_rebuild(test_server)
self.assertFalse(status)
    def test_handle_rebuild_failed_with_name(self):
        """A None server/node name fails schema validation before rebuild.

        With no name in the spec and none on the node, the ESchema error
        must be raised before server_rebuild is ever attempted.
        """
        # NOTE(review): this mutates self.spec in place; assumes setUp
        # rebuilds the spec for every test -- confirm.
        self.spec['properties']['name'] = None
        profile = server.ServerProfile('t', self.spec)
        x_image = {'id': '123'}
        x_server = mock.Mock(image=x_image)
        cc = mock.Mock()
        cc.server_get.return_value = x_server
        ex = exc.InternalError(code=400,
                               message='Server name is not '
                                       'a string or unicode.')
        cc.server_rebuild.side_effect = ex
        profile._computeclient = cc
        node_obj = mock.Mock(physical_id='FAKE_ID')
        node_obj.name = None
        ex = self.assertRaises(exc.ESchema,
                               profile.handle_rebuild,
                               node_obj)

        self.assertEqual("The value 'None' is not a valid string.",
                         six.text_type(ex))
        cc.server_get.assert_called_once_with('FAKE_ID')
        cc.server_rebuild.assert_not_called()
        self.assertEqual(0, cc.wait_for_server.call_count)
def test_handle_change_password(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
cc.server_reboot = mock.Mock()
cc.wait_for_server = mock.Mock()
profile._computeclient = cc
# do it
res = profile.handle_change_password(obj, admin_pass='new_pass')
self.assertTrue(res)
cc.server_change_password.assert_called_once_with(
'FAKE_ID', new_password='new_pass')
def test_handle_change_password_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_change_password(obj, admin_pass='new_pass')
self.assertFalse(res)
def test_handle_change_password_no_password(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_change_password(obj)
self.assertFalse(res)
def test_handle_change_password_bad_param(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_change_password(obj, admin_pass=['foo'])
self.assertFalse(res)
res = profile.handle_change_password(obj, foo='bar')
self.assertFalse(res)
def test_handle_suspend(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_suspend(obj)
self.assertTrue(res)
def test_handle_suspend_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_suspend(obj)
self.assertFalse(res)
def test_handle_suspend_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_suspend,
node_obj)
self.assertEqual("Failed in suspend server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_suspend.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'SUSPENDED')
def test_handle_resume(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_resume(obj)
self.assertTrue(res)
def test_handle_resume_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_resume(obj)
self.assertFalse(res)
def test_handle_resume_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_resume,
node_obj)
self.assertEqual("Failed in resume server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_resume.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_start(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_start(obj)
self.assertTrue(res)
def test_handle_start_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_start(obj)
self.assertFalse(res)
def test_handle_start_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_start,
node_obj)
self.assertEqual("Failed in start server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_start.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_stop(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_stop(obj)
self.assertTrue(res)
def test_handle_stop_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_stop(obj)
self.assertFalse(res)
def test_handle_stop_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_stop,
node_obj)
self.assertEqual("Failed in stop server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_stop.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'SHUTOFF')
def test_handle_lock(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_lock(obj)
self.assertTrue(res)
def test_handle_lock_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_lock(obj)
self.assertFalse(res)
def test_handle_unlock(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_unlock(obj)
self.assertTrue(res)
def test_handle_unlock_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_unlock(obj)
self.assertFalse(res)
def test_handle_pause(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_pause(obj)
self.assertTrue(res)
def test_handle_pause_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_pause(obj)
self.assertFalse(res)
def test_handle_pause_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_pause,
node_obj)
self.assertEqual("Failed in pause server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_pause.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'PAUSED')
def test_handle_unpause(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_unpause(obj)
self.assertTrue(res)
def test_handle_unpause_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_unpause(obj)
self.assertFalse(res)
def test_handle_unpause_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_unpause,
node_obj)
self.assertEqual("Failed in unpause server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_unpause.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_rescue(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
gc = mock.Mock()
profile._computeclient = cc
profile._glanceclient = gc
# do it
res = profile.handle_rescue(obj, admin_pass='new_pass',
image='FAKE_IMAGE')
self.assertTrue(res)
cc.server_rescue.assert_called_once_with(
'FAKE_ID', admin_pass='new_pass', image_ref='FAKE_IMAGE')
gc.image_find.assert_called_once_with('FAKE_IMAGE', False)
def test_handle_rescue_image_none(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
profile._computeclient = cc
res = profile.handle_rescue(obj, admin_pass='new_pass',
image=None)
self.assertFalse(res)
def test_handle_rescue_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_rescue(obj)
self.assertFalse(res)
def test_handle_rescue_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
gc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
profile._glanceclient = gc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_rescue,
node_obj, admin_pass='new_pass',
image='FAKE_IMAGE')
self.assertEqual("Failed in rescue server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_rescue.assert_called_once_with('FAKE_ID',
admin_pass='new_pass',
image_ref='FAKE_IMAGE')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'RESCUE')
gc.image_find.assert_called_once_with('FAKE_IMAGE', False)
def test_handle_unrescue(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_unrescue(obj)
self.assertTrue(res)
def test_handle_unresuce_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_unrescue(obj)
self.assertFalse(res)
def test_handle_unrescue_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_unrescue,
node_obj)
self.assertEqual("Failed in unrescue server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_unrescue.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_migrate(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_migrate(obj)
self.assertTrue(res)
def test_handle_migrate_no_physical_id(self):
obj = mock.Mock(physical_id=None)
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_migrate(obj)
self.assertFalse(res)
def test_handle_migrate_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_migrate,
node_obj)
self.assertEqual("Failed in migrate server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.server_migrate.assert_called_once_with('FAKE_ID')
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_snapshot(self):
obj = mock.Mock(physical_id='FAKE_ID', name='NODE001')
profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock()
# do it
res = profile.handle_snapshot(obj)
self.assertTrue(res)
def test_handle_snapshot_no_physical_id(self):
obj = mock.Mock(physical_id=None, name='NODE001')
profile = server.ServerProfile('t', self.spec)
# do it
res = profile.handle_snapshot(obj)
self.assertFalse(res)
def test_handle_snapshot_failed_waiting(self):
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock(name='NODE001')
ex = exc.InternalError(code=500, message='timeout')
cc.wait_for_server.side_effect = ex
profile._computeclient = cc
node_obj = mock.Mock(physical_id='FAKE_ID', name='NODE001')
ex = self.assertRaises(exc.EResourceOperation,
profile.handle_snapshot,
node_obj)
self.assertEqual("Failed in snapshot server 'FAKE_ID': "
"timeout.", six.text_type(ex))
cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
def test_handle_restore(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
profile._computeclient = cc
# do it
res = profile.handle_restore(obj, admin_pass='new_pass',
image='FAKE_IMAGE')
self.assertTrue(res)
def test_handle_restore_image_none(self):
obj = mock.Mock(physical_id='FAKE_ID')
profile = server.ServerProfile('t', self.spec)
cc = mock.Mock()
profile._computeclient = cc
res = profile.handle_restore(obj, admin_pass='new_pass',
image=None)
self.assertFalse(res)
# License: Apache-2.0
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """dict subclass that remembers insertion order (Python 2 backport).

    Order is kept in a circular doubly linked list of [key, prev, next]
    cells rooted at a sentinel node (``self.__end``); ``self.__map``
    maps each key to its cell, making deletion O(1).
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only set up the linked list on first initialization;
            # __init__ may be called again on an existing instance.
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Link a new cell just before the sentinel, i.e. at the end.
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell for `key` from the order list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; last-inserted first
        when `last` is true, first-inserted otherwise."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: the linked-list cells are self-referential and
        # unpicklable, so temporarily strip them and pickle the items.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)

    def __ne__(self, other):
        # Bug fix: the previous version of this line had stray non-Python
        # text appended, which made the module unparsable.
        return not self == other
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.display
class Screen(renpy.object.Object):
    """
    Represents a screen: a named collection of widgets displayed
    together. Constructing an instance registers it in the global
    `screens` table under (base name, variant).
    """

    def __init__(self,
                 name,
                 function,
                 modal="False",
                 zorder="0",
                 tag=None,
                 predict=None,
                 variant=None):

        # Normalize the screen name into a tuple of words.
        if isinstance(name, basestring):
            name = tuple(name.split())

        self.name = name

        # Register this screen under its base name and variant.
        screens[name[0], variant] = self

        # The function called to populate the screen.
        self.function = function

        # Expression evaluated to decide modality (a modal screen
        # ignores screens under it).
        self.modal = modal

        # Expression evaluated to get the zorder.
        self.zorder = zorder

        # The tag defaults to the screen's base name.
        self.tag = tag or name[0]

        # Whether this screen may take part in image prediction.
        self.predict = renpy.config.predict_screens if predict is None else predict
class ScreenDisplayable(renpy.display.layout.Container):
    """
    A screen is a collection of widgets that are displayed together. This
    class is responsible for managing the display of a screen.
    """

    # Attributes rebuilt after load rather than saved.
    no_save = [ 'screen', 'child', 'transforms', 'widgets' ]

    def after_setstate(self):
        # Re-resolve the screen by name and drop cached display state;
        # it is rebuilt by the next update().
        self.screen = get_screen_variant(self.screen_name[0])
        self.child = None
        self.transforms = { }
        self.widgets = { }

    def __init__(self, screen, tag, layer, widget_properties={}, scope={}, **properties):

        super(ScreenDisplayable, self).__init__(**properties)

        # Stash the properties, so we can re-create the screen.
        self.properties = properties

        # The screen, and it's name. (The name is used to look up the
        # screen on save.)
        self.screen = screen
        self.screen_name = screen.name

        # The tag and layer screen was displayed with.
        self.tag = tag
        self.layer = layer

        # The scope associated with this statement. This is passed in
        # as keyword arguments to the displayable.
        self.scope = renpy.python.RevertableDict(scope)

        # The child associated with this screen.
        self.child = None

        # Widget properties given to this screen the last time it was
        # shown.
        self.widget_properties = widget_properties

        # A map from name to the widget with that name.
        self.widgets = { }

        if tag and layer:
            old_screen = get_screen(tag, layer)
        else:
            old_screen = None

        # A map from name to the transform with that name. (This is
        # taken from the old version of the screen, if it exists.
        if old_screen is not None:
            self.transforms = old_screen.transforms
        else:
            self.transforms = { }

        # What widgets and transforms were the last time this screen was
        # updated. Used to communicate with the ui module, and only
        # valid during an update - not used at other times.
        self.old_widgets = None
        self.old_transforms = None

        # Should we transfer data from the old_screen? This becomes
        # true once this screen finishes updating for the first time,
        # and also while we're using something.
        self.old_transfers = (old_screen and old_screen.screen_name == self.screen_name)

        # The current transform event, and the last transform event to
        # be processed.
        self.current_transform_event = None

        # A dict-set of widgets (by id) that have been hidden from us.
        self.hidden_widgets = { }

        # Are we hiding?
        self.hiding = False

        # Modal and zorder, evaluated in the screen's scope.
        self.modal = renpy.python.py_eval(self.screen.modal, locals=self.scope)
        self.zorder = renpy.python.py_eval(self.screen.zorder, locals=self.scope)

    def __repr__(self):
        return "<ScreenDisplayable: %r>" % (self.screen_name,)

    def visit(self):
        return [ self.child ]

    def per_interact(self):
        # Rebuild the screen at the start of each interaction.
        renpy.display.render.redraw(self, 0)
        self.update()

    def set_transform_event(self, event):
        # Remember the event so update() can propagate it to children.
        super(ScreenDisplayable, self).set_transform_event(event)
        self.current_transform_event = event

    def find_focusable(self, callback, focus_name):
        # A hiding screen takes no focus.
        if self.child and not self.hiding:
            self.child.find_focusable(callback, focus_name)

    def _hide(self, st, at, kind):
        # Returns a displayable that should stay on the layer while
        # hide transforms run, or None when nothing needs to linger.

        if self.hiding:
            hid = self
        else:
            # Work on a copy, so this (still-shown) screen is untouched.
            hid = ScreenDisplayable(self.screen, self.tag, self.layer, self.widget_properties, self.scope, **self.properties)
            hid.transforms = self.transforms.copy()
            hid.widgets = self.widgets.copy()
            hid.old_transfers = True
            hid.hiding = True

        hid.current_transform_event = kind
        hid.update()

        renpy.display.render.redraw(hid, 0)

        rv = None

        # Compute the reverse of transforms and widgets.
        reverse_transforms = dict((id(v), k) for k, v in hid.transforms.iteritems())
        reverse_widgets = dict((id(v), k) for k, v in hid.widgets.iteritems())

        # Assumption: the only displayables that can keep us around
        # are Transforms that handle hide.

        # Iterate over our immediate children, trying to hide them.
        for d in list(hid.child.children):

            id_d = id(d)

            # If we have a transform, call its _hide method. If that comes
            # back non-None, store the new transform, and keep us alive.
            #
            # Otherwise, remove the child.
            name = reverse_transforms.get(id_d, None)
            if name is not None:
                c = d._hide(st, at, kind)

                if c is not None:
                    hid.transforms[name] = c
                    rv = hid
                else:
                    hid.hidden_widgets[name] = True
                    hid.child.remove(d)

                continue

            # Remove any non-transform children.
            name = reverse_widgets.get(id_d, None)
            if name is not None:
                hid.hidden_widgets[name] = True
                hid.child.remove(d)

        return rv

    def update(self):
        # Rebuilds the screen's children by calling the screen function,
        # and returns the map of widgets by id.

        # Update _current_screen
        global _current_screen
        old_screen = _current_screen
        _current_screen = self

        # Cycle widgets and transforms.
        self.old_widgets = self.widgets
        self.old_transforms = self.transforms
        self.widgets = { }
        self.transforms = { }

        # Render the child.
        old_ui_screen = renpy.ui.screen
        renpy.ui.screen = self

        renpy.ui.detached()
        self.child = renpy.ui.fixed(focus="_screen_" + "_".join(self.screen_name))
        self.children = [ self.child ]

        self.scope["_scope"] = self.scope
        self.scope["_name"] = 0
        self.screen.function(**self.scope)

        renpy.ui.close()

        renpy.ui.screen = old_ui_screen
        _current_screen = old_screen

        # Visit all the children, to get them started.
        self.child.visit_all(lambda c : c.per_interact())

        # Finish up.
        self.old_widgets = None
        self.old_transforms = None
        self.old_transfers = True

        if self.current_transform_event:

            for i in self.child.children:
                i.set_transform_event(self.current_transform_event)

            self.current_transform_event = None

        return self.widgets

    def render(self, w, h, st, at):

        if not self.child:
            self.update()

        child = renpy.display.render.render(self.child, w, h, st, at)

        rv = renpy.display.render.Render(w, h)
        # A hiding screen neither takes focus nor acts as a main child.
        rv.blit(child, (0, 0), focus=not self.hiding, main=not self.hiding)
        rv.modal = self.modal and not self.hiding

        return rv

    def get_placement(self):

        if not self.child:
            self.update()

        return self.child.get_placement()

    def event(self, ev, x, y, st):

        if self.hiding:
            return

        global _current_screen
        old_screen = _current_screen
        _current_screen = self

        rv = self.child.event(ev, x, y, st)

        _current_screen = old_screen

        if rv is not None:
            return rv

        # A modal screen stops events from reaching lower layers.
        if self.modal:
            raise renpy.display.layout.IgnoreLayers()
# The ScreenDisplayable that is currently being updated or rendered, or
# None if no screen is being currently displayed.
_current_screen = None

# A map from (screen_name, variant) tuples to screen.
screens = { }
def get_screen_variant(name):
    """
    Get a variant screen object for `name`, trying the configured
    variants in order. Returns None when no variant matches.
    """

    for variant in renpy.config.variants:
        screen = screens.get((name, variant), None)
        if screen is not None:
            return screen

    return None
def define_screen(*args, **kwargs):
    """
    :doc: screens
    :args: (name, function, modal="False", zorder="0", tag=None, variant=None)

    Defines a screen with `name`, which should be a string.

    `function`
        The function that is called to display the screen. The
        function is called with the screen scope as keyword
        arguments. It should ignore additional keyword arguments.

        The function should call the ui functions to add things to the
        screen.

    `modal`
        A string that, when evaluated, determines of the created
        screen should be modal. A modal screen prevents screens
        underneath it from receiving input events.

    `zorder`
        A string that, when evaluated, should be an integer. The integer
        controls the order in which screens are displayed. A screen
        with a greater zorder number is displayed above screens with a
        lesser zorder number.

    `tag`
        The tag associated with the screen. When the screen is shown,
        it replaces any other screen with the same tag. The tag
        defaults to the name of the screen.

    `predict`
        If true, this screen can be loaded for image prediction. If false,
        it can't. Defaults to true.

    `variant`
        String. Gives the variant of the screen to use.

    Constructing the Screen registers it in the global screen table as a
    side effect.
    """

    Screen(*args, **kwargs)
def get_screen(name, layer="screens"):
    """
    :doc: screens

    Returns the ScreenDisplayable with the given `tag`, on
    `layer`. If no displayable with the tag is not found, it is
    interpreted as screen name. If it's still not found, None is returned.
    """

    if isinstance(name, basestring):
        name = tuple(name.split())

    sl = renpy.exports.scene_lists()

    # Look the screen up by tag first, and fall back to its full name.
    screen = sl.get_displayable_by_tag(layer, name[0])
    if screen is None:
        screen = sl.get_displayable_by_name(layer, name)

    return screen
def has_screen(name):
    """
    Returns true if a screen with the given name exists.
    """

    if not isinstance(name, tuple):
        name = tuple(name.split())

    if not name:
        return False

    return get_screen_variant(name[0]) is not None
def show_screen(_screen_name, _layer='screens', _tag=None, _widget_properties={}, _transient=False, **kwargs):
    """
    :doc: screens

    The programmatic equivalent of the show screen statement: shows the
    named screen.

    `_screen_name`
        The name of the screen to show.
    `_layer`
        The layer to show the screen on.
    `_tag`
        The tag to show the screen with. Defaults to the screen's own
        tag, and failing that to the screen's name.
    `_widget_properties`
        A map from the id of a widget to a property name -> property
        value map, applied to the widget when the screen shows it.
    `_transient`
        If true, the screen will be automatically hidden at the end of
        the current interaction.

    Keyword arguments not beginning with underscore (_) are used to
    initialize the screen's scope.
    """

    name = _screen_name
    if not isinstance(name, tuple):
        name = tuple(name.split())

    screen = get_screen_variant(name[0])

    if screen is None:
        raise Exception("Screen %s is not known.\n" % (name[0],))

    tag = _tag
    if tag is None:
        tag = screen.tag

    d = ScreenDisplayable(screen, tag, _layer, _widget_properties, kwargs)
    renpy.exports.show(name, tag=tag, what=d, layer=_layer, zorder=d.zorder, transient=_transient, munge_name=False)
def predict_screen(_screen_name, _widget_properties={}, **kwargs):
    """
    Predicts the displayables that make up the given screen.

    `_screen_name`
        The name of the screen to show.

    `_widget_properties`
        A map from the id of a widget to a property name -> property
        value map. When a widget with that id is shown by the screen,
        the specified properties are added to it.

    Keyword arguments not beginning with underscore (_) are used to
    initialize the screen's scope.

    Prediction is best-effort: any failure is swallowed (and optionally
    logged) rather than interrupting the game.
    """

    name = _screen_name

    if renpy.config.debug_image_cache:
        renpy.display.ic_log.write("Predict screen %s", name)

    if not isinstance(name, tuple):
        name = tuple(name.split())

    screen = get_screen_variant(name[0])

    try:

        if screen is None:
            raise Exception("Screen %s is not known.\n" % (name[0],))

        if not screen.predict:
            return

        d = ScreenDisplayable(screen, None, None, _widget_properties, kwargs)
        d.update()
        renpy.display.predict.displayable(d)

    # Deliberately broad: prediction failures must never crash the game.
    except:
        if renpy.config.debug_image_cache:
            import traceback
            print "While predicting screen", screen
            traceback.print_exc()

    # Always clear any partially-built ui state.
    renpy.ui.reset()
def hide_screen(tag, layer='screens'):
    """
    :doc: screens

    The programmatic equivalent of the hide screen statement: hides the
    screen with `tag` on `layer`. Does nothing when no such screen is
    currently shown.
    """

    screen = get_screen(tag, layer)

    if screen is None:
        return

    renpy.exports.hide(screen.tag, layer=layer)
def use_screen(_screen_name, _name=(), **kwargs):
    """
    Implements the `use` statement: runs the named screen's function
    inside the currently-updating screen, so its widgets become part of
    that screen.

    `_screen_name`
        The name of the screen to use.
    `_name`
        The name prefix used to qualify widget names from the used
        screen.

    Other keyword arguments are merged into the used screen's scope.
    """

    name = _screen_name
    if not isinstance(name, tuple):
        name = tuple(name.split())

    screen = get_screen_variant(name[0])

    if screen is None:
        raise Exception("Screen %r is not known." % name)

    old_transfers = _current_screen.old_transfers
    _current_screen.old_transfers = True

    scope = kwargs["_scope"].copy() or { }
    scope.update(kwargs)
    scope["_scope"] = scope
    scope["_name"] = (_name, name)

    try:
        screen.function(**scope)
    finally:
        # Bug fix: restore the flag even if the used screen's function
        # raises, so a failing `use` can't corrupt the enclosing
        # screen's transfer state.
        _current_screen.old_transfers = old_transfers
def current_screen():
    """
    Returns the ScreenDisplayable that is currently being updated,
    rendered, or handling an event, or None if no screen is active.
    """
    return _current_screen
def get_widget(screen, id, layer='screens'): #@ReservedAssignment
    """
    :doc: screens

    From the `screen` on `layer`, returns the widget with
    `id`. Returns None if the screen doesn't exist, or there is no
    widget with that id on the screen. A `screen` of None means the
    screen currently being updated.
    """

    if screen is None:
        screen = current_screen()
    else:
        screen = get_screen(screen, layer)

    if not isinstance(screen, ScreenDisplayable):
        return None

    # Make sure the widget map has been populated.
    if screen.child is None:
        screen.update()

    return screen.widgets.get(id, None)
# License: GPL-2.0
# jhbuild - a build script for GNOME 2.x
# Copyright (C) 2009 Frederic Peters
#
# goalreport.py: report GNOME modules status wrt various goals
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import re
import socket
import sys
import subprocess
import time
import types
import cPickle
import logging
from optparse import make_option
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import elementtree.ElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
try:
import curses
except ImportError:
curses = None
from jhbuild.errors import FatalError
import jhbuild.moduleset
from jhbuild.commands import Command, register_command
from jhbuild.modtypes import MetaModule
from jhbuild.utils import cmds
from jhbuild.utils import httpcache
# Terminal escape sequences used to bold/reset report output on a tty.
# Bug fix: `cmds` was never imported, so the NameError was silently
# swallowed by the bare except and bold output never worked; the import
# is now provided and the excepts narrowed so real bugs aren't hidden.
try:
    t_bold = cmds.get_output(['tput', 'bold'])
except Exception:
    t_bold = ''

try:
    t_reset = cmds.get_output(['tput', 'sgr0'])
except Exception:
    t_reset = ''
HTML_AT_TOP = '''<html>
<head>
<title>%(title)s</title>
<style type="text/css">
body {
font-family: sans-serif;
}
tfoot th, thead th {
font-weight: normal;
}
td.dunno { background: #aaa; }
td.todo-low { background: #fce94f; }
td.todo-average { background: #fcaf3e; }
td.todo-complex { background: #ef2929; }
td.ok { background: #8ae234; }
td.heading {
text-align: center;
background: #555753;
color: white;
font-weight: bold;
}
tbody th {
background: #d3d7cf;
text-align: left;
}
tbody td {
text-align: center;
}
tfoot td {
padding-top: 1em;
vertical-align: top;
}
tbody tr:hover th {
background: #2e3436;
color: #d3d7cf;
}
a.bug-closed {
text-decoration: line-through;
}
a.warn-bug-status::after {
content: " \\26A0";
color: #ef2929;
font-weight: bold;
}
a.has-patch {
padding-right: 20px;
background: transparent url(http://bugzilla.gnome.org/images/emblems/P.png) center right no-repeat;
}
</style>
</head>
<body>
'''
class ExcludedModuleException(Exception):
    """Raised by a check to signal that the module should be skipped."""
    pass
class CouldNotPerformCheckException(Exception):
    """Raised when a check cannot be run at all (e.g. missing data)."""
    pass
class Check:
    """
    Base class for a single goal check run against one module.

    A subclass's run() method sets `status` ('ok', 'todo' or 'dunno')
    and, for 'todo', a `complexity` of 'low', 'average' or 'complex'.
    """

    # Optional note shown with the report column header.
    header_note = None
    complexity = 'average'
    status = 'dunno'
    # Extra text shown next to the result, or None.
    result_comment = None
    # Module names this check never applies to.
    excluded_modules = []

    def __init__(self, config, module):
        # Excluded modules abort construction; callers treat the
        # exception as "skip this check for this module".
        if module.name in (self.excluded_modules or []):
            raise ExcludedModuleException()
        self.config = config
        self.module = module

    def fix_false_positive(self, false_positive):
        """Apply a false-positive annotation: 'n/a' excludes the module,
        any other truthy value forces the status to 'ok'."""
        if not false_positive:
            return
        if false_positive == 'n/a':
            raise ExcludedModuleException()
        self.status = 'ok'

    def create_from_args(cls, *args):
        """Factory hook: subclasses build specialized check classes from
        command-line arguments."""
        pass
    create_from_args = classmethod(create_from_args)
class ShellCheck(Check):
    """A check implemented as one or more shell commands run in the
    module's source directory; the total number of output lines maps to
    a todo complexity."""

    cmd = None
    cmds = None

    def run(self):
        if not self.cmds:
            self.cmds = [self.cmd]

        outputs = []
        rc = 0
        for command in self.cmds:
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True,
                                    cwd=self.module.branch.srcdir)
            outputs.append(proc.communicate()[0].strip())
            rc = proc.wait() or rc

        # An exit status of 1 marks the module as not applicable.
        if rc == 1:
            raise ExcludedModuleException()

        nb_lines = sum(len(output.splitlines()) for output in outputs)

        if nb_lines == 0:
            self.status = 'ok'
            return

        self.status = 'todo'
        if nb_lines <= 5:
            self.complexity = 'low'
        elif nb_lines <= 20:
            self.complexity = 'average'
        else:
            self.complexity = 'complex'

    def create_from_args(cls, arg):
        check_class = types.ClassType('ShellCheck (%s)' % arg.split('/')[-1],
                                      (cls,), {'cmd': arg})
        return check_class
    create_from_args = classmethod(create_from_args)
# Shell fragment matching C/C++ source and header files.
FIND_C = "find -name '*.[ch]' -or -name '*.cpp' -or -name '*.cc'"
class SymbolsCheck(Check):
    """
    A check that greps source files for uses of a fixed set of symbols;
    the number of distinct symbols found maps to a todo complexity.
    """

    def run(self):
        # The symbol must be surrounded by delimiter characters on both
        # sides. NOTE(review): this misses symbols at the very start or
        # end of a file/line-adjacent cases, and overlapping matches —
        # presumably acceptable for a heuristic report; confirm.
        symbol_regex = re.compile(r'[\s\(\){}\+\|&-](%s)[\s\(\){}\+\|&-]' % '|'.join(self.symbols))
        deprecated_and_used = {}
        try:
            for base, dirnames, filenames in os.walk(self.module.branch.srcdir):
                filenames = [x for x in filenames if \
                             os.path.splitext(x)[-1] in ('.c', '.cc', '.cpp', '.h', '.glade')]
                for filename in filenames:
                    for s in symbol_regex.findall(file(os.path.join(base, filename)).read()):
                        deprecated_and_used[s] = True
        except UnicodeDecodeError:
            # Undecodable source tree; skip the module entirely.
            raise ExcludedModuleException()
        self.bad_symbols = deprecated_and_used.keys()
        self.compute_status()

    def compute_status(self):
        # Map the number of offending symbols to a status/complexity.
        nb_symbols = len(self.bad_symbols)
        if nb_symbols == 0:
            self.status = 'ok'
        elif nb_symbols <= 5:
            self.status = 'todo'
            self.complexity = 'low'
        elif nb_symbols <= 20:
            self.status = 'todo'
            self.complexity = 'average'
        else:
            self.status = 'todo'
            self.complexity = 'complex'

        if self.status == 'todo':
            self.result_comment = ', '.join(sorted(self.bad_symbols))
        else:
            self.result_comment = None

    def fix_false_positive(self, false_positive):
        """Remove annotated false positives: '-' clears a single-symbol
        check; otherwise a comma-separated list of symbols is removed."""
        if not false_positive:
            return
        if len(self.symbols) == 1 and false_positive == '-':
            self.bad_symbols = []
            self.compute_status()
            return
        for symbol in false_positive.split(','):
            symbol = symbol.strip()
            if symbol in self.bad_symbols:
                self.bad_symbols.remove(symbol)
        self.compute_status()

    def create_from_args(cls, *args):
        new_class = types.ClassType('SymbolsCheck (%s)' % ', '.join(args),
                (cls,), {'symbols': args})
        return new_class
    create_from_args = classmethod(create_from_args)
class GrepCheck(Check):
    """A check that counts how many source files contain a literal
    string; the count maps to a todo complexity."""

    def run(self):
        self.nb_occurences = 0
        extensions = ('.c', '.cc', '.cpp', '.h', '.glade')
        try:
            for base, dirnames, filenames in os.walk(self.module.branch.srcdir):
                for filename in filenames:
                    if os.path.splitext(filename)[-1] not in extensions:
                        continue
                    if self.grep in file(os.path.join(base, filename)).read():
                        self.nb_occurences += 1
        except UnicodeDecodeError:
            # Undecodable source tree; skip the module entirely.
            raise ExcludedModuleException()
        self.compute_status()

    def compute_status(self):
        if self.nb_occurences == 0:
            self.status = 'ok'
            self.result_comment = None
            return

        self.status = 'todo'
        if self.nb_occurences <= 5:
            self.complexity = 'low'
        elif self.nb_occurences <= 20:
            self.complexity = 'average'
        else:
            self.complexity = 'complex'
        self.result_comment = self.nb_occurences

    def create_from_args(cls, *args):
        check_class = types.ClassType('GrepCheck (%s)' % ', '.join(args),
                                      (cls,), {'grep': args[0]})
        return check_class
    create_from_args = classmethod(create_from_args)
class FilenamesCheck(Check):
    """A check that flags a module when any of a set of file names is
    present anywhere in the source tree."""

    def run(self):
        self.found = False
        for base, dirnames, filenames in os.walk(self.module.branch.srcdir):
            if any(f in filenames for f in self.filenames):
                self.found = True
                break
        self.compute_status()

    def compute_status(self):
        if self.found:
            self.status = 'todo'
            self.complexity = 'average'
        else:
            self.status = 'ok'

    def create_from_args(cls, *args):
        check_class = types.ClassType('FilenamesCheck (%s)' % ', '.join(args),
                                      (cls,), {'filenames': args})
        return check_class
    create_from_args = classmethod(create_from_args)
class DeprecatedSymbolsCheck(SymbolsCheck):
    """
    A SymbolsCheck whose symbol list is loaded from devhelp index files,
    picking up keywords marked as deprecated.
    """

    # Class-level cache, keyed by the check's devhelp_filenames.
    # NOTE(review): assumes devhelp_filenames is hashable (a tuple) —
    # confirm against how these check classes are constructed.
    cached_symbols = {}

    def load_deprecated_symbols(self):
        if self.cached_symbols.get(self.devhelp_filenames):
            return self.cached_symbols.get(self.devhelp_filenames)
        symbols = []
        for devhelp_filename in self.devhelp_filenames:
            try:
                devhelp_path = os.path.join(self.config.devhelp_dirname, devhelp_filename)
                tree = ET.parse(devhelp_path)
            # Deliberately broad: any parse/IO failure means the check
            # cannot be performed for this module.
            except:
                raise CouldNotPerformCheckException()
            for keyword in tree.findall('//{http://www.devhelp.net/book}keyword'):
                if not keyword.attrib.has_key('deprecated'):
                    continue
                # Normalize "enum Foo" and "foo()" forms to plain names.
                name = keyword.attrib.get('name').replace('enum ', '').replace('()', '').strip()
                symbols.append(name)
        DeprecatedSymbolsCheck.cached_symbols[self.devhelp_filenames] = symbols
        return symbols
    # Exposed as the `symbols` attribute SymbolsCheck.run() expects.
    symbols = property(load_deprecated_symbols)
class cmd_goalreport(Command):
    """Render an HTML report of per-module status for a set of checks
    ("GNOME Goals"), optionally decorated with bug links and cached results."""
    doc = _('Report GNOME modules status wrt various goals')
    name = 'goalreport'

    # Subclasses / callers may pre-populate these.
    checks = None
    page_intro = None
    title = 'GNOME Goal Report'

    def __init__(self):
        Command.__init__(self, [
            make_option('-o', '--output', metavar='FILE',
                action='store', dest='output', default=None),
            make_option('--bugs-file', metavar='BUGFILE',
                action='store', dest='bugfile', default=None),
            make_option('--false-positives-file', metavar='FILE',
                action='store', dest='falsepositivesfile', default=None),
            make_option('--devhelp-dirname', metavar='DIR',
                action='store', dest='devhelp_dirname', default=None),
            make_option('--cache', metavar='FILE',
                action='store', dest='cache', default=None),
            make_option('--all-modules',
                action='store_true', dest='list_all_modules', default=False),
            make_option('--check', metavar='CHECK',
                action='append', dest='checks', default=[],
                help=_('check to perform')),
            ])

    def load_checks_from_options(self, checks):
        """Instantiate check objects from "ClassName:arg1,arg2" option strings."""
        self.checks = []
        for check_option in checks:
            check_class_name, args = check_option.split(':', 1)
            args = args.split(',')
            # Look the check class up by name in this module's globals.
            check_base_class = globals().get(check_class_name)
            check = check_base_class.create_from_args(*args)
            self.checks.append(check)

    def run(self, config, options, args, help=None):
        """Run every check against every selected module and emit the report."""
        if options.output:
            output = StringIO()
            global curses
            if curses and config.progress_bar:
                try:
                    curses.setupterm()
                except:
                    # terminal has no terminfo support; disable progress bar
                    curses = None
        else:
            output = sys.stdout
        if not self.checks:
            self.load_checks_from_options(options.checks)
        self.load_bugs(options.bugfile)
        self.load_false_positives(options.falsepositivesfile)

        config.devhelp_dirname = options.devhelp_dirname
        config.partial_build = False

        module_set = jhbuild.moduleset.load(config)
        if options.list_all_modules:
            self.module_list = module_set.modules.values()
        else:
            self.module_list = module_set.get_module_list(args or config.modules, config.skip)

        results = {}
        try:
            cachedir = os.path.join(os.environ['XDG_CACHE_HOME'], 'jhbuild')
        except KeyError:
            cachedir = os.path.join(os.environ['HOME'], '.cache','jhbuild')
        if options.cache:
            try:
                results = cPickle.load(file(os.path.join(cachedir, options.cache)))
            except:
                # no usable cache; start from scratch
                pass

        # Repeat the module-name column on the right when the table is wide.
        self.repeat_row_header = 0
        if len(self.checks) > 4:
            self.repeat_row_header = 1

        for module_num, mod in enumerate(self.module_list):
            if mod.type in ('meta', 'tarball'):
                continue
            if not mod.branch or not mod.branch.repository.__class__.__name__ in (
                    'SubversionRepository', 'GitRepository'):
                if not mod.moduleset_name.startswith('gnome-external-deps'):
                    continue

            if not os.path.exists(mod.branch.srcdir):
                continue

            tree_id = mod.branch.tree_id()
            # Cached results are valid only when the working tree is unchanged.
            valid_cache = (tree_id and results.get(mod.name, {}).get('tree-id') == tree_id)

            if not mod.name in results:
                results[mod.name] = {
                    'results': {}
                }
            results[mod.name]['tree-id'] = tree_id
            r = results[mod.name]['results']
            for check in self.checks:
                if valid_cache and check.__name__ in r:
                    continue

                try:
                    c = check(config, mod)
                except ExcludedModuleException:
                    continue

                if output != sys.stdout and config.progress_bar:
                    progress_percent = 1.0 * (module_num-1) / len(self.module_list)
                    msg = '%s: %s' % (mod.name, check.__name__)
                    self.display_status_line(progress_percent, module_num, msg)

                try:
                    c.run()
                except CouldNotPerformCheckException:
                    continue
                except ExcludedModuleException:
                    continue

                try:
                    c.fix_false_positive(self.false_positives.get((mod.name, check.__name__)))
                except ExcludedModuleException:
                    continue

                r[check.__name__] = [c.status, c.complexity, c.result_comment]

        if not os.path.exists(cachedir):
            os.makedirs(cachedir)
        if options.cache:
            cPickle.dump(results, file(os.path.join(cachedir, options.cache), 'w'))

        print >> output, HTML_AT_TOP % {'title': self.title}
        if self.page_intro:
            print >> output, self.page_intro
        print >> output, '<table>'
        print >> output, '<thead>'
        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'
        if [x for x in self.checks if x.header_note]:
            print >> output, '<tr><td></td>'
            for check in self.checks:
                print >> output, '<td>%s</td>' % (check.header_note or '')
            print >> output, '</tr>'
        print >> output, '</thead>'
        print >> output, '<tbody>'

        # Group modules by the meta-modules (suites) that pull them in.
        suites = []
        for module_key, module in module_set.modules.items():
            if not isinstance(module_set.get_module(module_key), MetaModule):
                continue
            if module_key.endswith('upcoming-deprecations'):
                # mark deprecated modules as processed, so they don't show in "Others"
                # NOTE(review): 'meta_key' looks undefined here (should it be
                # module_key?) and processed_modules is only assigned after
                # this loop -- this branch would raise NameError; verify.
                try:
                    metamodule = module_set.get_module(meta_key)
                except KeyError:
                    continue
                for module_name in metamodule.dependencies:
                    processed_modules[module_name] = True
            else:
                suites.append([module_key, module_key.replace('meta-', '')])

        processed_modules = {'gnome-common': True}

        not_other_module_names = []
        for suite_key, suite_label in suites:
            metamodule = module_set.get_module(suite_key)
            module_names = [x for x in metamodule.dependencies if x in results]
            if not module_names:
                continue
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, suite_label)
            for module_name in module_names:
                if module_name in not_other_module_names:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
                processed_modules[module_name] = True
            not_other_module_names.extend(module_names)

        # External dependencies get their own section, with version numbers.
        external_deps = [x for x in results.keys() if \
                         x in [y.name for y in self.module_list] and \
                         not x in processed_modules and \
                         module_set.get_module(x).moduleset_name.startswith('gnome-external-deps')]
        if external_deps:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'External Dependencies')
            for module_name in sorted(external_deps):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                try:
                    version = module_set.get_module(module_name).branch.version
                except:
                    version = None
                print >> output, self.get_mod_line(module_name, r, version_number=version)

        # Everything not covered above lands in "Others".
        other_module_names = [x for x in results.keys() if \
                              not x in processed_modules and not x in external_deps]
        if other_module_names:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'Others')
            for module_name in sorted(other_module_names):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
        print >> output, '</tbody>'
        print >> output, '<tfoot>'

        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'

        print >> output, self.get_stat_line(results, not_other_module_names)
        print >> output, '</tfoot>'
        print >> output, '</table>'

        if (options.bugfile and options.bugfile.startswith('http://')) or \
                (options.falsepositivesfile and options.falsepositivesfile.startswith('http://')):
            print >> output, '<div id="data">'
            print >> output, '<p>The following data sources are used:</p>'
            print >> output, '<ul>'
            # NOTE(review): the outer condition is an OR, but each branch
            # below dereferences its option unconditionally -- a None value
            # for one of them would raise AttributeError; verify.
            if options.bugfile.startswith('http://'):
                print >> output, ' <li><a href="%s">Bugs</a></li>' % options.bugfile
            if options.falsepositivesfile.startswith('http://'):
                print >> output, ' <li><a href="%s">False positives</a></li>' % options.falsepositivesfile
            print >> output, '</ul>'
            print >> output, '</div>'

        print >> output, '<div id="footer">'
        print >> output, 'Generated:', time.strftime('%Y-%m-%d %H:%M:%S %z')
        print >> output, 'on ', socket.getfqdn()
        print >> output, '</div>'

        print >> output, '</body>'
        print >> output, '</html>'

        if output != sys.stdout:
            file(options.output, 'w').write(output.getvalue())

        if output != sys.stdout and config.progress_bar:
            sys.stdout.write('\n')
            sys.stdout.flush()

    def get_mod_line(self, module_name, r, version_number=None):
        """Return one <tr> of HTML for *module_name* given its results dict."""
        s = []
        s.append('<tr>')
        if version_number:
            s.append('<th>%s (%s)</th>' % (module_name, version_number))
        else:
            s.append('<th>%s</th>' % module_name)
        for check in self.checks:
            ri = r.get(check.__name__)
            if not ri:
                classname = 'n-a'
                label = 'n/a'
                comment = ''
            else:
                classname = ri[0]
                if classname == 'todo':
                    # colour-code "todo" cells by complexity
                    classname += '-' + ri[1]
                    label = ri[1]
                else:
                    label = ri[0]
                comment = ri[2] or ''
            if label == 'ok':
                label = ''
            s.append('<td class="%s" title="%s">' % (classname, comment))
            k = (module_name, check.__name__)
            if k in self.bugs:
                # wrap the label in a link to the associated bug
                bug_classes = []
                if self.bug_status.get(self.bugs[k], {}).get('resolution'):
                    bug_classes.append('bug-closed')
                    if label:
                        bug_classes.append('warn-bug-status')
                if label and self.bug_status.get(self.bugs[k], {}).get('patch'):
                    bug_classes.append('has-patch')
                bug_class = ''
                if bug_classes:
                    bug_class = ' class="%s"' % ' '.join(bug_classes)
                if self.bugs[k].isdigit():
                    s.append('<a href="http://bugzilla.gnome.org/show_bug.cgi?id=%s"%s>' % (
                            self.bugs[k], bug_class))
                else:
                    s.append('<a href="%s"%s>' % (self.bugs[k], bug_class))
                if label == '':
                    label = 'done'
            s.append(label)
            if k in self.bugs:
                s.append('</a>')
            s.append('</td>')
        if self.repeat_row_header:
            s.append('<th>%s</th>' % module_name)
        s.append('</tr>')
        return '\n'.join(s)

    def get_stat_line(self, results, module_names):
        """Return the summary <tr> (counts per complexity, % fixed) for the footer."""
        s = []
        s.append('<tr>')
        s.append('<td>Stats<br/>(excluding "Others")</td>')
        for check in self.checks:
            s.append('<td>')
            for complexity in ('low', 'average', 'complex'):
                nb_modules = len([x for x in module_names if \
                                  results[x].get('results') and
                                  results[x]['results'].get(check.__name__) and
                                  results[x]['results'][check.__name__][0] == 'todo' and
                                  results[x]['results'][check.__name__][1] == complexity])
                s.append('%s: %s' % (complexity, nb_modules))
                s.append('<br/>')
            # ratio of checks with an associated bug that are now done
            nb_with_bugs = 0
            nb_with_bugs_done = 0
            for module_name in module_names:
                k = (module_name, check.__name__)
                if not k in self.bugs or not check.__name__ in results[module_name]['results']:
                    continue
                nb_with_bugs += 1
                if results[module_name]['results'][check.__name__][0] == 'ok':
                    nb_with_bugs_done += 1
            if nb_with_bugs:
                s.append('<br/>')
                s.append('fixed: %d%%' % (100.*nb_with_bugs_done/nb_with_bugs))
            s.append('</td>')
        s.append('<td></td>')
        s.append('</tr>')
        return '\n'.join(s)

    def load_bugs(self, filename):
        # Bug file format:
        #   $(module)/$(checkname) $(bugnumber)
        # Sample bug file:
        #   evolution/LibGnomeCanvas 571742
        #
        # Alternatively, the $(checkname) can be replaced by a column number,
        # like: evolution/col:2 543234
        #
        # also, if there is only a single check, the /$(checkname) part
        # can be skipped.
        self.bugs = {}
        if not filename:
            return
        if filename.startswith('http://'):
            # the live.gnome.org wiki needs ?action=raw to serve plain text
            if filename.startswith('http://live.gnome.org') and not filename.endswith('?action=raw'):
                filename += '?action=raw'
            try:
                filename = httpcache.load(filename, age=0)
            except Exception, e:
                logging.warning('could not download %s: %s' % (filename, e))
                return
        for line in file(filename):
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                continue
            part, bugnumber = line.split()
            if '/' in part:
                module_name, check = part.split('/')
                if check.startswith('col:'):
                    # column references are 1-based
                    check = self.checks[int(check[4:])-1].__name__
            elif len(self.checks) == 1:
                module_name = part
                check = self.checks[0].__name__
            else:
                continue
            self.bugs[(module_name, check)] = bugnumber

        # Fetch open/closed + has-patch status for all numeric bug ids in one
        # Bugzilla query.
        self.bug_status = {}

        bug_status = httpcache.load(
                'http://bugzilla.gnome.org/show_bug.cgi?%s&'
                'ctype=xml&field=bug_id&field=bug_status&field=emblems&'
                'field=resolution' % '&'.join(['id=' + x for x in self.bugs.values() if x.isdigit()]),
                age=0)
        tree = ET.parse(bug_status)
        for bug in tree.findall('bug'):
            bug_id = bug.find('bug_id').text
            bug_resolved = (bug.find('resolution') is not None)
            bug_has_patch = (bug.find('emblems') is not None and 'P' in bug.find('emblems').text)
            self.bug_status[bug_id] = {
                'resolution': bug_resolved,
                'patch': bug_has_patch,
            }

    def load_false_positives(self, filename):
        """Load "module/check extra" lines marking known false positives."""
        self.false_positives = {}
        if not filename:
            return
        if filename.startswith('http://'):
            if filename.startswith('http://live.gnome.org') and not filename.endswith('?action=raw'):
                filename += '?action=raw'
            try:
                filename = httpcache.load(filename, age=0)
            except Exception, e:
                logging.warning('could not download %s: %s' % (filename, e))
                return
        for line in file(filename):
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                continue
            if ' ' in line:
                part, extra = line.split(' ', 1)
            else:
                part, extra = line, '-'
            if '/' in part:
                module_name, check = part.split('/')
                if check.startswith('col:'):
                    check = self.checks[int(check[4:])-1].__name__
            elif len(self.checks) == 1:
                module_name = part
                check = self.checks[0].__name__
            else:
                continue
            self.false_positives[(module_name, check)] = extra

    def display_status_line(self, progress, module_num, message):
        """Draw a one-line progress bar ([===---] [ 3/12] msg) on stdout."""
        if not curses:
            return
        columns = curses.tigetnum('cols')
        width = columns / 2
        num_hashes = int(round(progress * width))
        progress_bar = '[' + (num_hashes * '=') + ((width - num_hashes) * '-') + ']'

        module_no_digits = len(str(len(self.module_list)))
        format_str = '%%%dd' % module_no_digits
        module_pos = '[' + format_str % (module_num+1) + '/' + format_str % len(self.module_list) + ']'

        output = '%s %s %s%s%s' % (progress_bar, module_pos, t_bold, message, t_reset)
        if len(output) > columns:
            output = output[:columns]
        else:
            output += ' ' * (columns-len(output))

        sys.stdout.write(output + '\r')
        sys.stdout.flush()

register_command(cmd_goalreport)
| gpl-2.0 |
ic-labs/django-icekit | icekit/plugins/page_anchor/tests.py | 1 | 2333 | from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class PageAnchorItemTestCase(WebTest):
    """End-to-end tests for the ``PageAnchorItem`` plugin.

    Publishes a layout page carrying two anchors and verifies the rendered
    HTML contains one slugified, distinct ``id`` per anchor.
    """

    def setUp(self):
        # Layout restricted to LayoutPage so the test page may use it.
        self.layout_1 = G(
            Layout,
            template_name='layout_page/layoutpage/layouts/default.html',
        )
        self.layout_1.content_types.add(
            ContentType.objects.get_for_model(LayoutPage))
        self.layout_1.save()
        # Superuser acting as the page author.
        self.staff_1 = User.objects.create(
            email='test@test.com',
            is_staff=True,
            is_active=True,
            is_superuser=True,
        )
        self.page_1 = LayoutPage()
        self.page_1.title = 'Test Page'
        self.page_1.slug = 'test-page'
        self.page_1.parent_site = Site.objects.first()
        self.page_1.layout = self.layout_1
        self.page_1.author = self.staff_1
        self.page_1.status = 'p' # Publish the page
        self.page_1.save()
        # Two anchors whose names slugify to different ids.
        self.anchor_1 = fluent_contents.create_content_instance(
            models.PageAnchorItem,
            self.page_1,
            anchor_name='Jump Link'
        )
        self.anchor_2 = fluent_contents.create_content_instance(
            models.PageAnchorItem,
            self.page_1,
            anchor_name='Second Jump Link'
        )
        self.page_1.publish()
        self.page_1_published = self.page_1.get_published()

    def test_renders_anchor(self):
        """The first anchor renders with slugified id "jump-link"."""
        response = self.app.get(self.page_1_published.get_absolute_url())
        # Although unused afterwards, this .get() doubles as an existence
        # assertion: it raises DoesNotExist if the anchor was not copied to
        # the published page.
        published_anchor_1 = self.page_1_published.contentitem_set.get(
            pageanchoritem__anchor_name='Jump Link')
        response.mustcontain(
            '<a class="page-anchor" id="jump-link"></a>')

    def test_increments_anchor_id(self):
        """The second anchor gets its own slugified id (no clash with the first)."""
        response = self.app.get(self.page_1_published.get_absolute_url())
        # Existence assertion, as in test_renders_anchor.
        published_anchor_1 = self.page_1_published.contentitem_set.get(
            pageanchoritem__anchor_name='Jump Link')
        response.mustcontain(
            '<a class="page-anchor" id="second-jump-link"></a>')
| mit |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/sandbox.py | 109 | 14210 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
# Alias the real OS-specific module: on Jython the posix module lives in a
# Java package; elsewhere it is the module 'os' delegates to (posix/nt).
if sys.platform.startswith('java'):
    import org.python.modules.posix.PosixModule as _os
else:
    _os = sys.modules[os.name]
try:
    _file = file
except NameError:
    # Python 3: the 'file' builtin no longer exists; only open() is patched.
    _file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set

__all__ = [
    "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
    """Snapshot ``sys.argv``, optionally replacing it with *repl*, and
    restore the snapshot when the context exits. Yields the snapshot."""
    original = list(sys.argv)
    if repl is not None:
        sys.argv[:] = repl
    try:
        yield original
    finally:
        sys.argv[:] = original
@contextlib.contextmanager
def save_path():
    """Snapshot ``sys.path`` and restore it when the context exits.
    Yields the snapshot."""
    original = list(sys.path)
    try:
        yield original
    finally:
        sys.path[:] = original
@contextlib.contextmanager
def override_temp(replacement):
    """
    Monkey-patch tempfile.tempdir with replacement, ensuring it exists
    """
    if not os.path.isdir(replacement):
        os.makedirs(replacement)
    previous, tempfile.tempdir = tempfile.tempdir, replacement
    try:
        yield
    finally:
        tempfile.tempdir = previous
@contextlib.contextmanager
def pushd(target):
    """chdir into *target* for the duration of the context.
    Yields the directory that was current on entry."""
    previous = os.getcwd()
    os.chdir(target)
    try:
        yield previous
    finally:
        os.chdir(previous)
class UnpickleableException(Exception):
    """
    An exception representing another Exception that could not be pickled.
    """

    @staticmethod
    def dump(type, exc):
        """
        Always return a dumped (pickled) type and exc. If exc can't be pickled,
        wrap it in UnpickleableException first.
        """
        try:
            dumped_type = pickle.dumps(type)
            dumped_exc = pickle.dumps(exc)
        except Exception:
            # get UnpickleableException inside the sandbox
            from setuptools.sandbox import UnpickleableException as cls
            return cls.dump(cls, cls(repr(exc)))
        return dumped_type, dumped_exc
class ExceptionSaver:
    """
    A Context Manager that will save an exception, serialized, and restore it
    later.
    """

    def __enter__(self):
        return self

    def __exit__(self, type, exc, tb):
        if not exc:
            return
        # serialize the exception now; resume() re-raises it later
        self._saved = UnpickleableException.dump(type, exc)
        self._tb = tb
        # suppress the exception for the time being
        return True

    def resume(self):
        "restore and re-raise any exception"
        saved = vars(self).get('_saved')
        if saved is None:
            return
        type, exc = [pickle.loads(item) for item in saved]
        six.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
    """
    Context in which imported modules are saved.

    Translates exceptions internal to the context into the equivalent exception
    outside the context.
    """
    saved = sys.modules.copy()
    # Capture any exception raised inside the block so it can be re-raised
    # only after sys.modules has been restored.
    with ExceptionSaver() as saved_exc:
        yield saved

    sys.modules.update(saved)
    # remove any modules imported since
    del_modules = (
        mod_name for mod_name in sys.modules
        if mod_name not in saved
        # exclude any encodings modules. See #285
        and not mod_name.startswith('encodings.')
    )
    _clear_modules(del_modules)

    saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
    """Snapshot pkg_resources' global state and restore it on exit.
    Yields the snapshot."""
    state = pkg_resources.__getstate__()
    try:
        yield state
    finally:
        pkg_resources.__setstate__(state)
@contextlib.contextmanager
def setup_context(setup_dir):
    """Prepare an isolated environment for running a setup script rooted at
    *setup_dir*: pkg_resources state, sys.modules, sys.path and sys.argv are
    all saved/restored, temp files go under the setup dir, and the cwd is
    switched to it for the duration of the context."""
    temp_dir = os.path.join(setup_dir, 'temp')
    with save_pkg_resources_state():
        with save_modules():
            # Drop any already-imported setuptools so the target's own
            # (possibly different) setuptools can be imported instead.
            hide_setuptools()
            with save_path():
                with save_argv():
                    with override_temp(temp_dir):
                        with pushd(setup_dir):
                            # ensure setuptools commands are available
                            __import__('setuptools')
                            yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
    """
    Remove references to setuptools' modules from sys.modules to allow the
    invocation to import the most appropriate setuptools. This technique is
    necessary to avoid issues such as #315 where setuptools upgrading itself
    would fail to find a function declared in the metadata.
    """
    doomed = (name for name in list(sys.modules) if _needs_hiding(name))
    _clear_modules(doomed)
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            # Make the script believe it was invoked as "setup.py <args>".
            sys.argv[:] = [setup_script]+list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist:dist.activate())
            def runner():
                # Execute the script as if it were __main__.
                ns = dict(__file__=setup_script, __name__='__main__')
                _execfile(setup_script, ns)
            DirectorySandbox(setup_dir).run(runner)
        except SystemExit as v:
            # Re-raise only a non-zero (error) exit status.
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""

    # True only while run() is executing the sandboxed callable.
    _active = False

    def __init__(self):
        # Record which attributes of the real OS module this sandbox shadows.
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self,name)
        ]

    def _copy(self, source):
        # Point os.<name> at source.<name> for every shadowed attribute.
        for name in self._attrs:
            setattr(os, name, getattr(source,name))

    def run(self, func):
        """Run 'func' under os sandboxing"""
        try:
            self._copy(self)
            if _file:
                builtins.file = self._file
            builtins.open = self._open
            self._active = True
            return func()
        finally:
            # Always restore the pristine os module and builtins.
            self._active = False
            if _file:
                builtins.file = _file
            builtins.open = _open
            self._copy(_os)

    # The helpers below run at class-definition time; each for-loop installs
    # generated wrapper methods into the class namespace via locals().
    # NOTE: mutating locals() in a class body works in CPython; presumably
    # relied upon here -- verify before porting to another interpreter.
    def _mk_dual_path_wrapper(name):
        # Wrapper for os functions taking two paths (rename/link/symlink).
        original = getattr(_os,name)
        def wrap(self,src,dst,*args,**kw):
            if self._active:
                src,dst = self._remap_pair(name,src,dst,*args,**kw)
            return original(src,dst,*args,**kw)
        return wrap

    for name in ["rename", "link", "symlink"]:
        if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)

    def _mk_single_path_wrapper(name, original=None):
        # Wrapper for os functions taking one path argument.
        original = original or getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
            return original(path,*args,**kw)
        return wrap

    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)
    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)

    def _mk_single_with_return(name):
        # Wrapper for one-path os functions whose return value is also a path.
        original = getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
                return self._remap_output(name, original(path,*args,**kw))
            return original(path,*args,**kw)
        return wrap

    for name in ['readlink', 'tempnam']:
        if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)

    def _mk_query(name):
        # Wrapper for no-path os functions whose return value is a path.
        original = getattr(_os,name)
        def wrap(self,*args,**kw):
            retval = original(*args,**kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval
        return wrap

    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os,name): locals()[name] = _mk_query(name)

    def _validate_path(self,path):
        """Called to remap or validate any path, whether input or output"""
        return path

    def _remap_input(self,operation,path,*args,**kw):
        """Called for path inputs"""
        return self._validate_path(path)

    def _remap_output(self,operation,path):
        """Called for path outputs"""
        return self._validate_path(path)

    def _remap_pair(self,operation,src,dst,*args,**kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation+'-from',src,*args,**kw),
            self._remap_input(operation+'-to',dst,*args,**kw)
        )
# Paths that are always writable, even inside the sandbox.
if hasattr(os, 'devnull'):
    _EXCEPTIONS = [os.devnull,]
else:
    _EXCEPTIONS = []

try:
    # pywin32 generates cache files at runtime; exempt its output directory.
    from win32com.client.gencache import GetGeneratePath
    _EXCEPTIONS.append(GetGeneratePath())
    del GetGeneratePath
except ImportError:
    # it appears pywin32 is not installed, so no need to exclude.
    pass
class DirectorySandbox(AbstractSandbox):
    """Restrict operations to a single subdirectory - pseudo-chroot"""

    # Operations considered writes; they are refused outside the sandbox.
    write_ops = dict.fromkeys([
        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
    ])

    _exception_patterns = [
        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
        '.*lib2to3.*\.pickle$',
    ]
    "exempt writing to paths that match the pattern"

    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
        # Normalize once so later prefix comparisons are canonical.
        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
        self._prefix = os.path.join(self._sandbox,'')
        self._exceptions = [
            os.path.normcase(os.path.realpath(path))
            for path in exceptions
        ]
        AbstractSandbox.__init__(self)

    def _violation(self, operation, *args, **kw):
        """Abort the offending operation with a SandboxViolation."""
        from setuptools.sandbox import SandboxViolation
        raise SandboxViolation(operation, args, kw)

    if _file:
        def _file(self, path, mode='r', *args, **kw):
            # Writes (any non-read mode) must stay inside the sandbox.
            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
                self._violation("file", path, mode, *args, **kw)
            return _file(path,mode,*args,**kw)

    def _open(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
            self._violation("open", path, mode, *args, **kw)
        return _open(path,mode,*args,**kw)

    def tmpnam(self):
        # os.tmpnam would produce paths outside the sandbox; forbid it.
        self._violation("tmpnam")

    def _ok(self, path):
        """Return True if *path* resolves inside the sandbox or an exemption."""
        active = self._active
        try:
            # Temporarily deactivate so realpath() uses the real os functions.
            self._active = False
            realpath = os.path.normcase(os.path.realpath(path))
            return (
                self._exempted(realpath)
                or realpath == self._sandbox
                or realpath.startswith(self._prefix)
            )
        finally:
            self._active = active

    def _exempted(self, filepath):
        # Exempt by literal path prefix or by regex pattern.
        start_matches = (
            filepath.startswith(exception)
            for exception in self._exceptions
        )
        pattern_matches = (
            re.match(pattern, filepath)
            for pattern in self._exception_patterns
        )
        candidates = itertools.chain(start_matches, pattern_matches)
        return any(candidates)

    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        if operation in self.write_ops and not self._ok(path):
            self._violation(operation, os.path.realpath(path), *args, **kw)
        return path

    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        if not self._ok(src) or not self._ok(dst):
            self._violation(operation, src, dst, *args, **kw)
        return (src,dst)

    def open(self, file, flags, mode=0o777, *args, **kw):
        """Called for low-level os.open()"""
        if flags & WRITE_FLAGS and not self._ok(file):
            self._violation("os.open", file, flags, mode, *args, **kw)
        return _os.open(file,flags,mode, *args, **kw)
# Bitmask of os.open() flag bits that imply write access; flags missing on
# this platform contribute 0.
WRITE_FLAGS = 0
for _flag_name in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split():
    WRITE_FLAGS |= getattr(_os, _flag_name, 0)
del _flag_name
class SandboxViolation(DistutilsError):
    """A setup script attempted to modify the filesystem outside the sandbox"""

    def __str__(self):
        # self.args is (operation, positional-args, keyword-args) as passed
        # by DirectorySandbox._violation().
        operation, call_args, call_kw = self.args
        return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % (
            operation, call_args, call_kw)
#
| mit |
XiaosongWei/chromium-crosswalk | tools/telemetry/telemetry/internal/browser/browser_credentials.py | 9 | 5610 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
from telemetry.core import util
from telemetry.internal.backends import codepen_credentials_backend
from telemetry.internal.backends import facebook_credentials_backend
from telemetry.internal.backends import google_credentials_backend
from telemetry.testing import options_for_unittests
class CredentialsError(Exception):
  """Error that can be thrown when logging in."""
class BrowserCredentials(object):
  """Holds per-site login credentials and the backends that use them.

  Credential values are merged from three sources, in increasing priority:
  the file at |credentials_path|, ~/.telemetry-credentials, and any values
  registered programmatically via Add().
  """

  def __init__(self, backends=None):
    self._credentials = {}
    self._credentials_path = None
    self._extra_credentials = {}

    if backends is None:
      backends = [
          codepen_credentials_backend.CodePenCredentialsBackend(),
          facebook_credentials_backend.FacebookCredentialsBackend(),
          facebook_credentials_backend.FacebookCredentialsBackend2(),
          google_credentials_backend.GoogleCredentialsBackend(),
          google_credentials_backend.GoogleCredentialsBackend2()]

    self._backends = {}
    for backend in backends:
      self._backends[backend.credentials_type] = backend

  def AddBackend(self, backend):
    """Registers an extra backend; its credentials_type must be new."""
    assert backend.credentials_type not in self._backends
    self._backends[backend.credentials_type] = backend

  def IsLoggedIn(self, credentials_type):
    """Returns whether the backend for credentials_type reports a login.

    Raises CredentialsError for an unknown credentials_type.
    """
    if credentials_type not in self._backends:
      # Bug fix: the original passed credentials_type as a second exception
      # argument (logging-style comma) so the message was never interpolated.
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    if credentials_type not in self._credentials:
      return False
    return self._backends[credentials_type].IsLoggedIn()

  def CanLogin(self, credentials_type):
    """Returns True when credential data exists for credentials_type."""
    if credentials_type not in self._backends:
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    return credentials_type in self._credentials

  def LoginNeeded(self, tab, credentials_type):
    """Logs in via the backend if credentials exist; returns its result."""
    if credentials_type not in self._backends:
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    if credentials_type not in self._credentials:
      return False
    # Imported lazily to avoid a module-level import cycle.
    from telemetry.page import action_runner
    runner = action_runner.ActionRunner(tab)
    return self._backends[credentials_type].LoginNeeded(
        tab, runner, self._credentials[credentials_type])

  def LoginNoLongerNeeded(self, tab, credentials_type):
    """Notifies the backend that the login session is finished."""
    assert credentials_type in self._backends
    self._backends[credentials_type].LoginNoLongerNeeded(tab)

  @property
  def credentials_path(self):
    return self._credentials_path

  @credentials_path.setter
  def credentials_path(self, credentials_path):
    self._credentials_path = credentials_path
    self._RebuildCredentials()

  def Add(self, credentials_type, data):
    """Merges dict |data| into the extra (highest-priority) credentials."""
    if credentials_type not in self._extra_credentials:
      self._extra_credentials[credentials_type] = {}
    for k, v in data.items():
      assert k not in self._extra_credentials[credentials_type]
      self._extra_credentials[credentials_type][k] = v
    self._RebuildCredentials()

  def _ResetLoggedInState(self):
    """Makes the backends think we're not logged in even though we are.
    Should only be used in unit tests to simulate --dont-override-profile.
    """
    for backend in self._backends.keys():
      # pylint: disable=protected-access
      self._backends[backend]._ResetLoggedInState()

  def _RebuildCredentials(self):
    """Recomputes self._credentials from all three sources (see class doc)."""
    credentials = {}
    if self._credentials_path is None:
      pass
    elif os.path.exists(self._credentials_path):
      with open(self._credentials_path, 'r') as f:
        credentials = json.loads(f.read())

    # TODO(nduca): use system keychain, if possible.
    homedir_credentials_path = os.path.expanduser('~/.telemetry-credentials')
    homedir_credentials = {}

    if (not options_for_unittests.GetCopy() and
        os.path.exists(homedir_credentials_path)):
      logging.info("Found ~/.telemetry-credentials. Its contents will be used "
                   "when no other credentials can be found.")
      with open(homedir_credentials_path, 'r') as f:
        homedir_credentials = json.loads(f.read())

    self._credentials = {}
    all_keys = set(credentials.keys()).union(
        homedir_credentials.keys()).union(
            self._extra_credentials.keys())

    # Later assignments win: file < homedir < extra credentials.
    for k in all_keys:
      if k in credentials:
        self._credentials[k] = credentials[k]
      if k in homedir_credentials:
        logging.info("Will use ~/.telemetry-credentials for %s logins." % k)
        self._credentials[k] = homedir_credentials[k]
      if k in self._extra_credentials:
        self._credentials[k] = self._extra_credentials[k]

  def WarnIfMissingCredentials(self, page):
    """Logs a how-to-fix warning when |page| needs credentials we lack."""
    if page.credentials and not self.CanLogin(page.credentials):
      files_to_tweak = []
      if page.credentials_path:
        files_to_tweak.append(page.credentials_path)
      files_to_tweak.append('~/.telemetry-credentials')

      example_credentials_file = os.path.join(
          util.GetTelemetryDir(), 'examples', 'credentials_example.json')

      logging.warning("""
Credentials for %s were not found. page %s will not be tested.
To fix this, either follow the instructions to authenticate to gsutil
here:
http://www.chromium.org/developers/telemetry/upload_to_cloud_storage,
or add your own credentials to:
%s
An example credentials file you can copy from is here:
%s\n""" % (page.credentials, page, ' or '.join(files_to_tweak),
           example_credentials_file))
| bsd-3-clause |
GaZ3ll3/scikit-image | doc/release/contribs.py | 24 | 1448 | #!/usr/bin/env python
import subprocess
import sys
import string
import shlex
# Require exactly one CLI argument: the git tag of the previous release.
if len(sys.argv) != 2:
    print("Usage: ./contributors.py tag-of-previous-release")
    sys.exit(-1)

tag = sys.argv[1]
def call(cmd):
    """Run *cmd* (a shell-style command string) and return its stdout
    split into a list of lines."""
    output = subprocess.check_output(shlex.split(cmd), universal_newlines=True)
    return output.split('\n')
# Date of the previous release (the %%ci survives str %-formatting as %ci).
tag_date = call("git show --format='%%ci' %s" % tag)[0]
print("Release %s was on %s\n" % (tag, tag_date))

# Collect merge commits since the tag; each '>>>' chunk is one merge whose
# first line is the merge subject and second line the PR title.
merges = call("git log --since='%s' --merges --format='>>>%%B' --reverse" % tag_date)
merges = [m for m in merges if m.strip()]
merges = '\n'.join(merges).split('>>>')
merges = [m.split('\n')[:2] for m in merges]
merges = [m for m in merges if len(m) == 2 and m[1].strip()]

num_commits = call("git rev-list %s..HEAD --count" % tag)[0]
print("A total of %s changes have been committed.\n" % num_commits)

print("It contained the following %d merges:\n" % len(merges))
for (merge, message) in merges:
    if merge.startswith('Merge pull request #'):
        # word 3 of "Merge pull request #NNN from ..." is the PR number
        PR = ' (%s)' % merge.split()[3]
    else:
        PR = ''

    print('- ' + message + PR)

print("\nMade by the following committers [alphabetical by last name]:\n")

authors = call("git log --since='%s' --format=%%aN" % tag_date)
authors = [a.strip() for a in authors if a.strip()]
def key(author):
    """Sort key for an author name: the last word starting with an ASCII
    letter (an approximation of the last name).

    Fix: the original raised IndexError for names with no ASCII-lettered
    word (e.g. names written entirely in a non-Latin script); such names
    now sort by the full name string instead of crashing.
    """
    words = [v for v in author.split() if v[0] in string.ascii_letters]
    return words[-1] if words else author
# De-duplicate and order by (approximate) last name, then list them.
authors = sorted(set(authors), key=key)
for contributor in authors:
    print('- ' + contributor)
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/series.py | 1 | 89595 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import types
import warnings
from numpy import nan, ndarray
import numpy as np
import numpy.ma as ma
from pandas.core.common import (isnull, notnull, is_bool_indexer,
_default_index, _maybe_upcast,
_asarray_tuplesafe, _infer_dtype_from_scalar,
is_list_like, _values_from_object,
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform, _try_sort,
ABCSparseArray, _maybe_match_name, _coerce_to_dtype,
_ensure_object, SettingWithCopyError,
_maybe_box_datetimelike, ABCDataFrame)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical, CategoricalAccessor
from pandas.tseries.common import (maybe_to_datetimelike,
CombinedDatetimelikeProperties)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex, Period
from pandas import compat
from pandas.util.terminal import get_terminal_size
from pandas.compat import zip, u, OrderedDict, StringIO
import pandas.core.ops as ops
from pandas.core.algorithms import select_n
import pandas.core.common as com
import pandas.core.datetools as datetools
import pandas.core.format as fmt
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, cache_readonly
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from numpy import percentile as _quantile
from pandas.core.config import get_option
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index',
klass='Series',
axes_single_arg="{0, 'index'}",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
duplicated='Series'
)
def _coerce_method(converter):
""" install the scalar coercion methods """
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError(
"cannot convert the series to {0}".format(str(converter)))
return wrapper
#----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be any hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN)
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
Values must be unique and hashable, same length as data. Index
object (or other iterable of same length as data) Will default to
np.arange(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_allow_index_ops = True
    def __init__(self, data=None, index=None, dtype=None, name=None,
                 copy=False, fastpath=False):
        """Construct a Series; see the class docstring for parameters.

        Dispatches on the type of *data* (ndarray, Series, dict, Index,
        manager, Categorical, generator, scalar) before handing a
        SingleBlockManager to NDFrame.__init__.
        """
        # we are called internally, so short-circuit
        if fastpath:
            # data is an ndarray, index is defined
            if not isinstance(data, SingleBlockManager):
                data = SingleBlockManager(data, index, fastpath=True)
            if copy:
                data = data.copy()
            if index is None:
                index = data.index
        else:
            if index is not None:
                index = _ensure_index(index)
            if data is None:
                data = {}
            if dtype is not None:
                dtype = self._validate_dtype(dtype)
            if isinstance(data, MultiIndex):
                raise NotImplementedError("initializing a Series from a "
                                          "MultiIndex is not supported")
            elif isinstance(data, Index):
                # need to copy to avoid aliasing issues
                if name is None:
                    name = data.name
                data = data._to_embed(keep_tz=True)
                copy = True
            elif isinstance(data, np.ndarray):
                pass
            elif isinstance(data, Series):
                if name is None:
                    name = data.name
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
                data = data._data
            elif isinstance(data, dict):
                # default index is the (sorted) dict keys
                if index is None:
                    if isinstance(data, OrderedDict):
                        index = Index(data)
                    else:
                        index = Index(_try_sort(data))
                try:
                    if isinstance(index, DatetimeIndex):
                        # coerce back to datetime objects for lookup
                        data = lib.fast_multiget(data, index.astype('O'),
                                                 default=np.nan)
                    elif isinstance(index, PeriodIndex):
                        data = [data.get(i, nan) for i in index]
                    else:
                        data = lib.fast_multiget(data, index.values,
                                                 default=np.nan)
                except TypeError:
                    # unhashable-key fallback: per-element lookup
                    data = [data.get(i, nan) for i in index]
            elif isinstance(data, SingleBlockManager):
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
            elif isinstance(data, Categorical):
                if dtype is not None:
                    raise ValueError("cannot specify a dtype with a Categorical")
                if name is None:
                    name = data.name
            elif (isinstance(data, types.GeneratorType) or
                  (compat.PY3 and isinstance(data, map))):
                data = list(data)
            elif isinstance(data, (set, frozenset)):
                raise TypeError("{0!r} type is unordered"
                                "".format(data.__class__.__name__))
            else:
                # handle sparse passed here (and force conversion)
                if isinstance(data, ABCSparseArray):
                    data = data.to_dense()
            if index is None:
                if not is_list_like(data):
                    data = [data]
                index = _default_index(len(data))
            # create/copy the manager
            if isinstance(data, SingleBlockManager):
                if dtype is not None:
                    data = data.astype(dtype=dtype, raise_on_error=False)
                elif copy:
                    data = data.copy()
            else:
                data = _sanitize_array(data, index, dtype, copy,
                                       raise_cast_failure=True)
                data = SingleBlockManager(data, index, fastpath=True)
        generic.NDFrame.__init__(self, data, fastpath=True)
        object.__setattr__(self, 'name', name)
        self._set_axis(0, index, fastpath=True)
    @classmethod
    def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
                   fastpath=False):
        """Construct a Series from an array; sparse input yields SparseSeries."""
        # return a sparse series here
        if isinstance(arr, ABCSparseArray):
            from pandas.sparse.series import SparseSeries
            cls = SparseSeries
        return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)
    @property
    def _constructor(self):
        # class used to build same-dimensional results
        return Series
    @property
    def _constructor_expanddim(self):
        # class used when a result gains a dimension (Series -> DataFrame)
        from pandas.core.frame import DataFrame
        return DataFrame
    # types
    @property
    def _can_hold_na(self):
        # delegated to the block manager (dtype dependent)
        return self._data._can_hold_na
    @property
    def is_time_series(self):
        """True when the series was tagged as datetime/period indexed."""
        return self._subtyp in ['time_series', 'sparse_time_series']
    _index = None
    def _set_axis(self, axis, labels, fastpath=False):
        """ override generic, we want to set the _typ here """
        if not fastpath:
            labels = _ensure_index(labels)
        is_all_dates = labels.is_all_dates
        if is_all_dates:
            # promote a plain Index holding dates to a DatetimeIndex
            if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
                labels = DatetimeIndex(labels)
                # need to set here because we changed the index
                if fastpath:
                    self._data.set_axis(axis, labels)
        self._set_subtyp(is_all_dates)
        object.__setattr__(self, '_index', labels)
        if not fastpath:
            self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
    def _update_inplace(self, result, **kwargs):
        """Replace our data with *result* in place (NDFrame behavior)."""
        # we want to call the generic version and not the IndexOpsMixin
        return generic.NDFrame._update_inplace(self, result, **kwargs)
    # ndarray compatibility
    @property
    def dtype(self):
        """ return the dtype object of the underlying data """
        return self._data.dtype
    @property
    def dtypes(self):
        """ return the dtype object of the underlying data """
        # plural alias kept for DataFrame API symmetry
        return self._data.dtype
    @property
    def ftype(self):
        """ return if the data is sparse|dense """
        return self._data.ftype
    @property
    def ftypes(self):
        """ return if the data is sparse|dense """
        # plural alias kept for DataFrame API symmetry
        return self._data.ftype
    @property
    def values(self):
        """
        Return Series as ndarray

        Returns
        -------
        arr : numpy.ndarray
        """
        return self._data.values
    def get_values(self):
        """ same as values (but handles sparseness conversions); is a view """
        return self._data.get_values()
    # ops
    def ravel(self, order='C'):
        """
        Return the flattened underlying data as an ndarray

        See also
        --------
        numpy.ndarray.ravel
        """
        return self.values.ravel(order=order)
    def compress(self, condition, axis=0, out=None, **kwargs):
        """
        Return selected slices of an array along given axis as a Series

        See also
        --------
        numpy.ndarray.compress
        """
        # axis/out/kwargs accepted only for numpy signature compatibility
        return self[condition]
    def nonzero(self):
        """
        Return the indices of the elements that are non-zero

        This method is equivalent to calling `numpy.nonzero` on the
        series data. For compatibility with NumPy, the return value is
        the same (a tuple with an array of indices for each dimension),
        but it will always be a one-item tuple because series only have
        one dimension.

        Examples
        --------
        >>> s = pd.Series([0, 3, 0, 4])
        >>> s.nonzero()
        (array([1, 3]),)
        >>> s.iloc[s.nonzero()[0]]
        1    3
        3    4
        dtype: int64

        See Also
        --------
        numpy.nonzero
        """
        return self.values.nonzero()
    def put(self, *args, **kwargs):
        """
        Apply the `put` method to the underlying ndarray, modifying the
        values in place; returns None.

        See also
        --------
        numpy.ndarray.put
        """
        self.values.put(*args, **kwargs)
    def __len__(self):
        """
        return the length of the Series
        """
        return len(self._data)
    def view(self, dtype=None):
        """Return a new Series viewing the same memory as another dtype."""
        return self._constructor(self.values.view(dtype),
                                 index=self.index).__finalize__(self)
    def __array__(self, result=None):
        """
        the array interface, return my values
        """
        return self.get_values()
    def __array_wrap__(self, result, context=None):
        """
        Gets called after a ufunc; re-wrap the ndarray result as a Series
        with our index.
        """
        return self._constructor(result, index=self.index,
                                 copy=False).__finalize__(self)
    def __array_prepare__(self, result, context=None):
        """
        Gets called prior to a ufunc
        """
        # nice error message for non-ufunc types
        if context is not None and not isinstance(self.values, np.ndarray):
            obj = context[1][0]
            raise TypeError("{obj} with dtype {dtype} cannot perform "
                            "the numpy op {op}".format(obj=type(obj).__name__,
                                                       dtype=getattr(obj,'dtype',None),
                                                       op=context[0].__name__))
        return result
    # complex
    @property
    def real(self):
        """Real component of the underlying values."""
        return self.values.real
    @real.setter
    def real(self, v):
        self.values.real = v
    @property
    def imag(self):
        """Imaginary component of the underlying values."""
        return self.values.imag
    @imag.setter
    def imag(self, v):
        self.values.imag = v
    # coercion
    # scalar conversions: valid only for length-1 series (see _coerce_method)
    __float__ = _coerce_method(float)
    __long__ = _coerce_method(int)
    __int__ = _coerce_method(int)
    # we are preserving name here
    def __getstate__(self):
        """Pickle support: serialize the block manager and the name."""
        return dict(_data=self._data, name=self.name)
    def _unpickle_series_compat(self, state):
        """Restore pickled state, including the pre-0.12 tuple format."""
        if isinstance(state, dict):
            self._data = state['_data']
            self.name = state['name']
            self.index = self._data.index
        elif isinstance(state, tuple):
            # < 0.12 series pickle
            nd_state, own_state = state
            # recreate the ndarray
            data = np.empty(nd_state[1], dtype=nd_state[2])
            np.ndarray.__setstate__(data, nd_state)
            # backwards compat
            index, name = own_state[0], None
            if len(own_state) > 1:
                name = own_state[1]
            # recreate
            self._data = SingleBlockManager(data, index, fastpath=True)
            self._index = index
            self.name = name
        else:
            raise Exception("cannot unpickle legacy formats -> [%s]" % state)
    # indexers
    @property
    def axes(self):
        """Return a single-element list holding the index."""
        return [self.index]
    def _ixs(self, i, axis=0):
        """
        Return the i-th value or values in the Series by location

        Parameters
        ----------
        i : int, slice, or sequence of integers

        Returns
        -------
        value : scalar (int) or Series (slice, sequence)
        """
        try:
            # dispatch to the values if we need
            values = self.values
            if isinstance(values, np.ndarray):
                return _index.get_value_at(values, i)
            else:
                return values[i]
        except IndexError:
            raise
        except:
            # non-integer i (slice/sequence) falls through to here
            if isinstance(i, slice):
                indexer = self.index._convert_slice_indexer(i, kind='iloc')
                return self._get_values(indexer)
            else:
                label = self.index[i]
                if isinstance(label, Index):
                    return self.take(i, axis=axis, convert=True)
                else:
                    return _index.get_value_at(self, i)
    @property
    def _is_mixed_type(self):
        # a Series holds exactly one dtype, so never "mixed"
        return False
    def _slice(self, slobj, axis=0, kind=None):
        # positional/label slice resolved through the index, then taken
        slobj = self.index._convert_slice_indexer(slobj, kind=kind or 'getitem')
        return self._get_values(slobj)
    def __getitem__(self, key):
        """Label-based lookup with positional/boolean fallbacks."""
        try:
            result = self.index.get_value(self, key)
            if not np.isscalar(result):
                if is_list_like(result) and not isinstance(result, Series):
                    # we need to box if we have a non-unique index here
                    # otherwise have inline ndarray/lists
                    if not self.index.is_unique:
                        result = self._constructor(result,
                                                   index=[key]*len(result)
                                                   ,dtype=self.dtype).__finalize__(self)
            return result
        except InvalidIndexError:
            pass
        except (KeyError, ValueError):
            if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
                # kludge
                pass
            elif key is Ellipsis:
                return self
            elif is_bool_indexer(key):
                pass
            else:
                # we can try to coerce the indexer (or this will raise)
                new_key = self.index._convert_scalar_indexer(key,kind='getitem')
                if type(new_key) != type(key):
                    return self.__getitem__(new_key)
                raise
        except Exception:
            raise
        # fall-through: non-scalar keys handled by _get_with
        if com.is_iterator(key):
            key = list(key)
        if is_bool_indexer(key):
            key = check_bool_indexer(self.index, key)
        return self._get_with(key)
    def _get_with(self, key):
        """Select with a non-scalar key (slice, tuple, list-like)."""
        # other: fancy integer or otherwise
        if isinstance(key, slice):
            indexer = self.index._convert_slice_indexer(key, kind='getitem')
            return self._get_values(indexer)
        elif isinstance(key, ABCDataFrame):
            raise TypeError('Indexing a Series with DataFrame is not supported, '\
                            'use the appropriate DataFrame column')
        else:
            if isinstance(key, tuple):
                try:
                    return self._get_values_tuple(key)
                except:
                    # a 1-tuple wrapping a slice is unwrapped and retried
                    if len(key) == 1:
                        key = key[0]
                        if isinstance(key, slice):
                            return self._get_values(key)
                    raise
            # pragma: no cover
            if not isinstance(key, (list, np.ndarray, Series, Index)):
                key = list(key)
            if isinstance(key, Index):
                key_type = key.inferred_type
            else:
                key_type = lib.infer_dtype(key)
            if key_type == 'integer':
                # integer keys are labels on int/float indexes, else positions
                if self.index.is_integer() or self.index.is_floating():
                    return self.reindex(key)
                else:
                    return self._get_values(key)
            elif key_type == 'boolean':
                return self._get_values(key)
            else:
                try:
                    # handle the dup indexing case (GH 4246)
                    if isinstance(key, (list, tuple)):
                        return self.ix[key]
                    return self.reindex(key)
                except Exception:
                    # [slice(0, 5, None)] will break if you convert to ndarray,
                    # e.g. as requested by np.median
                    # hack
                    if isinstance(key[0], slice):
                        return self._get_values(key)
                    raise
    def _get_values_tuple(self, key):
        """Select with a tuple key against a MultiIndex."""
        # mpl hackaround
        if any(k is None for k in key):
            return self._get_values(key)
        if not isinstance(self.index, MultiIndex):
            raise ValueError('Can only tuple-index with a MultiIndex')
        # If key is contained, would have returned by now
        indexer, new_index = self.index.get_loc_level(key)
        return self._constructor(self.values[indexer],
                                 index=new_index).__finalize__(self)
    def _get_values(self, indexer):
        """Take by positional indexer; fall back to raw ndarray indexing."""
        try:
            return self._constructor(self._data.get_slice(indexer),
                                     fastpath=True).__finalize__(self)
        except Exception:
            return self.values[indexer]
    def __setitem__(self, key, value):
        """Set self[key] = value, trying the index engine first and then
        progressively more general fallbacks (positional, boolean, label)."""
        def setitem(key, value):
            try:
                self._set_with_engine(key, value)
                return
            except (SettingWithCopyError):
                raise
            except (KeyError, ValueError):
                values = self.values
                if (com.is_integer(key)
                        and not self.index.inferred_type == 'integer'):
                    # integer key on a non-integer index -> positional set
                    values[key] = value
                    return
                elif key is Ellipsis:
                    self[:] = value
                    return
                elif is_bool_indexer(key):
                    pass
                elif com.is_timedelta64_dtype(self.dtype):
                    # reassign a null value to iNaT
                    if isnull(value):
                        value = tslib.iNaT
                        try:
                            self.index._engine.set_value(self.values, key, value)
                            return
                        except (TypeError):
                            pass
                self.loc[key] = value
                return
            except TypeError as e:
                if isinstance(key, tuple) and not isinstance(self.index,
                                                             MultiIndex):
                    raise ValueError("Can only tuple-index with a MultiIndex")
                # python 3 type errors should be raised
                if 'unorderable' in str(e):  # pragma: no cover
                    raise IndexError(key)
            if is_bool_indexer(key):
                key = check_bool_indexer(self.index, key)
                try:
                    self.where(~key, value, inplace=True)
                    return
                except (InvalidIndexError):
                    pass
            self._set_with(key, value)
        # do the setitem
        cacher_needs_updating = self._check_is_chained_assignment_possible()
        setitem(key, value)
        if cacher_needs_updating:
            self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self.values
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if not isinstance(key, (list, Series, np.ndarray, Series)):
try:
key = list(key)
except:
key = [ key ]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.inferred_type == 'integer':
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == 'boolean':
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = _asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index'
% str(key[mask]))
self._set_values(indexer, value)
    def _set_values(self, key, value):
        """Assign by positional indexer via the block manager."""
        if isinstance(key, Series):
            key = key.values
        self._data = self._data.setitem(indexer=key, value=value)
        self._maybe_update_cacher()
    # help out SparseSeries
    _get_val_at = ndarray.__getitem__
def repeat(self, reps):
"""
return a new Series with the values repeated reps times
See also
--------
numpy.ndarray.repeat
"""
new_index = self.index.repeat(reps)
new_values = self.values.repeat(reps)
return self._constructor(new_values,
index=new_index).__finalize__(self)
    def reshape(self, *args, **kwargs):
        """
        return an ndarray with the values shape
        if the specified shape matches exactly the current shape, then
        return self (for compat)

        See also
        --------
        numpy.ndarray.reshape
        """
        # accept either reshape(shape_tuple) or reshape(d0, d1, ...)
        if len(args) == 1 and hasattr(args[0], '__iter__'):
            shape = args[0]
        else:
            shape = args
        if tuple(shape) == self.shape:
            # XXX ignoring the "order" keyword.
            return self
        return self.values.reshape(shape, **kwargs)
    # positional accessors: aliases for _ixs
    iget_value = _ixs
    iget = _ixs
    irow = _ixs
    def get_value(self, label, takeable=False):
        """
        Quickly retrieve single value at passed index label

        Parameters
        ----------
        label : object
            Index label (or position if takeable=True)
        takeable : interpret the index as indexers, default False

        Returns
        -------
        value : scalar value
        """
        if takeable is True:
            return _maybe_box_datetimelike(self.values[label])
        return self.index.get_value(self.values, label)
    def set_value(self, label, value, takeable=False):
        """
        Quickly set single value at passed label. If label is not contained, a
        new object is created with the label placed at the end of the result
        index

        Parameters
        ----------
        label : object
            Partial indexing with MultiIndex not allowed
        value : object
            Scalar value
        takeable : interpret the index as indexers, default False

        Returns
        -------
        series : Series
            If label is contained, will be reference to calling Series,
            otherwise a new object
        """
        try:
            if takeable:
                self.values[label] = value
            else:
                self.index._engine.set_value(self.values, label, value)
            return self
        except KeyError:
            # set using a non-recursive method (enlarges via .loc)
            self.loc[label] = value
            return self
    def reset_index(self, level=None, drop=False, name=None, inplace=False):
        """
        Analogous to the :meth:`pandas.DataFrame.reset_index` function, see
        docstring there.

        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default
        drop : boolean, default False
            Do not try to insert index into dataframe columns
        name : object, default None
            The name of the column corresponding to the Series values
        inplace : boolean, default False
            Modify the Series in place (do not create a new object)

        Returns
        ----------
        resetted : DataFrame, or Series if drop == True
        """
        if drop:
            new_index = np.arange(len(self))
            if level is not None and isinstance(self.index, MultiIndex):
                if not isinstance(level, (tuple, list)):
                    level = [level]
                level = [self.index._get_level_number(lev) for lev in level]
                if len(level) < len(self.index.levels):
                    # dropping only some levels keeps the remainder
                    new_index = self.index.droplevel(level)
            if inplace:
                self.index = new_index
                # set name if it was passed, otherwise, keep the previous name
                self.name = name or self.name
            else:
                return self._constructor(self.values.copy(),
                                         index=new_index).__finalize__(self)
        elif inplace:
            raise TypeError('Cannot reset_index inplace on a Series '
                            'to create a DataFrame')
        else:
            df = self.to_frame(name)
            return df.reset_index(level=level, drop=drop)
    def __unicode__(self):
        """
        Return a string representation for a particular DataFrame

        Invoked by unicode(df) in py2 only. Yields a Unicode String in both
        py2/py3.
        """
        buf = StringIO(u(""))
        width, height = get_terminal_size()
        # display.max_rows == 0 means "fit to terminal height"
        max_rows = (height if get_option("display.max_rows") == 0
                    else get_option("display.max_rows"))
        self.to_string(buf=buf, name=self.name, dtype=self.dtype,
                       max_rows=max_rows)
        result = buf.getvalue()
        return result
    def _repr_footer(self):
        """Build the trailing summary line (name/freq/length/dtype)."""
        namestr = u("Name: %s, ") % com.pprint_thing(
            self.name) if self.name is not None else ""
        # time series
        if self.is_time_series:
            if self.index.freq is not None:
                freqstr = u('Freq: %s, ') % self.index.freqstr
            else:
                freqstr = u('')
            return u('%s%sLength: %d') % (freqstr, namestr, len(self))
        # Categorical
        if com.is_categorical_dtype(self.dtype):
            level_info = self.values._repr_categories_info()
            return u('%sLength: %d, dtype: %s\n%s') % (namestr,
                                                       len(self),
                                                       str(self.dtype.name),
                                                       level_info)
        # reg series
        return u('%sLength: %d, dtype: %s') % (namestr,
                                               len(self),
                                               str(self.dtype.name))
    def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
                  length=False, dtype=False, name=False, max_rows=None):
        """
        Render a string representation of the Series

        Parameters
        ----------
        buf : StringIO-like, optional
            buffer to write to
        na_rep : string, optional
            string representation of NAN to use, default 'NaN'
        float_format : one-parameter function, optional
            formatter function to apply to columns' elements if they are floats
            default None
        header: boolean, default True
            Add the Series header (index name)
        length : boolean, default False
            Add the Series length
        dtype : boolean, default False
            Add the Series dtype
        name : boolean, default False
            Add the Series name if not None
        max_rows : int, optional
            Maximum number of rows to show before truncating. If None, show
            all.

        Returns
        -------
        formatted : string (if not buffer passed)
        """
        the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,
                                  header=header, length=length, dtype=dtype,
                                  name=name, max_rows=max_rows)
        # catch contract violations
        if not isinstance(the_repr, compat.text_type):
            raise AssertionError("result must be of type unicode, type"
                                 " of result is {0!r}"
                                 "".format(the_repr.__class__.__name__))
        if buf is None:
            return the_repr
        else:
            # buf may be a writable object or a file path
            try:
                buf.write(the_repr)
            except AttributeError:
                with open(buf, 'w') as f:
                    f.write(the_repr)
    def _get_repr(
            self, name=False, header=True, length=True, dtype=True, na_rep='NaN',
            float_format=None, max_rows=None):
        """
        Internal function, should always return unicode string
        """
        formatter = fmt.SeriesFormatter(self, name=name,
                                        length=length, header=header,
                                        dtype=dtype,
                                        na_rep=na_rep,
                                        float_format=float_format,
                                        max_rows=max_rows)
        result = formatter.to_string()
        # TODO: following check prob. not neces.
        if not isinstance(result, compat.text_type):
            raise AssertionError("result must be of type unicode, type"
                                 " of result is {0!r}"
                                 "".format(result.__class__.__name__))
        return result
def __iter__(self):
if com.is_categorical_dtype(self.dtype):
return iter(self.values)
elif np.issubdtype(self.dtype, np.datetime64):
return (lib.Timestamp(x) for x in self.values)
elif np.issubdtype(self.dtype, np.timedelta64):
return (lib.Timedelta(x) for x in self.values)
else:
return iter(self.values)
    def iteritems(self):
        """
        Lazily iterate over (index, value) tuples
        """
        return zip(iter(self.index), iter(self))
    # on Python 3, `items` is simply an alias for iteritems
    if compat.PY3:  # pragma: no cover
        items = iteritems
    #----------------------------------------------------------------------
    # Misc public methods
    def keys(self):
        "Alias for index"
        return self.index
    def tolist(self):
        """ Convert Series to a flat Python list of its values """
        return list(self)
    def to_dict(self):
        """
        Convert Series to {label -> value} dict

        Returns
        -------
        value_dict : dict
        """
        return dict(compat.iteritems(self))
def to_frame(self, name=None):
"""
Convert Series to DataFrame
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
data_frame : DataFrame
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
    def to_sparse(self, kind='block', fill_value=None):
        """
        Convert Series to SparseSeries

        Parameters
        ----------
        kind : {'block', 'integer'}
        fill_value : float, defaults to NaN (missing)

        Returns
        -------
        sp : SparseSeries
        """
        from pandas.core.sparse import SparseSeries
        return SparseSeries(self, kind=kind,
                            fill_value=fill_value).__finalize__(self)
    #----------------------------------------------------------------------
    # Statistics, overridden ndarray methods
    # TODO: integrate bottleneck
    def count(self, level=None):
        """
        Return number of non-NA/null observations in the Series

        Parameters
        ----------
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a smaller Series

        Returns
        -------
        nobs : int or Series (if level specified)
        """
        if level is not None:
            mask = notnull(self.values)
            if isinstance(level, compat.string_types):
                level = self.index._get_level_number(level)
            level_index = self.index.levels[level]
            if len(self) == 0:
                return self._constructor(0, index=level_index)\
                           .__finalize__(self)
            # call cython function
            max_bin = len(level_index)
            labels = com._ensure_int64(self.index.labels[level])
            counts = lib.count_level_1d(mask.view(np.uint8),
                                        labels, max_bin)
            return self._constructor(counts,
                                     index=level_index).__finalize__(self)
        return notnull(_values_from_object(self)).sum()
    def mode(self):
        """Returns the mode(s) of the dataset.

        Empty if nothing occurs at least 2 times. Always returns Series even
        if only one value.

        Returns
        -------
        modes : Series (sorted)
        """
        # (removed bogus docstring section documenting a `sort` parameter
        # that this method does not accept)
        # TODO: Add option for bins like value_counts()
        from pandas.core.algorithms import mode
        return mode(self)
    # docstrings for both methods are supplied by the Appender templates
    @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
    def drop_duplicates(self, take_last=False, inplace=False):
        return super(Series, self).drop_duplicates(take_last=take_last,
                                                   inplace=inplace)
    @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
    def duplicated(self, take_last=False):
        return super(Series, self).duplicated(take_last=take_last)
    def idxmin(self, axis=None, out=None, skipna=True):
        """
        Index of first occurrence of minimum of values.

        Parameters
        ----------
        skipna : boolean, default True
            Exclude NA/null values

        Returns
        -------
        idxmin : Index of minimum of values

        Notes
        -----
        This method is the Series version of ``ndarray.argmin``.

        See Also
        --------
        DataFrame.idxmin
        numpy.ndarray.argmin
        """
        i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
        # -1 signals "all values were NA"
        if i == -1:
            return np.nan
        return self.index[i]
    def idxmax(self, axis=None, out=None, skipna=True):
        """
        Index of first occurrence of maximum of values.

        Parameters
        ----------
        skipna : boolean, default True
            Exclude NA/null values

        Returns
        -------
        idxmax : Index of maximum of values

        Notes
        -----
        This method is the Series version of ``ndarray.argmax``.

        See Also
        --------
        DataFrame.idxmax
        numpy.ndarray.argmax
        """
        i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
        # -1 signals "all values were NA"
        if i == -1:
            return np.nan
        return self.index[i]
    # ndarray compat
    # NOTE: unlike numpy, these aliases return index *labels*, not positions
    argmin = idxmin
    argmax = idxmax
    @Appender(np.ndarray.round.__doc__)
    def round(self, decimals=0, out=None):
        """
        Round the values; returns a new Series unless *out* is supplied.
        """
        result = _values_from_object(self).round(decimals, out=out)
        if out is None:
            result = self._constructor(result,
                                       index=self.index).__finalize__(self)
        return result
def quantile(self, q=0.5):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
Returns
-------
quantile : float or Series
if ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles.
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
valid = self.dropna()
def multi(values, qs):
if com.is_list_like(qs):
return Series([_quantile(values, x*100)
for x in qs], index=qs)
else:
return _quantile(values, qs*100)
return self._maybe_box(lambda values: multi(values, q), dropna=True)
    def ptp(self, axis=None, out=None):
        """Peak-to-peak (max - min) of the values; see numpy.ndarray.ptp."""
        return _values_from_object(self).ptp(axis, out)
    def corr(self, other, method='pearson',
             min_periods=None):
        """
        Compute correlation with `other` Series, excluding missing values

        Parameters
        ----------
        other : Series
        method : {'pearson', 'kendall', 'spearman'}
            * pearson : standard correlation coefficient
            * kendall : Kendall Tau correlation coefficient
            * spearman : Spearman rank correlation
        min_periods : int, optional
            Minimum number of observations needed to have a valid result

        Returns
        -------
        correlation : float
        """
        this, other = self.align(other, join='inner', copy=False)
        if len(this) == 0:
            return np.nan
        return nanops.nancorr(this.values, other.values, method=method,
                              min_periods=min_periods)
    def cov(self, other, min_periods=None):
        """
        Compute covariance with Series, excluding missing values

        Parameters
        ----------
        other : Series
        min_periods : int, optional
            Minimum number of observations needed to have a valid result

        Returns
        -------
        covariance : float
            Normalized by N-1 (unbiased estimator).
        """
        this, other = self.align(other, join='inner', copy=False)
        if len(this) == 0:
            return np.nan
        return nanops.nancov(this.values, other.values,
                             min_periods=min_periods)
    def diff(self, periods=1):
        """
        1st discrete difference of object

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for forming difference

        Returns
        -------
        diffed : Series
        """
        result = com.diff(_values_from_object(self), periods)
        return self._constructor(result, index=self.index).__finalize__(self)
    def autocorr(self, lag=1):
        """
        Lag-N autocorrelation

        Parameters
        ----------
        lag : int, default 1
            Number of lags to apply before performing autocorrelation.

        Returns
        -------
        autocorr : float
        """
        # correlation of the series with itself shifted by `lag`
        return self.corr(self.shift(lag))
    def dot(self, other):
        """
        Matrix multiplication with DataFrame or inner-product with Series
        objects

        Parameters
        ----------
        other : Series or DataFrame

        Returns
        -------
        dot_product : scalar or Series
        """
        from pandas.core.frame import DataFrame
        if isinstance(other, (Series, DataFrame)):
            # align on the common index before multiplying
            common = self.index.union(other.index)
            if (len(common) > len(self.index) or
                    len(common) > len(other.index)):
                raise ValueError('matrices are not aligned')
            left = self.reindex(index=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            left = self
            lvals = self.values
            rvals = np.asarray(other)
        if lvals.shape[0] != rvals.shape[0]:
            # NOTE(review): raises bare Exception; kept for compatibility
            raise Exception('Dot product shape mismatch, %s vs %s' %
                            (lvals.shape, rvals.shape))
        if isinstance(other, DataFrame):
            return self._constructor(np.dot(lvals, rvals),
                                     index=other.columns).__finalize__(self)
        elif isinstance(other, Series):
            return np.dot(lvals, rvals)
        elif isinstance(rvals, np.ndarray):
            return np.dot(lvals, rvals)
        else:  # pragma: no cover
            raise TypeError('unsupported type: %s' % type(other))
def searchsorted(self, v, side='left', sorter=None):
    """Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted Series `self` such that, if the
    corresponding elements in `v` were inserted before the indices, the
    order of `self` would be preserved.

    Parameters
    ----------
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `a`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort `self` into ascending
        order. They are typically the result of ``np.argsort``.

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    Series.sort
    Series.order
    numpy.searchsorted

    Notes
    -----
    Binary search is used to find the required insertion points.

    Examples
    --------
    >>> x = pd.Series([1, 2, 3])
    >>> x.searchsorted(4)
    array([3])
    >>> x.searchsorted([0, 4])
    array([0, 3])
    >>> x.searchsorted([1, 3], side='right')
    array([1, 3])
    """
    if sorter is not None:
        sorter = com._ensure_platform_int(sorter)
    # Route the needle through a Series so scalars/lists get the same
    # dtype coercion as self, then defer to ndarray.searchsorted.
    needle = Series(v).values
    return self.values.searchsorted(needle, side=side, sorter=sorter)
#------------------------------------------------------------------------------
# Combination
def append(self, to_append, verify_integrity=False):
    """
    Concatenate two or more Series. The indexes must not overlap

    Parameters
    ----------
    to_append : Series or list/tuple of Series
    verify_integrity : boolean, default False
        If True, raise Exception on creating index with duplicates

    Returns
    -------
    appended : Series
    """
    from pandas.tools.merge import concat

    # Normalize to a flat list of Series with self first, then delegate
    # the actual concatenation (and duplicate-index check) to concat.
    if isinstance(to_append, (list, tuple)):
        pieces = [self] + to_append
    else:
        pieces = [self, to_append]
    return concat(pieces, ignore_index=False,
                  verify_integrity=verify_integrity)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
combined : Series
"""
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join='outer', copy=False)
new_index = this.index
this_vals = this.values
other_vals = other.values
if fill_value is not None:
this_mask = isnull(this_vals)
other_mask = isnull(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
result = func(this_vals, other_vals)
name = _maybe_match_name(self, other)
return self._constructor(result, index=new_index).__finalize__(self)
def combine(self, other, func, fill_value=nan):
    """
    Perform elementwise binary operation on two Series using given function
    with optional fill value when an index is missing from one Series or
    the other

    Parameters
    ----------
    other : Series or scalar value
    func : function
    fill_value : scalar value

    Returns
    -------
    result : Series
    """
    if isinstance(other, Series):
        # Operate over the union of both indexes; labels missing from
        # one side are looked up with fill_value substituted.
        new_index = self.index.union(other.index)
        new_name = _maybe_match_name(self, other)
        # NOTE(review): output buffer reuses self's dtype, so results of
        # func that don't fit that dtype may be coerced -- confirm this
        # is intended.
        new_values = np.empty(len(new_index), dtype=self.dtype)
        for i, idx in enumerate(new_index):
            lv = self.get(idx, fill_value)
            rv = other.get(idx, fill_value)
            new_values[i] = func(lv, rv)
    else:
        # Scalar / array-like other: apply func directly to the values.
        new_index = self.index
        new_values = func(self.values, other)
        new_name = self.name
    return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
    """
    Combine Series values, choosing the calling Series's values
    first. Result index will be the union of the two indexes

    Parameters
    ----------
    other : Series

    Returns
    -------
    y : Series
    """
    # Reindex both operands onto the union of the indexes, then take
    # self's value wherever it is non-null, falling back to other.
    new_index = self.index.union(other.index)
    this = self.reindex(new_index, copy=False)
    other = other.reindex(new_index, copy=False)
    # The previous `name = _maybe_match_name(...)` assignment here was
    # dead code (its result was never used; the name is propagated by
    # __finalize__) and has been removed.
    rs_vals = com._where_compat(isnull(this), other.values, this.values)
    return self._constructor(rs_vals, index=new_index).__finalize__(self)
def update(self, other):
    """
    Modify Series in place using non-NA values from passed
    Series. Aligns on index

    Parameters
    ----------
    other : Series
    """
    # Align the incoming series onto our own index, then overwrite only
    # the positions where the aligned values are non-null.
    aligned = other.reindex_like(self)
    non_na = notnull(aligned)
    self._data = self._data.putmask(mask=non_na, new=aligned, inplace=True)
    self._maybe_update_cacher()
#----------------------------------------------------------------------
# Reindexing, sorting
def sort_index(self, ascending=True):
    """
    Sort object by labels (along an axis)

    Parameters
    ----------
    ascending : boolean or list, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders

    Examples
    --------
    >>> result1 = s.sort_index(ascending=False)
    >>> result2 = s.sort_index(ascending=[1, 0])

    Returns
    -------
    sorted_obj : Series
    """
    index = self.index
    if isinstance(index, MultiIndex):
        # MultiIndex: lexsort over the integer label codes so each level
        # can have its own ascending/descending order.
        from pandas.core.groupby import _lexsort_indexer
        indexer = _lexsort_indexer(index.labels, orders=ascending)
        indexer = com._ensure_platform_int(indexer)
        new_labels = index.take(indexer)
    else:
        # Flat index: Index.order returns both the sorted labels and the
        # indexer needed to reorder the values.
        new_labels, indexer = index.order(return_indexer=True,
                                          ascending=ascending)

    new_values = self.values.take(indexer)
    return self._constructor(new_values,
                             index=new_labels).__finalize__(self)
def argsort(self, axis=0, kind='quicksort', order=None):
    """
    Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
    and places the result in the same locations as the non-NA values

    Parameters
    ----------
    axis : int (can only be zero)
    kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
        Choice of sorting algorithm. See np.sort for more
        information. 'mergesort' is the only stable algorithm
    order : ignored

    Returns
    -------
    argsorted : Series, with -1 indicated where nan values are present

    See also
    --------
    numpy.ndarray.argsort
    """
    values = self.values
    mask = isnull(values)

    if mask.any():
        # NA present: argsort only the valid values and leave -1 as a
        # sentinel in the NA positions (result keeps self's alignment).
        result = Series(
            -1, index=self.index, name=self.name, dtype='int64')
        notmask = ~mask
        result[notmask] = np.argsort(values[notmask], kind=kind)
        return self._constructor(result,
                                 index=self.index).__finalize__(self)
    else:
        # No NAs: a straight ndarray argsort suffices.
        return self._constructor(
            np.argsort(values, kind=kind), index=self.index,
            dtype='int64').__finalize__(self)
def rank(self, method='average', na_option='keep', ascending=True,
         pct=False):
    """
    Compute data ranks (1 through n). Equal values are assigned a rank that
    is the average of the ranks of those values

    Parameters
    ----------
    method : {'average', 'min', 'max', 'first', 'dense'}
        * average: average rank of group
        * min: lowest rank in group
        * max: highest rank in group
        * first: ranks assigned in order they appear in the array
        * dense: like 'min', but rank always increases by 1 between groups
    na_option : {'keep'}
        keep: leave NA values where they are
    ascending : boolean, default True
        False for ranks by high (1) to low (N)
    pct : boolean, default False
        Computes percentage rank of data

    Returns
    -------
    ranks : Series
    """
    # All the heavy lifting happens in the shared algorithms module;
    # this method just rewraps the result with self's index.
    from pandas.core.algorithms import rank
    ranked = rank(self.values, method=method, na_option=na_option,
                  ascending=ascending, pct=pct)
    return self._constructor(ranked, index=self.index).__finalize__(self)
def sort(self, axis=0, ascending=True, kind='quicksort',
         na_position='last', inplace=True):
    """
    Sort values and index labels by value. This is an inplace sort by default.
    Series.order is the equivalent but returns a new Series.

    Parameters
    ----------
    axis : int (can only be zero)
    ascending : boolean, default True
        Sort ascending. Passing False sorts descending
    kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
        Choice of sorting algorithm. See np.sort for more
        information. 'mergesort' is the only stable algorithm
    na_position : {'first', 'last'} (optional, default='last')
        'first' puts NaNs at the beginning
        'last' puts NaNs at the end
    inplace : boolean, default True
        Do operation in place.

    See Also
    --------
    Series.order
    """
    # sort() is simply the in-place-by-default spelling of order(); all
    # of the actual sorting logic lives there.
    return self.order(ascending=ascending,
                      kind=kind,
                      na_position=na_position,
                      inplace=inplace)
def order(self, na_last=None, ascending=True, kind='quicksort',
          na_position='last', inplace=False):
    """
    Sorts Series object, by value, maintaining index-value link.
    This will return a new Series by default. Series.sort is the equivalent but as an inplace method.

    Parameters
    ----------
    na_last : boolean (optional, default=True) (DEPRECATED; use na_position)
        Put NaN's at beginning or end
    ascending : boolean, default True
        Sort ascending. Passing False sorts descending
    kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
        Choice of sorting algorithm. See np.sort for more
        information. 'mergesort' is the only stable algorithm
    na_position : {'first', 'last'} (optional, default='last')
        'first' puts NaNs at the beginning
        'last' puts NaNs at the end
    inplace : boolean, default False
        Do operation in place.

    Returns
    -------
    y : Series

    See Also
    --------
    Series.sort
    """
    # GH 5856/5853: sorting a cached view in place would desynchronize
    # it from its parent, so refuse outright.
    if inplace and self._is_cached:
        raise ValueError("This Series is a view of some other array, to "
                         "sort in-place you must create a copy")

    # Translate the deprecated na_last flag onto na_position.
    if na_last is not None:
        warnings.warn(("na_last is deprecated. Please use na_position instead"),
                      FutureWarning)
        na_position = 'last' if na_last else 'first'

    def _try_kind_sort(arr):
        # easier to ask forgiveness than permission
        try:
            # if kind==mergesort, it can fail for object dtype
            return arr.argsort(kind=kind)
        except TypeError:
            # stable sort not available for object dtype
            # uses the argsort default quicksort
            return arr.argsort(kind='quicksort')

    arr = self.values
    # NOTE(review): int32 positions overflow for series longer than
    # 2**31 elements -- confirm that is out of scope here.
    sortedIdx = np.empty(len(self), dtype=np.int32)

    bad = isnull(arr)

    good = ~bad
    idx = np.arange(len(self))

    # Only the non-NA values are argsorted; the NA positions are spliced
    # back in at the requested end below.
    argsorted = _try_kind_sort(arr[good])

    if not ascending:
        argsorted = argsorted[::-1]

    if na_position == 'last':
        n = good.sum()
        sortedIdx[:n] = idx[good][argsorted]
        sortedIdx[n:] = idx[bad]
    elif na_position == 'first':
        n = bad.sum()
        sortedIdx[n:] = idx[good][argsorted]
        sortedIdx[:n] = idx[bad]
    else:
        raise ValueError('invalid na_position: {!r}'.format(na_position))

    result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])

    if inplace:
        self._update_inplace(result)
    else:
        return result.__finalize__(self)
def nlargest(self, n=5, take_last=False):
    """Return the largest `n` elements.

    Parameters
    ----------
    n : int
        Return this many descending sorted values
    take_last : bool
        Where there are duplicate values, take the last duplicate

    Returns
    -------
    top_n : Series
        The n largest values in the Series, in sorted order

    Notes
    -----
    Faster than ``.order(ascending=False).head(n)`` for small `n` relative
    to the size of the ``Series`` object.

    See Also
    --------
    Series.nsmallest

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> s = pd.Series(np.random.randn(10 ** 6))
    >>> s.nlargest(10)  # only sorts up to the N requested
    """
    # select_n implements the partial-sort strategy shared with
    # nsmallest; only the method name differs.
    return select_n(self, n=n, take_last=take_last, method='nlargest')
def nsmallest(self, n=5, take_last=False):
    """Return the smallest `n` elements.

    Parameters
    ----------
    n : int
        Return this many ascending sorted values
    take_last : bool
        Where there are duplicate values, take the last duplicate

    Returns
    -------
    bottom_n : Series
        The n smallest values in the Series, in sorted order

    Notes
    -----
    Faster than ``.order().head(n)`` for small `n` relative to
    the size of the ``Series`` object.

    See Also
    --------
    Series.nlargest

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> s = pd.Series(np.random.randn(10 ** 6))
    >>> s.nsmallest(10)  # only sorts up to the N requested
    """
    # Mirror of nlargest; the shared select_n helper does the work.
    return select_n(self, n=n, take_last=take_last, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
    """
    Sort Series with MultiIndex by chosen level. Data will be
    lexicographically sorted by the chosen level followed by the other
    levels (in order)

    Parameters
    ----------
    level : int or level name, default None
    ascending : bool, default True

    Returns
    -------
    sorted : Series
    """
    if not isinstance(self.index, MultiIndex):
        raise TypeError('can only sort by level with a hierarchical index')

    # The index knows how to sort itself by level; it hands back the
    # indexer we need to permute the values in lockstep.
    sorted_index, indexer = self.index.sortlevel(
        level, ascending=ascending, sort_remaining=sort_remaining)
    reordered = self.values.take(indexer)
    return self._constructor(reordered,
                             index=sorted_index).__finalize__(self)
def swaplevel(self, i, j, copy=True):
    """
    Swap levels i and j in a MultiIndex

    Parameters
    ----------
    i, j : int, string (can be mixed)
        Level of index to be swapped. Can pass level name as string.

    Returns
    -------
    swapped : Series
    """
    # Only the index changes; the values keep their positions.
    swapped_index = self.index.swaplevel(i, j)
    return self._constructor(self.values, index=swapped_index,
                             copy=copy).__finalize__(self)
def reorder_levels(self, order):
    """
    Rearrange index levels using input order. May not drop or duplicate
    levels

    Parameters
    ----------
    order : list of int representing new level order.
        (reference level by number or key)

    Returns
    -------
    type of caller (new object)
    """
    # Reordering levels only makes sense for a hierarchical index.
    if not isinstance(self.index, MultiIndex):  # pragma: no cover
        raise Exception('Can only reorder levels on a hierarchical axis.')

    # Work on a copy so the caller's index is untouched.
    result = self.copy()
    result.index = result.index.reorder_levels(order)
    return result
def unstack(self, level=-1):
    """
    Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
    The level involved will automatically get sorted.

    Parameters
    ----------
    level : int, string, or list of these, default last level
        Level(s) to unstack, can pass level name

    Examples
    --------
    >>> s
    one  a   1.
    one  b   2.
    two  a   3.
    two  b   4.

    >>> s.unstack(level=-1)
         a   b
    one  1.  2.
    two  3.  4.

    >>> s.unstack(level=0)
       one  two
    a  1.   2.
    b  3.   4.

    Returns
    -------
    unstacked : DataFrame
    """
    # The reshape machinery lives in a separate module; import lazily to
    # avoid a circular dependency at module load time.
    from pandas.core.reshape import unstack
    return unstack(self, level)
#----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
    """
    Map values of Series using input correspondence (which can be
    a dict, Series, or function)

    Parameters
    ----------
    arg : function, dict, or Series
    na_action : {None, 'ignore'}
        If 'ignore', propagate NA values

    Examples
    --------
    >>> x
    one   1
    two   2
    three 3

    >>> y
    1  foo
    2  bar
    3  baz

    >>> x.map(y)
    one   foo
    two   bar
    three baz

    Returns
    -------
    y : Series
        same index as caller
    """
    values = self.values
    # datetime64 values are boxed to Timestamps so user callables see
    # rich datetime objects rather than raw i8/np.datetime64 scalars.
    if com.is_datetime64_dtype(values.dtype):
        values = lib.map_infer(values, lib.Timestamp)

    if na_action == 'ignore':
        # Mask-aware mapper: NA positions are passed through untouched.
        mask = isnull(values)

        def map_f(values, f):
            return lib.map_infer_mask(values, f, mask.view(np.uint8))
    else:
        map_f = lib.map_infer

    if isinstance(arg, (dict, Series)):
        # Lookup-table path: treat arg as a mapping and use vectorized
        # index lookup instead of calling a Python function per element.
        if isinstance(arg, dict):
            arg = self._constructor(arg, index=arg.keys())

        indexer = arg.index.get_indexer(values)
        new_values = com.take_1d(arg.values, indexer)
        return self._constructor(new_values,
                                 index=self.index).__finalize__(self)
    else:
        # Callable path: apply element-wise via the (possibly masked)
        # mapper selected above.
        mapped = map_f(values, arg)
        return self._constructor(mapped,
                                 index=self.index).__finalize__(self)
def apply(self, func, convert_dtype=True, args=(), **kwds):
    """
    Invoke function on values of Series. Can be ufunc (a NumPy function
    that applies to the entire Series) or a Python function that only works
    on single values

    Parameters
    ----------
    func : function
    convert_dtype : boolean, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object
    args : tuple
        Positional arguments to pass to function in addition to the value

    Additional keyword arguments will be passed as keywords to the function

    See also
    --------
    Series.map: For element-wise operations

    Returns
    -------
    y : Series or DataFrame if func returns a Series
    """
    # Empty series short-circuit: preserve dtype and index.
    if len(self) == 0:
        return self._constructor(dtype=self.dtype,
                                 index=self.index).__finalize__(self)

    # Bind extra args/kwds into a wrapper, except for ufuncs, which take
    # the whole array at once below.
    if kwds or args and not isinstance(func, np.ufunc):
        f = lambda x: func(x, *args, **kwds)
    else:
        f = func

    # ufuncs operate on the entire Series directly -- no Python loop.
    if isinstance(f, np.ufunc):
        return f(self)

    values = _values_from_object(self)
    # Box datetime64 values as Timestamps so f receives datetime objects.
    if com.is_datetime64_dtype(values.dtype):
        values = lib.map_infer(values, lib.Timestamp)

    mapped = lib.map_infer(values, f, convert=convert_dtype)
    if len(mapped) and isinstance(mapped[0], Series):
        # f returned Series objects: stack them into a DataFrame.
        from pandas.core.frame import DataFrame
        return DataFrame(mapped.tolist(), index=self.index)
    else:
        return self._constructor(mapped,
                                 index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
            filter_type=None, **kwds):
    """
    perform a reduction operation

    if we have an ndarray as a value, then simply perform the operation,
    otherwise delegate to the object
    """
    delegate = self.values
    if isinstance(delegate, np.ndarray):
        # Validate that 'axis' is consistent with Series's single axis.
        self._get_axis_number(axis)
        # numeric_only has no meaning for a 1-D homogeneous array.
        if numeric_only:
            raise NotImplementedError(
                'Series.{0} does not implement numeric_only.'.format(name))
        return op(delegate, skipna=skipna, **kwds)

    # Non-ndarray backing (e.g. extension-like values): let the values
    # object perform its own reduction.
    return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
                            numeric_only=numeric_only,
                            filter_type=filter_type, **kwds)
def _maybe_box(self, func, dropna=False):
    """
    evaluate a function with possible input/output conversion if we are i8

    Parameters
    ----------
    dropna : bool, default False
        whether to drop values if necessary
    """
    if dropna:
        values = self.dropna().values
    else:
        values = self.values

    if com.needs_i8_conversion(self):
        # datetime/timedelta-like: run func on the raw i8 view, then box
        # the result(s) back to the proper scalar type.
        boxer = com.i8_boxer(self)

        if len(values) == 0:
            # Nothing left to reduce; return the boxed NaT equivalent.
            return boxer(tslib.iNaT)

        values = values.view('i8')
        result = func(values)

        if com.is_list_like(result):
            result = result.map(boxer)
        else:
            result = boxer(result)

    else:
        # let the function return nan if appropriate
        if dropna:
            if len(values) == 0:
                return np.nan
        result = func(values)

    return result
def _reindex_indexer(self, new_index, indexer, copy):
    # A None indexer means the index is already conformed: either return
    # self untouched or a shallow copy, per the caller's request.
    if indexer is None:
        return self.copy() if copy else self

    # be subclass-friendly
    taken = com.take_1d(self.get_values(), indexer)
    return self._constructor(taken, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
    """ check if we do need a multi reindex; this is for compat with
    higher dims
    """
    # A Series has a single axis, so a multi-axis reindex never applies.
    return False
# Thin delegation to NDFrame.rename; docstring is injected by @Appender.
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, **kwargs):
    return super(Series, self).rename(index=index, **kwargs)
# Thin delegation to NDFrame.reindex; docstring is injected by @Appender.
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, **kwargs):
    return super(Series, self).reindex(index=index, **kwargs)
# Thin delegation to NDFrame.fillna; docstring is injected by @Appender.
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
           limit=None, downcast=None, **kwargs):
    return super(Series, self).fillna(value=value, method=method,
                                      axis=axis, inplace=inplace,
                                      limit=limit, downcast=downcast,
                                      **kwargs)
# Thin delegation to NDFrame.shift; docstring is injected by @Appender.
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, **kwargs):
    return super(Series, self).shift(periods=periods, freq=freq,
                                     axis=axis, **kwargs)
def reindex_axis(self, labels, axis=0, **kwargs):
    """ for compatibility with higher dims """
    # Series only has axis 0; anything else is a caller error.
    if axis != 0:
        raise ValueError("cannot reindex series on non-zero axis!")
    return self.reindex(index=labels, **kwargs)
def take(self, indices, axis=0, convert=True, is_copy=False):
    """
    return Series corresponding to requested indices

    Parameters
    ----------
    indices : list / array of ints
    convert : translate negative to positive indices (default)

    Returns
    -------
    taken : Series

    See also
    --------
    numpy.ndarray.take
    """
    # check/convert indicies here
    if convert:
        # Validates bounds and translates negative positions.
        indices = maybe_convert_indices(
            indices, len(self._get_axis(axis)))

    indices = com._ensure_platform_int(indices)
    # Index and values must be permuted in lockstep.
    new_index = self.index.take(indices)
    new_values = self.values.take(indices)
    return self._constructor(new_values,
                             index=new_index).__finalize__(self)
def isin(self, values):
    """
    Return a boolean :class:`~pandas.Series` showing whether each element
    in the :class:`~pandas.Series` is exactly contained in the passed
    sequence of ``values``.

    Parameters
    ----------
    values : list-like
        The sequence of values to test. Passing in a single string will
        raise a ``TypeError``. Instead, turn a single string into a
        ``list`` of one element.

    Returns
    -------
    isin : Series (bool dtype)

    Raises
    ------
    TypeError
      * If ``values`` is a string

    See Also
    --------
    pandas.DataFrame.isin

    Examples
    --------

    >>> s = pd.Series(list('abc'))
    >>> s.isin(['a', 'c', 'e'])
    0     True
    1    False
    2     True
    dtype: bool

    Passing a single string as ``s.isin('a')`` will raise an error. Use
    a list of one element instead:

    >>> s.isin(['a'])
    0     True
    1    False
    2    False
    dtype: bool
    """
    # Reject plain strings explicitly: iterating a string would test
    # individual characters, which is almost never what callers want.
    if not com.is_list_like(values):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to Series.isin(), you passed a "
                        "{0!r}".format(type(values).__name__))

    # may need i8 conversion for proper membership testing
    comps = _values_from_object(self)
    if com.is_datetime64_dtype(self):
        # Normalize both sides to i8 so Timestamp/str/datetime inputs
        # all compare equal to the underlying datetime64 values.
        from pandas.tseries.tools import to_datetime
        values = Series(to_datetime(values)).values.view('i8')
        comps = comps.view('i8')
    elif com.is_timedelta64_dtype(self):
        from pandas.tseries.timedeltas import to_timedelta
        values = Series(to_timedelta(values)).values.view('i8')
        comps = comps.view('i8')

    value_set = set(values)
    result = lib.ismember(comps, value_set)
    return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
    """
    Return boolean Series equivalent to left <= series <= right. NA values
    will be treated as False

    Parameters
    ----------
    left : scalar
        Left boundary
    right : scalar
        Right boundary

    Returns
    -------
    is_between : Series
    """
    # Build the two one-sided masks (strict or non-strict per the
    # inclusive flag) and intersect them.
    if inclusive:
        above_left = self >= left
        below_right = self <= right
    else:
        above_left = self > left
        below_right = self < right
    return above_left & below_right
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
             index_col=0, encoding=None, infer_datetime_format=False):
    """
    Read delimited file into Series

    Parameters
    ----------
    path : string file path or file handle / StringIO
    sep : string, default ','
        Field delimiter
    parse_dates : boolean, default True
        Parse dates. Different default from read_table
    header : int, default None
        Row to use at header (skip prior rows)
    index_col : int or sequence, default 0
        Column to use for index. If a sequence is given, a MultiIndex
        is used. Different default from read_table
    encoding : string, optional
        a string representing the encoding to use if the contents are
        non-ascii, for python versions prior to 3
    infer_datetime_format: boolean, default False
        If True and `parse_dates` is True for a column, try to infer the
        datetime format based on the first datetime string. If the format
        can be inferred, there often will be a large parsing speed-up.

    Returns
    -------
    y : Series
    """
    # Reuse DataFrame's CSV reader and take the single data column.
    from pandas.core.frame import DataFrame
    df = DataFrame.from_csv(path, header=header, index_col=index_col,
                            sep=sep, parse_dates=parse_dates,
                            encoding=encoding,
                            infer_datetime_format=infer_datetime_format)
    result = df.icol(0)
    # Strip the names picked up from the frame so round-tripping through
    # to_csv/from_csv yields an anonymous Series.
    result.index.name = result.name = None
    return result
def to_csv(self, path, index=True, sep=",", na_rep='',
           float_format=None, header=False,
           index_label=None, mode='w', nanRep=None, encoding=None,
           date_format=None, decimal='.'):
    """
    Write Series to a comma-separated values (csv) file

    Parameters
    ----------
    path : string file path or file handle / StringIO. If None is provided
        the result is returned as a string.
    na_rep : string, default ''
        Missing data representation
    float_format : string, default None
        Format string for floating point numbers
    header : boolean, default False
        Write out series name
    index : boolean, default True
        Write row names (index)
    index_label : string or sequence, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    mode : Python write mode, default 'w'
    sep : character, default ","
        Field delimiter for the output file.
    encoding : string, optional
        a string representing the encoding to use if the contents are
        non-ascii, for python versions prior to 3
    date_format: string, default None
        Format string for datetime objects.
    decimal: string, default '.'
        Character recognized as decimal separator. E.g. use ',' for European data
    """
    # Delegate to DataFrame.to_csv by wrapping self in a one-column frame.
    from pandas.core.frame import DataFrame
    df = DataFrame(self)
    # result is only a string if no path provided, otherwise None
    result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
                       float_format=float_format, header=header,
                       index_label=index_label, mode=mode, nanRep=nanRep,
                       encoding=encoding, date_format=date_format, decimal=decimal)
    if path is None:
        return result
def dropna(self, axis=0, inplace=False, **kwargs):
    """
    Return Series without null values

    Returns
    -------
    valid : Series
    inplace : boolean, default False
        Do operation in place.
    """
    # 'how' is accepted (and ignored) for DataFrame API compatibility;
    # any other stray keyword is a caller error.
    kwargs.pop('how', None)
    if kwargs:
        raise TypeError('dropna() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs.keys())[0]))

    # Validate the axis argument (a Series only has axis 0).
    axis = self._get_axis_number(axis or 0)
    result = remove_na(self)
    if inplace:
        self._update_inplace(result)
    else:
        return result

# Legacy alias: valid() is simply dropna().
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
                                                          **kwargs)
def first_valid_index(self):
    """
    Return label for first non-NA/null value
    """
    if len(self) == 0:
        return None

    null_mask = isnull(self.values)
    pos = null_mask.argmin()
    # argmin returns 0 even when every element is null, so verify that
    # the position it found is actually non-null.
    if null_mask[pos]:
        return None
    return self.index[pos]
def last_valid_index(self):
    """
    Return label for last non-NA/null value
    """
    if len(self) == 0:
        return None

    # Scan from the back by reversing the null mask.
    null_mask = isnull(self.values[::-1])
    pos = null_mask.argmin()
    # argmin returns 0 even when everything is null; verify the hit.
    if null_mask[pos]:
        return None
    return self.index[len(self) - pos - 1]
#----------------------------------------------------------------------
# Time series-oriented methods
def asof(self, where):
    """
    Return last good (non-NaN) value in TimeSeries if value is NaN for
    requested date.

    If there is no good value, NaN is returned.

    Parameters
    ----------
    where : date or array of dates

    Notes
    -----
    Dates are assumed to be sorted

    Returns
    -------
    value or NaN
    """
    if isinstance(where, compat.string_types):
        where = datetools.to_datetime(where)

    values = self.values

    if not hasattr(where, '__iter__'):
        # Scalar lookup path.
        start = self.index[0]
        if isinstance(self.index, PeriodIndex):
            # PeriodIndex compares by ordinal, so translate both sides.
            where = Period(where, freq=self.index.freq).ordinal
            start = start.ordinal

        if where < start:
            # Requested date precedes all data: nothing to fall back to.
            return np.nan
        loc = self.index.searchsorted(where, side='right')
        if loc > 0:
            loc -= 1
        # Walk backwards past NaNs to the last good value.
        while isnull(values[loc]) and loc > 0:
            loc -= 1
        return values[loc]

    # Vectorized path: asof_locs finds the position of the last good
    # value at or before each requested date.
    if not isinstance(where, Index):
        where = Index(where)

    locs = self.index.asof_locs(where, notnull(values))
    new_values = com.take_1d(values, locs)
    return self._constructor(new_values, index=where).__finalize__(self)
def to_timestamp(self, freq=None, how='start', copy=True):
    """
    Cast to datetimeindex of timestamps, at *beginning* of period

    Parameters
    ----------
    freq : string, default frequency of PeriodIndex
        Desired frequency
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end

    Returns
    -------
    ts : TimeSeries with DatetimeIndex
    """
    # Only the index is converted; the values pass through (copied if
    # requested so the result does not alias self's data).
    vals = self.values.copy() if copy else self.values
    ts_index = self.index.to_timestamp(freq=freq, how=how)
    return self._constructor(vals, index=ts_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
    """
    Convert TimeSeries from DatetimeIndex to PeriodIndex with desired
    frequency (inferred from index if not passed)

    Parameters
    ----------
    freq : string, default

    Returns
    -------
    ts : TimeSeries with PeriodIndex
    """
    # Mirror of to_timestamp: only the index changes representation.
    vals = self.values.copy() if copy else self.values
    period_index = self.index.to_period(freq=freq)
    return self._constructor(vals, index=period_index).__finalize__(self)
#------------------------------------------------------------------------------
# Datetimelike delegation methods
def _make_dt_accessor(self):
    # Factory for the .dt accessor; raises AttributeError (so hasattr
    # checks behave) when the values are not datetime-like.
    try:
        return maybe_to_datetimelike(self)
    except Exception:
        raise AttributeError("Can only use .dt accessor with datetimelike "
                             "values")

# .dt namespace (e.g. s.dt.year) -- only valid for datetimelike values.
dt = base.AccessorProperty(CombinedDatetimelikeProperties, _make_dt_accessor)
#------------------------------------------------------------------------------
# Categorical methods
def _make_cat_accessor(self):
    # Factory for the .cat accessor; only meaningful for category dtype.
    if not com.is_categorical_dtype(self.dtype):
        raise AttributeError("Can only use .cat accessor with a "
                             "'category' dtype")
    return CategoricalAccessor(self.values, self.index)

# .cat namespace (e.g. s.cat.categories) -- only valid for category dtype.
cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor)
def _dir_deletions(self):
    # Hide all accessor names from dir() by default; _dir_additions
    # re-adds the one that is actually valid for this dtype.
    return self._accessors
def _dir_additions(self):
    # Probe each accessor; the first one that constructs successfully is
    # the one valid for this dtype and gets advertised in dir().
    valid = set()
    # these accessors are mutually exclusive, so break loop when one exists
    for accessor in self._accessors:
        try:
            getattr(self, accessor)
        except AttributeError:
            continue
        valid.add(accessor)
        break
    return valid
# Register the Series axis layout with the NDFrame machinery ('rows' is
# accepted as an alias for the single axis) and attach the generated
# numeric reduction methods (sum, mean, ...).
Series._setup_axes(['index'], info_axis=0, stat_axis=0,
                   aliases={'rows': 0})
Series._add_numeric_operations()

# Types accepted as index-like by the constructor helpers.
_INDEX_TYPES = ndarray, Index, list, tuple
#------------------------------------------------------------------------------
# Supplementary functions
def remove_na(series):
    """
    Return series containing only true/non-NaN values, possibly empty.
    """
    # Boolean-mask the series with its own non-null positions.
    keep = notnull(_values_from_object(series))
    return series[keep]
def _sanitize_index(data, index, copy=False):
    """ sanitize an index type to return an ndarray of the underlying, pass thru a non-Index """

    if len(data) != len(index):
        raise ValueError('Length of values does not match length of '
                         'index')

    if isinstance(data, PeriodIndex):
        # Periods are stored as objects when used as Series values.
        data = data.asobject
    elif isinstance(data, DatetimeIndex):
        # Keep timezone info when embedding a DatetimeIndex as values.
        data = data._to_embed(keep_tz=True)
        if copy:
            data = data.copy()
    elif isinstance(data, np.ndarray):

        # coerce datetimelike types
        if data.dtype.kind in ['M','m']:
            data = _sanitize_array(data, index, copy=copy)

    return data
def _sanitize_array(data, index, dtype=None, copy=False,
                    raise_cast_failure=False):
    """ sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """

    if dtype is not None:
        dtype = _coerce_to_dtype(dtype)

    if isinstance(data, ma.MaskedArray):
        # Masked array: upcast if needed so masked slots can hold the
        # fill value, then write it into the masked positions.
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = _maybe_upcast(data, copy=True)
            data[mask] = fill_value
        else:
            data = data.copy()

    def _try_cast(arr, take_fast_path):

        # perf shortcut as this is the most common case
        if take_fast_path:
            if _possibly_castable(arr) and not copy and dtype is None:
                return arr

        try:
            arr = _possibly_cast_to_datetime(arr, dtype)
            subarr = np.array(arr, dtype=dtype, copy=copy)
        except (ValueError, TypeError):
            if com.is_categorical_dtype(dtype):
                subarr = Categorical(arr)
            elif dtype is not None and raise_cast_failure:
                raise
            else:
                # Fall back to an object array when the cast fails and
                # the caller did not demand strictness.
                subarr = np.array(arr, dtype=object, copy=copy)
        return subarr

    # GH #846
    if isinstance(data, (np.ndarray, Index, Series)):
        if dtype is not None:
            subarr = np.array(data, copy=False)

            # possibility of nan -> garbage
            if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
                if not isnull(data).any():
                    subarr = _try_cast(data, True)
            elif copy:
                subarr = data.copy()
            else:
                if (com.is_datetime64_dtype(data.dtype) and
                        not com.is_datetime64_dtype(dtype)):
                    if dtype == object:
                        # Convert raw datetime64 values to boxed
                        # datetime objects rather than opaque i8s.
                        ints = np.asarray(data).view('i8')
                        subarr = tslib.ints_to_pydatetime(ints)
                    elif raise_cast_failure:
                        raise TypeError('Cannot cast datetime64 to %s' %
                                        dtype)
                else:
                    subarr = _try_cast(data, True)
        elif isinstance(data, Index):
            # don't coerce Index types
            # e.g. indexes can have different conversions (so don't fast path them)
            # GH 6140
            subarr = _sanitize_index(data, index, copy=True)
        else:
            subarr = _try_cast(data, True)

            if copy:
                subarr = data.copy()

    elif isinstance(data, Categorical):
        subarr = data

        if copy:
            subarr = data.copy()
        return subarr

    elif isinstance(data, list) and len(data) > 0:
        if dtype is not None:
            try:
                subarr = _try_cast(data, False)
            except Exception:
                if raise_cast_failure:  # pragma: no cover
                    raise
                subarr = np.array(data, dtype=object, copy=copy)
                subarr = lib.maybe_convert_objects(subarr)
        else:
            # No dtype requested: let platform conversion and datetime
            # inference pick the best representation.
            subarr = _possibly_convert_platform(data)

        subarr = _possibly_cast_to_datetime(subarr, dtype)

    else:
        subarr = _try_cast(data, False)

    # scalar like
    if subarr.ndim == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            # Broadcast the scalar across the whole index.
            value = data

            # figure out the dtype from the value (upcast if necessary)
            if dtype is None:
                dtype, value = _infer_dtype_from_scalar(value)
            else:
                # need to possibly convert the value here
                value = _possibly_cast_to_datetime(value, dtype)

            subarr = np.empty(len(index), dtype=dtype)
            subarr.fill(value)

        else:
            return subarr.item()

    # the result that we want
    elif subarr.ndim == 1:
        if index is not None:

            # a 1-element ndarray
            if len(subarr) != len(index) and len(subarr) == 1:
                # Broadcast the single element to the index's length.
                value = subarr[0]
                subarr = np.empty(len(index), dtype=subarr.dtype)
                subarr.fill(value)

    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise Exception('Data must be 1-dimensional')
        else:
            subarr = _asarray_tuplesafe(data, dtype=dtype)

    # This is to prevent mixed-type Series getting all casted to
    # NumPy string type, e.g. NaN --> '-1#IND'.
    if issubclass(subarr.dtype.type, compat.string_types):
        subarr = np.array(data, dtype=object, copy=copy)

    return subarr
# backwards compatibility: TimeSeries was once a distinct subclass.
TimeSeries = Series

#----------------------------------------------------------------------
# Add plotting methods to Series

import pandas.tools.plotting as _gfx

Series.plot = _gfx.plot_series
Series.hist = _gfx.hist_series

# Add arithmetic! Installs both the flex (add/sub with fill_value) and
# the special (__add__/__sub__/...) operator methods on Series.
ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)
ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
| mit |
muchbeli/bitcoin-abe | Abe/Chain/Bitcoin.py | 29 | 1035 | # Copyright(C) 2014 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
from .Sha256Chain import Sha256Chain
class Bitcoin(Sha256Chain):
    """Chain parameters for the main Bitcoin (BTC) network."""

    # NOTE: this project names the first parameter ``chain`` instead of
    # ``self`` by convention; behavior is identical.
    def __init__(chain, **kwargs):
        chain.name = 'Bitcoin'
        chain.code3 = 'BTC'
        # Version bytes used when rendering addresses: 0x00 for
        # pay-to-pubkey-hash, 0x05 for pay-to-script-hash (mainnet).
        chain.address_version = '\x00'
        chain.script_addr_vers = '\x05'
        # Magic bytes prefixing every message/block on the mainnet wire format.
        chain.magic = '\xf9\xbe\xb4\xd9'
        Sha256Chain.__init__(chain, **kwargs)
| agpl-3.0 |
inetCatapult/troposphere | examples/DynamoDB_Table_With_GSI_And_NonKeyAttributes_Projection.py | 22 | 4390 | #!/usr/bin/python
from troposphere import Template, Ref, Output, Parameter
from troposphere.dynamodb import (Key, AttributeDefinition,
                                  ProvisionedThroughput, Projection)
from troposphere.dynamodb import Table, GlobalSecondaryIndex

template = Template()

template.add_description("Create a dynamodb table with a global secondary "
                         "index")

# N.B. If you remove the provisioning section this works for
# LocalSecondaryIndexes as well.

readunits = template.add_parameter(Parameter(
    "ReadCapacityUnits",
    Description="Provisioned read throughput",
    Type="Number",
    Default="10",
    MinValue="5",
    MaxValue="10000",
    ConstraintDescription="should be between 5 and 10000"
))

writeunits = template.add_parameter(Parameter(
    "WriteCapacityUnits",
    Description="Provisioned write throughput",
    Type="Number",
    Default="5",
    MinValue="5",
    MaxValue="10000",
    ConstraintDescription="should be between 5 and 10000"
))

tableIndexName = template.add_parameter(Parameter(
    "TableIndexName",
    Description="Table: Primary Key Field",
    Type="String",
    Default="id",
    AllowedPattern="[a-zA-Z0-9]*",
    MinLength="1",
    MaxLength="2048",
    ConstraintDescription="must contain only alphanumberic characters"
))

tableIndexDataType = template.add_parameter(Parameter(
    "TableIndexDataType",
    Description=" Table: Primary Key Data Type",
    Type="String",
    Default="S",
    AllowedPattern="[S|N|B]",
    MinLength="1",
    MaxLength="1",
    ConstraintDescription="S for string data, N for numeric data, or B for "
                          "binary data"
))

secondaryIndexHashName = template.add_parameter(Parameter(
    "SecondaryIndexHashName",
    Description="Secondary Index: Primary Key Field",
    Type="String",
    Default="tokenType",
    AllowedPattern="[a-zA-Z0-9]*",
    MinLength="1",
    MaxLength="2048",
    ConstraintDescription="must contain only alphanumberic characters"
))

secondaryIndexHashDataType = template.add_parameter(Parameter(
    "SecondaryIndexHashDataType",
    Description="Secondary Index: Primary Key Data Type",
    Type="String",
    Default="S",
    AllowedPattern="[S|N|B]",
    MinLength="1",
    MaxLength="1",
    ConstraintDescription="S for string data, N for numeric data, or B for "
                          "binary data"
))

secondaryIndexRangeName = template.add_parameter(Parameter(
    "refreshSecondaryIndexRangeName",
    Description="Secondary Index: Range Key Field",
    Type="String",
    Default="tokenUpdatedTime",
    AllowedPattern="[a-zA-Z0-9]*",
    MinLength="1",
    MaxLength="2048",
    ConstraintDescription="must contain only alphanumberic characters"
))

secondaryIndexRangeDataType = template.add_parameter(Parameter(
    "SecondaryIndexRangeDataType",
    Description="Secondary Index: Range Key Data Type",
    Type="String",
    Default="S",
    AllowedPattern="[S|N|B]",
    MinLength="1",
    MaxLength="1",
    ConstraintDescription="S for string data, N for numeric data, or B for "
                          "binary data"
))

GSITable = template.add_resource(Table(
    "GSITable",
    # AttributeDefinitions may only list attributes that appear in a key
    # schema (of the table or of an index). Listing the projected non-key
    # attributes here makes DynamoDB reject the table with a
    # ValidationException, so they are intentionally absent.
    AttributeDefinitions=[
        AttributeDefinition(Ref(tableIndexName), Ref(tableIndexDataType)),
        AttributeDefinition(Ref(secondaryIndexHashName),
                            Ref(secondaryIndexHashDataType)),
        AttributeDefinition(Ref(secondaryIndexRangeName),
                            Ref(secondaryIndexRangeDataType))
    ],
    KeySchema=[
        Key(Ref(tableIndexName), "HASH")
    ],
    ProvisionedThroughput=ProvisionedThroughput(
        Ref(readunits),
        Ref(writeunits)
    ),
    GlobalSecondaryIndexes=[
        GlobalSecondaryIndex(
            "SecondaryIndex",
            [
                Key(Ref(secondaryIndexHashName), "HASH"),
                Key(Ref(secondaryIndexRangeName), "RANGE")
            ],
            # Non-key attributes are declared only in the projection.
            Projection("INCLUDE", ["non_key_attribute_0",
                                   "non_key_attribute_1"]),
            ProvisionedThroughput(
                Ref(readunits),
                Ref(writeunits)
            )
        )
    ]
))

template.add_output(Output(
    "GSITable",
    Value=Ref(GSITable),
    Description="Table with a Global Secondary Index",
))

print(template.to_json())
| bsd-2-clause |
ChameleonCloud/horizon | openstack_dashboard/dashboards/admin/group_types/tables.py | 2 | 4390 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters as filters
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard.api import cinder
class CreateGroupType(tables.LinkAction):
    # Table-level action: opens the "Create Group Type" modal form.
    name = "create"
    verbose_name = _("Create Group Type")
    url = "horizon:admin:group_types:create_type"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("volume", "group:group_types_manage"),)
class EditGroupType(tables.LinkAction):
    # Row-level action: opens the "Edit Group Type" modal for one row.
    name = "edit"
    verbose_name = _("Edit Group Type")
    url = "horizon:admin:group_types:update_type"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "group:group_types_manage"),)
class GroupTypeSpecs(tables.LinkAction):
    # Row-level action: opens the group-type specs view in a modal.
    name = "specs"
    verbose_name = _("View Specs")
    url = "horizon:admin:group_types:specs:index"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "group:group_types_manage"),)
class GroupTypesFilterAction(tables.FilterAction):

    def filter(self, table, group_types, filter_string):
        """Naive case-insensitive substring search over type names."""
        needle = filter_string.lower()
        return [gt for gt in group_types if needle in gt.name.lower()]
class DeleteGroupType(tables.DeleteAction):
    # Batch delete action; the two static methods provide the pluralized
    # button/confirmation labels that Horizon renders.
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Group Type",
            u"Delete Group Types",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Group Type",
            u"Deleted Group Types",
            count
        )
    policy_rules = (("volume", "group:group_types_manage"),)

    def delete(self, request, obj_id):
        # Delegate to cinder; on a BadRequest (e.g. type still in use)
        # show the error and send the user back to the index page.
        try:
            cinder.group_type_delete(request, obj_id)
        except exceptions.BadRequest as e:
            redirect_url = reverse("horizon:admin:group_types:index")
            exceptions.handle(request, e, redirect=redirect_url)
class UpdateRow(tables.Row):
    # Enables per-row AJAX refresh; get_data fetches the latest state.
    ajax = True

    def get_data(self, request, group_type_id):
        # NOTE(review): if cinder raises, ``group_type`` is never bound and
        # the final return would raise UnboundLocalError — presumably
        # exceptions.handle() aborts the request first; confirm.
        try:
            group_type = \
                cinder.group_type_get(request, group_type_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve group type.'))
        return group_type
class GroupTypesTable(tables.DataTable):
    # Column definitions; the form_field entries make the columns editable
    # inline where Horizon supports it.
    name = tables.WrappingColumn("name", verbose_name=_("Name"),
                                 form_field=forms.CharField(max_length=64))
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'),
                                form_field=forms.CharField(
                                    widget=forms.Textarea(attrs={'rows': 4}),
                                    required=False))
    public = tables.Column("is_public",
                           verbose_name=_("Public"),
                           filters=(filters.yesno, filters.capfirst),
                           form_field=forms.BooleanField(
                               label=_('Public'), required=False))

    def get_object_display(self, group_type):
        # Human-readable label used in confirmation dialogs etc.
        return group_type.name

    def get_object_id(self, group_type):
        return str(group_type.id)

    class Meta(object):
        name = "group_types"
        verbose_name = _("Group Types")
        table_actions = (
            GroupTypesFilterAction,
            CreateGroupType,
            DeleteGroupType,
        )
        row_actions = (
            GroupTypeSpecs,
            EditGroupType,
            DeleteGroupType
        )
        row_class = UpdateRow
| apache-2.0 |
jpetto/olympia | src/olympia/stats/management/commands/update_theme_popularity_movers.py | 7 | 3459 | import datetime
from django.core.management.base import BaseCommand
from django.db import connection
import commonware.log
from olympia.stats.models import (
ThemeUpdateCount, ThemeUpdateCountBulk, ThemeUserCount)
log = commonware.log.getLogger('adi.themepopularitymovers')
class Command(BaseCommand):
    """Compute the popularity and movers of themes from ADI data.

    Usage:
    ./manage.py update_theme_popularity_movers

    This will compute the popularity and movers of each theme, and store them
    in the Persona associated.

    Popularity: average number of users over the last 7 days.
    Movers: (popularity - (last 21 days avg)) / (last 21 days avg)

    """
    help = __doc__

    def handle(self, *args, **options):
        start = datetime.datetime.now()  # Measure the time it takes to run.
        # The theme_update_counts_from_* gather data for the day before, at
        # best.
        today = datetime.date.today()
        yesterday = today - datetime.timedelta(days=1)
        # Average number of users over the last 7 days (0 to 6 days ago), in
        # a list of tuples (addon_id, persona_id, count)
        last_week_avgs = ThemeUpdateCount.objects.get_range_days_avg(
            yesterday - datetime.timedelta(days=6),
            yesterday,
            'addon__persona__id')
        # Average number of users over the three weeks before last week
        # (7 to 27 days ago), in dictionary form ({addon_id: count}).
        prev_3_weeks_avgs = dict(ThemeUpdateCount.objects.get_range_days_avg(
            yesterday - datetime.timedelta(days=27),
            yesterday - datetime.timedelta(days=7)))
        temp_update_counts = []
        theme_user_counts = []
        for addon_id, persona_id, popularity in last_week_avgs:
            # Skip rows with no associated persona or addon.
            if not persona_id or not addon_id:
                continue
            # Create the temporary ThemeUpdateCountBulk for later bulk create.
            prev_3_weeks_avg = prev_3_weeks_avgs.get(addon_id, 0)
            theme_update_count_bulk = ThemeUpdateCountBulk(
                persona_id=persona_id,
                popularity=popularity,
                movers=0)
            # Set movers to 0 if values aren't high enough.
            if popularity > 100 and prev_3_weeks_avg > 1:
                theme_update_count_bulk.movers = (
                    popularity - prev_3_weeks_avg) / prev_3_weeks_avg
            theme_user_count = ThemeUserCount(
                addon_id=addon_id,
                count=popularity,
                date=today  # ThemeUserCount date is the processing date.
            )
            temp_update_counts.append(theme_update_count_bulk)
            theme_user_counts.append(theme_user_count)
        # Create in bulk: this is much faster.
        ThemeUpdateCountBulk.objects.all().delete()  # Clean slate first.
        ThemeUpdateCountBulk.objects.bulk_create(temp_update_counts, 100)
        ThemeUserCount.objects.bulk_create(theme_user_counts, 100)
        # Update Personas table in bulk from the above temp table: again, much
        # faster.
        raw_query = """
        UPDATE personas p, theme_update_counts_bulk t
        SET p.popularity=t.popularity,
            p.movers=t.movers
        WHERE t.persona_id=p.id
        """
        cursor = connection.cursor()
        cursor.execute(raw_query)
        # Use lazy %-style logging args instead of eager string formatting so
        # the timedelta is only rendered when debug logging is enabled.
        log.debug('Total processing time: %s',
                  datetime.datetime.now() - start)
| bsd-3-clause |
chebee7i/palettable | palettable/cubehelix/test/test_cubehelix.py | 3 | 3964 | # coding: utf-8
try:
    import pytest
except ImportError:
    raise ImportError('Tests require pytest >= 2.2.')

from ... import cubehelix

# Whether numpy is importable; Cubehelix.make() needs it, so several tests
# below are skipped when it is missing.
HAVE_NPY = cubehelix.cubehelix.HAVE_NPY
def test_print_maps(capsys):
    # Smoke test: listing the palettes must not raise and must write
    # something to stdout.
    cubehelix.print_maps()
    captured_out, _captured_err = capsys.readouterr()
    assert captured_out
def test_get_map():
    # Look-up is case-insensitive: 'CLASSIC_16' resolves to 'classic_16'.
    cmap = cubehelix.get_map('CLASSIC_16')
    assert cmap.url == 'http://adsabs.harvard.edu/abs/2011arXiv1108.5083G'
    assert len(cmap.colors) == 16
    assert cmap.type == 'sequential'
    assert cmap.name == 'classic_16'
def test_get_map_bad_name():
    # Unknown palette names raise KeyError rather than returning None.
    with pytest.raises(KeyError):
        cubehelix.get_map('bad name')
def test_get_map_reversed():
    # reverse=True must flip the color order end-to-end.
    forward = cubehelix.get_map('classic_16', reverse=False)
    backward = cubehelix.get_map('classic_16', reverse=True)
    assert forward.colors[::-1] == backward.colors
@pytest.mark.skipif('not HAVE_NPY')
def test_make_map_reversed():
    # Same reversal contract for dynamically generated palettes.
    forward = cubehelix.Cubehelix.make(n=16, reverse=False)
    backward = cubehelix.Cubehelix.make(n=16, reverse=True)
    assert forward.colors[::-1] == backward.colors
def test_palettes_loaded():
    # The module pre-builds the named palettes (and reversed variants)
    # at import time.
    assert isinstance(cubehelix.classic_16, cubehelix.Cubehelix)
    assert isinstance(cubehelix.classic_16_r, cubehelix.Cubehelix)
@pytest.mark.skipif('not HAVE_NPY')
def test_default_is_classic():
    # Cubehelix.make() with only n set reproduces the 'classic_16' palette.
    generated = cubehelix.Cubehelix.make(n=16)
    named = cubehelix.get_map('classic_16')
    assert named.colors == generated.colors
@pytest.mark.skipif('not HAVE_NPY')
def test_classic():
    # These parameters reproduce the pre-built 'classic_16' palette.
    palette = cubehelix.Cubehelix.make(start=0.5, rotation=-1.5, gamma=1.0,
                                       sat=1.2, min_light=0., max_light=1.,
                                       n=16)
    assert palette.colors == cubehelix.get_map('classic_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_perceptual_rainbow():
    # Parameters matching the pre-built 'perceptual_rainbow_16' palette.
    palette = cubehelix.Cubehelix.make(start_hue=240., end_hue=-300.,
                                       min_sat=1., max_sat=2.5,
                                       min_light=0.3, max_light=0.8, gamma=.9,
                                       n=16)
    assert palette.colors == cubehelix.get_map('perceptual_rainbow_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_purple():
    # Parameters matching the pre-built 'purple_16' palette.
    palette = cubehelix.Cubehelix.make(start=0., rotation=0.0, n=16)
    assert palette.colors == cubehelix.get_map('purple_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_jim_special():
    # Parameters matching the pre-built 'jim_special_16' palette.
    palette = cubehelix.Cubehelix.make(start=0.3, rotation=-0.5, n=16)
    assert palette.colors == cubehelix.get_map('jim_special_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_red():
    # Parameters matching the pre-built 'red_16' palette.
    palette = cubehelix.Cubehelix.make(start=0., rotation=0.5, n=16)
    assert palette.colors == cubehelix.get_map('red_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_cubehelix1():
    # Parameters matching the pre-built 'cubehelix1_16' palette.
    palette = cubehelix.Cubehelix.make(gamma=1.0, start=1.5,
                                       rotation=-1.0, sat=1.5, n=16)
    assert palette.colors == cubehelix.get_map('cubehelix1_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_cubehelix2():
    # Parameters matching the pre-built 'cubehelix2_16' palette.
    palette = cubehelix.Cubehelix.make(gamma=1.0, start=2.0, rotation=1.0,
                                       sat=1.5, n=16)
    assert palette.colors == cubehelix.get_map('cubehelix2_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_cubehelix3():
    # Parameters matching the pre-built 'cubehelix3_16' palette.
    palette = cubehelix.Cubehelix.make(gamma=1.0, start=2.0, rotation=1.0,
                                       sat=3, n=16)
    assert palette.colors == cubehelix.get_map('cubehelix3_16').colors
@pytest.mark.skipif('not HAVE_NPY')
def test_hex_color():
    # hex_colors must be clean hex strings — no 'L' may appear in any of
    # them (presumably guarding against Python 2 long-literal suffixes
    # leaking into the formatted values).
    palette = cubehelix.Cubehelix.make(start=0.5, rotation=-1.5, gamma=1.0,
                                       sat=1.2, min_light=0., max_light=1.,
                                       n=16)
    assert all('L' not in color for color in palette.hex_colors)
| mit |
raymondnijssen/QGIS | python/plugins/processing/gui/CheckboxesPanel.py | 13 | 3950 | # -*- coding: utf-8 -*-
"""
***************************************************************************
CheckBoxesPanel.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
Contributors : Arnaud Morvan
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Arnaud Morvan'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import (
QCheckBox,
QRadioButton,
QGridLayout,
QButtonGroup,
QSizePolicy,
QSpacerItem,
QWidget,
QMenu,
QAction
)
from qgis.PyQt.QtGui import QCursor
class CheckboxesPanel(QWidget):
    """Panel of check boxes (multi-select) or radio buttons (single-select)
    laid out in a fixed number of columns.

    ``options`` is a sequence of either plain strings (the stored value is
    then the option's index) or ``(value, text)`` pairs.
    """

    def __init__(self, options, multiple, columns=2, parent=None):
        super(CheckboxesPanel, self).__init__(parent)

        self._options = []
        for i, option in enumerate(options):
            if isinstance(option, str):
                # Plain string: use its position as the stored value.
                self._options.append((i, option))
            else:
                # Already a (value, text) pair.
                # BUG FIX: this used to append to the non-existent
                # ``self.options`` attribute and raised AttributeError.
                self._options.append(option)
        self._multiple = multiple
        self._buttons = []
        # Integer division so ``rows`` stays an int on Python 3 (Qt rejects
        # float indices); at least 1 so the modulo below never divides by
        # zero when there are fewer options than columns.
        rows = max(1, len(options) // columns)

        self._buttonGroup = QButtonGroup()
        self._buttonGroup.setExclusive(not multiple)

        layout = QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setMargin(0)
        for i, (v, t) in enumerate(self._options):
            if multiple:
                button = QCheckBox(t)
            else:
                button = QRadioButton(t)
            self._buttons.append((v, button))
            self._buttonGroup.addButton(button, i)
            # ``//`` keeps the grid coordinates integral (``i / rows`` is a
            # float on Python 3, which QGridLayout.addWidget rejects).
            layout.addWidget(button, i % rows, i // rows)
        layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum),
                       0, columns)
        self.setLayout(layout)

        if multiple:
            self.setContextMenuPolicy(Qt.CustomContextMenu)
            self.customContextMenuRequested.connect(self.showPopupMenu)

    def showPopupMenu(self):
        # Right-click menu offering select-all / clear-all shortcuts
        # (only wired up in multi-select mode).
        popup_menu = QMenu()
        select_all_action = QAction(self.tr('Select All'), popup_menu)
        select_all_action.triggered.connect(self.selectAll)
        clear_all_action = QAction(self.tr('Clear Selection'), popup_menu)
        clear_all_action.triggered.connect(self.deselectAll)
        popup_menu.addAction(select_all_action)
        popup_menu.addAction(clear_all_action)
        popup_menu.exec_(QCursor.pos())

    def selectAll(self):
        for (v, button) in self._buttons:
            button.setChecked(True)

    def deselectAll(self):
        for (v, button) in self._buttons:
            button.setChecked(False)

    def value(self):
        """Return the selected value(s): a list in multi-select mode,
        a single value otherwise."""
        if self._multiple:
            value = []
            for (v, checkbox) in self._buttons:
                if checkbox.isChecked():
                    value.append(v)
            return value
        else:
            # NOTE(review): if nothing is checked, checkedId() returns -1
            # and this yields the last option's value — presumably a radio
            # button is always checked; confirm.
            return self._options[self._buttonGroup.checkedId()][0]

    def setValue(self, value):
        if self._multiple:
            for (v, button) in self._buttons:
                button.setChecked(v in value)
        else:
            for v, button in self._buttons:
                button.setChecked(v == value)
| gpl-2.0 |
DaveA50/lbry | lbrynet/reflector/server/server.py | 1 | 5842 | import logging
from twisted.python import failure
from twisted.internet import error, defer
from twisted.internet.protocol import Protocol, ServerFactory
import json
from lbrynet.core.utils import is_valid_blobhash
log = logging.getLogger(__name__)
class ReflectorServer(Protocol):
    """Twisted protocol accepting blob uploads from a reflector client.

    Wire format: the client sends JSON request dicts; once a blob transfer
    has been negotiated, raw blob bytes follow on the same connection.
    """

    def connectionMade(self):
        # Initialize all per-connection state. ``receiving_blob`` flips to
        # True while raw blob bytes (not JSON) are expected on the wire.
        peer_info = self.transport.getPeer()
        log.debug('Connection made to %s', peer_info)
        self.peer = self.factory.peer_manager.get_peer(peer_info.host, peer_info.port)
        self.blob_manager = self.factory.blob_manager
        self.received_handshake = False
        self.peer_version = None
        self.receiving_blob = False
        self.incoming_blob = None
        self.blob_write = None
        self.blob_finished_d = None
        self.cancel_write = None
        self.request_buff = ""

    # NOTE(review): the default ``reason`` is evaluated once at class
    # definition time; harmless here since it is only logged, never mutated.
    def connectionLost(self, reason=failure.Failure(error.ConnectionDone())):
        log.info("Reflector upload from %s finished" % self.peer.host)

    def dataReceived(self, data):
        # While a blob transfer is active, bytes go straight into the blob;
        # otherwise they are buffered until a complete JSON request parses.
        if self.receiving_blob:
            # log.debug('Writing data to blob')
            self.blob_write(data)
        else:
            log.debug('Not yet recieving blob, data needs further processing')
            self.request_buff += data
            msg, extra_data = self._get_valid_response(self.request_buff)
            if msg is not None:
                self.request_buff = ''
                d = self.handle_request(msg)
                d.addCallbacks(self.send_response, self.handle_error)
                # Any bytes that followed the JSON request are the start of
                # the blob payload itself.
                if self.receiving_blob and extra_data:
                    log.debug('Writing extra data to blob')
                    self.blob_write(extra_data)

    def _get_valid_response(self, response_msg):
        """Scan the buffer for the shortest prefix that parses as JSON.

        Returns ``(parsed_dict_or_None, trailing_bytes_or_None)``. Tries
        each '}' as a candidate end of the JSON object; gives up (raises)
        once more than 100 bytes fail to parse.
        """
        extra_data = None
        response = None
        curr_pos = 0
        while True:
            next_close_paren = response_msg.find('}', curr_pos)
            if next_close_paren != -1:
                curr_pos = next_close_paren + 1
                try:
                    response = json.loads(response_msg[:curr_pos])
                except ValueError:
                    if curr_pos > 100:
                        raise Exception("error decoding response")
                    else:
                        pass
                else:
                    extra_data = response_msg[curr_pos:]
                    break
            else:
                break
        return response, extra_data

    def handle_request(self, request_dict):
        # The first request on a connection must be the version handshake.
        if self.received_handshake is False:
            return self.handle_handshake(request_dict)
        else:
            return self.handle_normal_request(request_dict)

    def handle_handshake(self, request_dict):
        # Only protocol version 0 is supported.
        log.debug('Handling handshake')
        if 'version' not in request_dict:
            raise ValueError("Client should send version")
        self.peer_version = int(request_dict['version'])
        if self.peer_version != 0:
            raise ValueError("I don't know that version!")
        self.received_handshake = True
        return defer.succeed({'version': 0})

    def determine_blob_needed(self, blob):
        # If we already have a validated copy, tell the client not to send
        # it; otherwise open the blob for writing and request the upload.
        if blob.is_validated():
            return {'send_blob': False}
        else:
            self.incoming_blob = blob
            self.blob_finished_d, self.blob_write, self.cancel_write = blob.open_for_writing(self.peer)
            self.blob_finished_d.addCallback(lambda _ :self.blob_manager.blob_completed(blob))
            return {'send_blob': True}

    def close_blob(self):
        # Reset all blob-transfer state so the next JSON request is parsed.
        self.blob_finished_d = None
        self.blob_write = None
        self.cancel_write = None
        self.incoming_blob = None
        self.receiving_blob = False

    def handle_normal_request(self, request_dict):
        if self.blob_write is None:
            # we haven't opened a blob yet, meaning we must be waiting for the
            # next message containing a blob hash and a length. this message
            # should be it. if it's one we want, open the blob for writing, and
            # return a nice response dict (in a Deferred) saying go ahead
            if not 'blob_hash' in request_dict or not 'blob_size' in request_dict:
                raise ValueError("Expected a blob hash and a blob size")
            if not is_valid_blobhash(request_dict['blob_hash']):
                raise ValueError("Got a bad blob hash: {}".format(request_dict['blob_hash']))
            log.debug('Recieved info for blob: %s', request_dict['blob_hash'])
            d = self.blob_manager.get_blob(
                request_dict['blob_hash'],
                True,
                int(request_dict['blob_size'])
            )
            d.addCallback(self.determine_blob_needed)
        else:
            # we have a blob open already, so this message should have nothing
            # important in it. to the deferred that fires when the blob is done,
            # add a callback which returns a nice response dict saying to keep
            # sending, and then return that deferred
            log.debug('blob is already open')
            self.receiving_blob = True
            d = self.blob_finished_d
            d.addCallback(lambda _: self.close_blob())
            d.addCallback(lambda _: {'received_blob': True})
        return d

    def send_response(self, response_dict):
        # Responses are bare JSON objects with no framing/delimiter.
        self.transport.write(json.dumps(response_dict))

    def handle_error(self, err):
        log.error(err.getTraceback())
        self.transport.loseConnection()
class ReflectorServerFactory(ServerFactory):
    """Factory producing one ReflectorServer protocol per connection,
    sharing the peer manager and blob manager across all of them."""
    protocol = ReflectorServer

    def __init__(self, peer_manager, blob_manager):
        self.peer_manager = peer_manager
        self.blob_manager = blob_manager

    def buildProtocol(self, addr):
        log.debug('Creating a protocol for %s', addr)
        return ServerFactory.buildProtocol(self, addr)
LIAMF-USP/word2vec-TF | src/basic_experiment/experiments5_to_8.py | 1 | 2985 | """
EXPERIMENT 5, 6, 7, & 8
Experiment with the models gensim, and the official tensorflow implementation
with different window sizes.
We use both a corpus in portuguese as a corpus in english
with and without preprocessing.
"""
import os
from train_functions import train_both_models_with_different_window_sizes
import sys
import inspect
import subprocess

# Make the parent directory importable so ``utils`` can be found when this
# script is run directly from src/basic_experiment/.
almost_current = os.path.abspath(inspect.getfile(inspect.currentframe()))
currentdir = os.path.dirname(almost_current)
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils import clean_text  # noqa

# Raw corpora (downloaded on demand) and their cleaned counterparts.
en_path_to_raw_corpus = os.path.join('corpora', 'text8.txt')
pt_path_to_raw_corpus = os.path.join('corpora', 'pt96.txt')
en_path_to_corpus = os.path.join('corpora', 'text8CLEAN.txt')
pt_path_to_corpus = os.path.join('corpora', 'pt96CLEAN.txt')

en_condition = os.path.exists(en_path_to_raw_corpus)
pt_condition = os.path.exists(pt_path_to_raw_corpus)
if not (en_condition and pt_condition):
    # Download missing corpora and wait for the script to finish before
    # attempting to clean them.
    pro = subprocess.Popen(["bash", "download_corpora.sh"])
    pro.wait()
if not os.path.exists(en_path_to_corpus):
    clean_text(en_path_to_raw_corpus)
if not os.path.exists(pt_path_to_corpus):
    clean_text(pt_path_to_raw_corpus)

# Language tags: p/e = Portuguese/English, R/C = raw/cleaned corpus.
pt_languageR = 'pR'
en_languageR = 'eR'
pt_languageC = 'pC'
en_languageC = 'eC'
# (Removed an unused ``window_size`` variable; the window sizes actually
# trained are the ones in ``window_list``.)
emb_size = 500
window_list = [2, 5, 10, 15, 20, 25]
epochs_to_train = 5

# EXPERIMENT 5: portuguese raw
train_both_models_with_different_window_sizes(pt_languageR,
                                              emb_size,
                                              window_list,
                                              epochs_to_train,
                                              pt_path_to_raw_corpus,
                                              "experiment5")

# EXPERIMENT 6: english raw
train_both_models_with_different_window_sizes(en_languageR,
                                              emb_size,
                                              window_list,
                                              epochs_to_train,
                                              en_path_to_raw_corpus,
                                              "experiment6")

# EXPERIMENT 7: portuguese clean
train_both_models_with_different_window_sizes(pt_languageC,
                                              emb_size,
                                              window_list,
                                              epochs_to_train,
                                              pt_path_to_corpus,
                                              "experiment7")

# EXPERIMENT 8: english clean
train_both_models_with_different_window_sizes(en_languageC,
                                              emb_size,
                                              window_list,
                                              epochs_to_train,
                                              en_path_to_corpus,
                                              "experiment8")
| mit |
MinFu/youtube-dl | test/test_aes.py | 124 | 2032 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_decrypt_text
from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64
# the encrypted data can be generate with 'devscripts/generate_aes_testdata.py'
class TestAES(unittest.TestCase):
    def setUp(self):
        # 16-byte key/IV pair ([0x20, 0x15, 0, 0, ...]) shared by the
        # CBC and decrypt_text tests below.
        self.key = self.iv = [0x20, 0x15] + 14 * [0]
        self.secret_msg = b'Secret message goes here'

    def test_encrypt(self):
        # Round-trip: encrypt then decrypt must reproduce the plaintext.
        msg = b'message'
        key = list(range(16))
        encrypted = aes_encrypt(bytes_to_intlist(msg), key)
        decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
        self.assertEqual(decrypted, msg)

    def test_cbc_decrypt(self):
        # Fixed ciphertext generated by devscripts/generate_aes_testdata.py;
        # rstrip removes the 0x08 padding bytes.
        data = bytes_to_intlist(
            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
        )
        decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)

    def test_decrypt_text(self):
        # aes_decrypt_text input format: base64(8-byte nonce + ciphertext);
        # exercised with both a 16- and a 32-byte key size.
        password = intlist_to_bytes(self.key).decode('utf-8')
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8]) +
            b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
        ).decode('utf-8')
        decrypted = (aes_decrypt_text(encrypted, password, 16))
        self.assertEqual(decrypted, self.secret_msg)

        password = intlist_to_bytes(self.key).decode('utf-8')
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8]) +
            b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
        ).decode('utf-8')
        decrypted = (aes_decrypt_text(encrypted, password, 32))
        self.assertEqual(decrypted, self.secret_msg)


if __name__ == '__main__':
    unittest.main()
| unlicense |
mkl-/scioncc | src/pyon/datastore/test/test_id_factory.py | 4 | 1063 | from pyon.util.int_test import IonIntegrationTestCase
from pyon.datastore.id_factory import IDFactory, SaltedTimeIDFactory
from nose.plugins.attrib import attr
@attr('UNIT', group='datastore')
class GeneratorTest(IonIntegrationTestCase):
    """Unit tests for SaltedTimeIDFactory id generation.

    Locals renamed so they no longer shadow the builtin ``id``.
    """

    def test_length(self):
        # Default factory yields 10-char ids; salt_chars=5 yields 12.
        subject = SaltedTimeIDFactory()
        generated = subject.create_id()
        self.assertEqual(10, len(generated))
        sub2 = SaltedTimeIDFactory(salt_chars=5)
        generated = sub2.create_id()
        self.assertEqual(12, len(generated))

    def test_increasing(self):
        # Consecutive ids must be strictly increasing.
        subject = SaltedTimeIDFactory()
        previous = subject.create_id()
        for _ in xrange(20):
            current = subject.create_id()
            self.assertTrue(current > previous, msg='%s v %s' % (previous, current))
            previous = current

    def test_change_salt(self):
        # use unusually large salt to make it
        # nearly impossible 2 random salts will be equal
        subject = SaltedTimeIDFactory(salt_chars=10)
        first = subject.create_id()
        second = subject.replace_duplicate()
        self.assertTrue(first[-10:] != second[-10:])
| bsd-2-clause |
BBVA/chaos-monkey-engine | test/unit/chaosmonkey/api/plans_blueprint_test.py | 1 | 2367 | from datetime import datetime, timedelta
from flask import url_for, json
from chaosmonkey.api.hal import Document
import test.attacks.attack1 as attack1_module
import test.planners.planner1 as planner1_module
# Request payload accepted by POST /plans; it references the test attack and
# planner modules that the tests register in the manager fixture.
valid_request_body = {
    "name": "Test Planner",
    "attack": {
        "ref": "test.attacks.attack1:Attack1",
        "args": {}
    },
    "planner": {
        "ref": "test.planners.planner1:Planner1",
        "args": {}
    }
}
def test_empty_plans_return_hal(app):
    # With no plans stored, the endpoint returns an empty HAL document.
    url = url_for("plans.list_plans")
    with app.test_request_context(url):
        response = app.test_client().get(url)
    assert response.status_code == 200
    assert response.mimetype == "application/hal+json"
    assert response.json == Document(data={"plans": []}).to_dict()
def test_plan_list_return_hal(app, manager):
    # A stored plan must be listed back as HAL.
    url = url_for("plans.list_plans")
    stored_plan = manager.add_plan("plan name")
    with app.test_request_context(url):
        expected_plans = [stored_plan.to_dict()]
        response = app.test_client().get(url)
    assert response.status_code == 200
    assert response.mimetype == "application/hal+json"
    assert response.json == Document(data={"plans": expected_plans}).to_dict()
def test_plan_get_return_hal_with_executors(app, manager):
    # Fetching a single plan embeds its executors in the HAL document.
    stored_plan = manager.add_plan("plan name")
    scheduled_at = datetime.now() + timedelta(hours=10)
    stored_executor = manager.add_executor(scheduled_at, "executor name", {},
                                           stored_plan.id)
    url = url_for("plans.get_plan", plan_id=stored_plan.id)
    with app.test_request_context(url):
        response = app.test_client().get(url)
    expected = Document(data=stored_plan.to_dict(),
                        embedded={"executors": [stored_executor.to_dict()]}).to_dict()
    assert response.status_code == 200
    assert response.mimetype == "application/hal+json"
    assert response.json == expected
def test_plan_add_valid_body(app, manager):
    # A well-formed payload is accepted once the referenced planner and
    # attack modules are registered in the stores.
    url = url_for("plans.add_plan")
    manager.attacks_store.add(attack1_module)
    manager.planners_store.add(planner1_module)
    payload = json.dumps(valid_request_body)
    with app.test_request_context(url):
        response = app.test_client().post(url,
                                          content_type="application/json",
                                          data=payload)
    assert response.status_code == 200
    assert response.mimetype == "application/json"
    assert response.json == {"msg": "ok"}
| apache-2.0 |
Galexrt/zulip | zerver/webhooks/taiga/view.py | 2 | 13294 | """Taiga integration for Zulip.
Tips for notification output:
*Emojis*: most of the events have specific emojis e.g.
- :notebook: - change of subject/name/description
- :chart_with_upwards_trend: - change of status
etc. If there's no meaningful emoji for a certain event, the defaults are used:
- :thought_balloon: - event connected to commenting
- :busts_in_silhouette: - event connected to a certain user
- :package: - all other events connected to user story
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new
value should always be in bold; otherwise the subject of US/task
should be in bold.
"""
from typing import Any, Dict, List, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, message=REQ(argument_type='body'),
                      stream=REQ(default='taiga'), topic=REQ(default='General')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], Text, Text) -> HttpResponse
    # Render one line per parsed event, then sort the lines so the
    # notification order is deterministic regardless of payload order.
    lines = [generate_content(event) + '\n' for event in parse_message(message)]
    content = "".join(sorted(lines))
    check_send_stream_message(user_profile, request.client, stream, topic, content)
    return json_success()
# Message templates, keyed first by the Taiga object type and then by the
# event name produced by the parse_* helpers below.  %-style placeholders are
# filled from the event's 'values' dict by generate_content().
# NOTE(review): the 'epic' section uses newer emoji names
# (:chart_increasing:, :cross_mark:) while the older sections use
# :chart_with_upwards_trend: and :x: -- confirm both names render in Zulip.
templates = {
    # Epics.
    'epic': {
        'create': u':package: %(user)s created epic **%(subject)s**',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned epic **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned epic **%(subject)s**',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned epic **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'blocked': u':lock: %(user)s blocked epic **%(subject)s**',
        'unblocked': u':unlock: %(user)s unblocked epic **%(subject)s**',
        'changed_status': u':chart_increasing: %(user)s changed status of epic **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed epic from **%(old)s** to **%(new)s**',
        'description_diff': u':notebook: %(user)s updated description of epic **%(subject)s**',
        'commented': u':thought_balloon: %(user)s commented on epic **%(subject)s**',
        'delete': u':cross_mark: %(user)s deleted epic **%(subject)s**',
    },
    # User stories attached to / detached from an epic.
    'relateduserstory': {
        'create': (u':package: %(user)s added a related user story '
                   u'**%(userstory_subject)s** to the epic **%(epic_subject)s**'),
        'delete': (u':cross_mark: %(user)s removed a related user story ' +
                   u'**%(userstory_subject)s** from the epic **%(epic_subject)s**'),
    },
    # User stories.
    'userstory': {
        'create': u':package: %(user)s created user story **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned user story **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
        'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
        'unset_milestone': u':calendar: %(user)s removed user story **%(subject)s** from sprint %(old)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s'
        ' to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
        'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of user story **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
        'delete': u':x: %(user)s deleted user story **%(subject)s**.'
    },
    # Sprints.
    'milestone': {
        'create': u':calendar: %(user)s created sprint **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
        'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
    },
    # Tasks.
    'task': {
        'create': u':clipboard: %(user)s created task **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned task **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of task **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
        'delete': u':x: %(user)s deleted task **%(subject)s**.',
        'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
    },
    # Issues.
    'issue': {
        'create': u':bulb: %(user)s created issue **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned issue **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s**'
        ' from %(old)s to %(new)s.',
        'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of issue **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
        'delete': u':x: %(user)s deleted issue **%(subject)s**.'
    },
}
def get_old_and_new_values(change_type, message):
    # type: (str, Mapping[str, Any]) -> Tuple[Optional[Any], Optional[Any]]
    """Return the (previous, current) values of ``change_type``.

    ``change_type`` is a key of the payload's ``change.diff`` mapping (e.g.
    "status", "milestone").  Either side may be absent -- e.g. when a value
    was set for the first time or was cleared -- in which case ``None`` is
    returned for that side.
    """
    # The original implementation had two branches that did the same thing
    # (one direct-indexing, one with try/except KeyError).  The diff entry
    # itself always exists because callers iterate over the diff's keys, so
    # only the "from"/"to" sub-keys can be missing; dict.get covers both.
    diff = message["change"]["diff"][change_type]
    return diff.get("from"), diff.get("to")
def parse_comment(message):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    """Build the event dict for a comment on an issue, task or user story."""
    payload = message["data"]
    return {
        'event': 'commented',
        'type': message["type"],
        'values': {
            # Inlined helpers: author full name and subject (name fallback).
            'user': message["by"]["full_name"],
            'subject': payload.get("subject", payload.get("name")),
        },
    }
def parse_create_or_delete(message):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    """Build the event dict for a 'create' or 'delete' action."""
    if message["type"] == 'relateduserstory':
        # Related user stories carry both the epic and user-story subjects.
        values = {
            'user': message["by"]["full_name"],
            'epic_subject': message['data']['epic']['subject'],
            'userstory_subject': message['data']['user_story']['subject'],
        }
    else:
        payload = message["data"]
        values = {
            'user': message["by"]["full_name"],
            'subject': payload.get("subject", payload.get("name")),
        }
    return {
        'type': message["type"],
        'event': message["action"],
        'values': values,
    }
def parse_change_event(change_type, message):
    # type: (str, Mapping[str, Any]) -> Optional[Dict[str, Any]]
    """ Parses change event.

    Maps one key of the payload's change diff to an event dict understood by
    generate_content(), or returns None for unchanged dates and unsupported
    change types (which callers then drop).
    """
    evt = {}  # type: Dict[str, Any]
    values = {
        'user': get_owner_name(message),
        'subject': get_subject(message)
    }  # type: Dict[str, Any]
    # Description edits and point estimations map straight through.
    if change_type in ["description_diff", "points"]:
        event_type = change_type
    # Assignment/sprint changes distinguish set / unset / changed.
    elif change_type in ["milestone", "assigned_to"]:
        old, new = get_old_and_new_values(change_type, message)
        if not old:
            event_type = "set_" + change_type
            values["new"] = new
        elif not new:
            event_type = "unset_" + change_type
            values["old"] = old
        else:
            event_type = "changed_" + change_type
            values.update({'old': old, 'new': new})
    # Boolean flags: the "to" side tells us which way they flipped.
    elif change_type == "is_blocked":
        if message["change"]["diff"]["is_blocked"]["to"]:
            event_type = "blocked"
        else:
            event_type = "unblocked"
    elif change_type == "is_closed":
        if message["change"]["diff"]["is_closed"]["to"]:
            event_type = "closed"
        else:
            event_type = "reopened"
    # A task moved to a different user story.
    elif change_type == "user_story":
        old, new = get_old_and_new_values(change_type, message)
        event_type = "changed_us"
        values.update({'old': old, 'new': new})
    # Subject/name edits are reported as a rename.
    elif change_type in ["subject", 'name']:
        event_type = 'renamed'
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    elif change_type in ["estimated_finish", "estimated_start"]:
        old, new = get_old_and_new_values(change_type, message)
        if not old == new:
            event_type = change_type
            values.update({'old': old, 'new': new})
        else:
            # date hasn't changed
            return None
    elif change_type in ["priority", "severity", "type", "status"]:
        event_type = 'changed_' + change_type
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    else:
        # we are not supporting this type of event
        return None
    evt.update({"type": message["type"], "event": event_type, "values": values})
    return evt
def parse_message(message):
    # type: (Mapping[str, Any]) -> List[Dict[str, Any]]
    """Turn one webhook payload into a list of event dicts.

    Create/delete actions yield exactly one event; a change action yields one
    event per supported diff entry, plus one if the change has a comment.
    """
    action = message["action"]
    if action in ('create', 'delete'):
        return [parse_create_or_delete(message)]
    events = []
    if action == 'change':
        change = message["change"]
        if change["diff"]:
            # parse_change_event() returns None for unsupported entries.
            events.extend(
                evt for evt in (parse_change_event(field, message)
                                for field in change["diff"])
                if evt)
        if change["comment"]:
            events.append(parse_comment(message))
    return events
def generate_content(data):
    # type: (Mapping[str, Any]) -> str
    """Look up the message template for this event and fill in its values."""
    template = templates[data['type']][data['event']]
    return template % data['values']
def get_owner_name(message):
    # type: (Mapping[str, Any]) -> str
    """Return the full name of the user who triggered the event."""
    owner = message["by"]
    return owner["full_name"]
def get_subject(message):
    # type: (Mapping[str, Any]) -> str
    """Return the subject of the affected object, falling back to its name."""
    payload = message["data"]
    if "subject" in payload:
        return payload["subject"]
    return payload.get("name")
| apache-2.0 |
asimshankar/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
  """Categorical tests."""

  def testSingleCategoricalProcessor(self):
    # NaN inputs map to id 0; seen categories get increasing ids starting
    # at 1 (min_frequency=1 means a category must appear at least twice --
    # singletons such as 3 fall back to 0).
    cat_processor = categorical.CategoricalProcessor(min_frequency=1)
    x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
                                     [1], ["0"], [np.nan], [3]])
    self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])

  def testSingleCategoricalProcessorPandasSingleDF(self):
    # Only runs when pandas is available in the environment.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      cat_processor = categorical.CategoricalProcessor()
      data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
      x = list(cat_processor.fit_transform(data))
      self.assertAllEqual(list(x), [[1], [2], [1]])

  def testMultiCategoricalProcessor(self):
    # share=False keeps a separate vocabulary per column.
    cat_processor = categorical.CategoricalProcessor(
        min_frequency=0, share=False)
    x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
                                     ["3", "Male"]])
    self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])


if __name__ == "__main__":
  test.main()
| apache-2.0 |
bgxavier/neutron | neutron/api/v2/router.py | 8 | 5082 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import routes as routes_mapper
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
from neutron import policy
from neutron import quota
from neutron import wsgi
LOG = logging.getLogger(__name__)
RESOURCES = {'network': 'networks',
'subnet': 'subnets',
'subnetpool': 'subnetpools',
'port': 'ports'}
SUB_RESOURCES = {}
COLLECTION_ACTIONS = ['index', 'create']
MEMBER_ACTIONS = ['show', 'update', 'delete']
REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'}
class Index(wsgi.Application):
    """WSGI application serving the API index (the resource listing at /)."""

    def __init__(self, resources):
        # resources: dict mapping resource name -> collection name
        # (e.g. 'network' -> 'networks').
        self.resources = resources

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Render the list of resources with self-links, in the client's
        preferred content type."""
        metadata = {}

        layout = []
        for name, collection in six.iteritems(self.resources):
            # Each entry links back to its own collection URL.
            href = urlparse.urljoin(req.path_url, collection)
            resource = {'name': name,
                        'collection': collection,
                        'links': [{'rel': 'self',
                                   'href': href}]}
            layout.append(resource)
        response = dict(resources=layout)
        content_type = req.best_match_content_type()
        body = wsgi.Serializer(metadata=metadata).serialize(response,
                                                            content_type)
        return webob.Response(body=body, content_type=content_type)
class APIRouter(wsgi.Router):
    """Top-level v2 API router: wires core and sub-resources to controllers."""

    @classmethod
    def factory(cls, global_config, **local_config):
        # PasteDeploy entry point.
        return cls(**local_config)

    def __init__(self, **local_config):
        mapper = routes_mapper.Mapper()
        plugin = manager.NeutronManager.get_plugin()
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        # Let loaded extensions add attributes before routes are built.
        ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)

        col_kwargs = dict(collection_actions=COLLECTION_ACTIONS,
                          member_actions=MEMBER_ACTIONS)

        def _map_resource(collection, resource, params, parent=None):
            # Build one controller per resource and register its routes;
            # sub-resources get a /<parent>/{parent_id}/ path prefix.
            allow_bulk = cfg.CONF.allow_bulk
            allow_pagination = cfg.CONF.allow_pagination
            allow_sorting = cfg.CONF.allow_sorting
            controller = base.create_resource(
                collection, resource, plugin, params, allow_bulk=allow_bulk,
                parent=parent, allow_pagination=allow_pagination,
                allow_sorting=allow_sorting)
            path_prefix = None
            if parent:
                path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
                                                  parent['member_name'],
                                                  collection)
            mapper_kwargs = dict(controller=controller,
                                 requirements=REQUIREMENTS,
                                 path_prefix=path_prefix,
                                 **col_kwargs)
            return mapper.collection(collection, resource,
                                     **mapper_kwargs)

        mapper.connect('index', '/', controller=Index(RESOURCES))
        for resource in RESOURCES:
            _map_resource(RESOURCES[resource], resource,
                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
                              RESOURCES[resource], dict()))
            # Core resources are also quota-tracked.
            quota.QUOTAS.register_resource_by_name(resource)

        for resource in SUB_RESOURCES:
            _map_resource(SUB_RESOURCES[resource]['collection_name'], resource,
                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
                              SUB_RESOURCES[resource]['collection_name'],
                              dict()),
                          SUB_RESOURCES[resource]['parent'])

        # Certain policy checks require that the extensions are loaded
        # and the RESOURCE_ATTRIBUTE_MAP populated before they can be
        # properly initialized. This can only be claimed with certainty
        # once this point in the code has been reached. In the event
        # that the policies have been initialized before this point,
        # calling reset will cause the next policy check to
        # re-initialize with all of the required data in place.
        policy.reset()
        super(APIRouter, self).__init__(mapper)
| apache-2.0 |
Debian/dput-ng | dput/hooks/distro_info_checks.py | 1 | 6966 | # -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Copyright (c) 2013 dput authors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from dput.core import logger
from dput.exceptions import HookException
try:
from distro_info import (DebianDistroInfo, UbuntuDistroInfo,
DistroDataOutdated)
except ImportError:
logger.warning('Uploading to Ubuntu requires python3-distro-info to be '
'installed')
raise
class UnknownDistribution(HookException):
    """
    Subclass of the :class:`dput.exceptions.HookException`.

    Thrown when a checker encounters a distribution, pocket or codename
    source it does not recognize.
    """
    pass
class UnsupportedDistribution(HookException):
    """
    Subclass of the :class:`dput.exceptions.HookException`.

    Thrown when the ``supported-distribution`` checker finds a release that
    isn't supported (unknown, or no longer receiving updates).
    """
    pass
class FieldEmptyException(HookException):
    """
    Subclass of the :class:`dput.exceptions.HookException`.

    Thrown when the ``required_fields`` checker finds a field that should be
    non-empty but is empty or missing in the .changes file.
    """
    pass
def check_supported_distribution(changes, profile, interface):
    """
    The ``supported-distribution`` checker is a stock dput checker that checks
    packages intended for upload for a valid upload distribution.

    Does nothing unless the profile declares ``codenames``; raises
    :class:`UnknownDistribution` / :class:`UnsupportedDistribution` on bad
    targets.

    Profile key: supported-distribution
    """
    suite = changes['Distribution']
    if profile.get('codenames'):
        # Ubuntu-style suites look like "<release>-<pocket>"; a bare release
        # targets the implicit 'release' pocket.
        if '-' in suite:
            release, pocket = suite.split('-', 1)
        else:
            release, pocket = suite, 'release'

        codenames = profile['codenames']
        if codenames == 'ubuntu':
            distro_info = UbuntuDistroInfo()
            pockets = profile['supported-distribution']
            # Fix: this is diagnostic output, not an emergency -- the
            # original dumped the pocket table at critical severity.
            logger.debug(pockets)
            if pocket not in pockets['known']:
                # Fix: typo in the error message ("Unkown" -> "Unknown").
                raise UnknownDistribution("Unknown pocket: %s" % pocket)
            if pocket not in pockets['allowed']:
                raise UnknownDistribution(
                    "Uploads aren't permitted to pocket: %s" % pocket)
        elif codenames == 'debian':
            distro_info = DebianDistroInfo()
        else:
            raise UnknownDistribution("distro-info doesn't know about %s"
                                      % codenames)
        try:
            # Resolve aliases like "unstable" to a codename, then make sure
            # the release exists and is still supported.
            codename = distro_info.codename(release, default=release)
            if codename not in distro_info.all:
                raise UnsupportedDistribution('Unknown release %s' % release)
            if codename not in distro_info.supported():
                raise UnsupportedDistribution('Unsupported release %s'
                                              % release)
        except DistroDataOutdated:
            # Fix: logger.warn() is a deprecated alias for warning().
            logger.warning('distro-info is outdated, '
                           'unable to check supported releases')
def required_fields(changes, profile, interface):
    """
    The ``required-fields`` checker is a stock dput checker that checks if the
    specified fields are non-empty in the Changes file, if the upload is
    targeting a specified distribution.

    Profile key: ``required-fields``

    Example profile::

        "required-fields": {
            ...
            "suites": "any-stable",
            "fields": ["Launchpad-Bugs-Fixed"],
            "skip": false
            ...
        }

    ``skip``    controls if the checker should drop out without checking
                for anything at all.

    ``fields``  This controls what we check for. Any fields present in this
                list must be present and non-empty in the ``.changes`` file
                being uploaded.

    ``suites``  This controls which target suites the check is active for. It
                is a list containing suite names, or the special keywords
                "any-stable" or "devel". If the field is missing or empty,
                this check is active for all targets.
    """
    required_fields = profile.get('required-fields')
    if required_fields is None:
        logger.debug('Not running required-fields: empty')
        return
    # NOTE: the default is True, so the checker only runs when the profile
    # explicitly sets "skip": false (as in the example above).
    if required_fields.get('skip', True):
        logger.debug('Not running required-fields: skipped')
        return

    applicable_distributions = set(required_fields.get('suites', []))

    codenames = profile['codenames']
    if codenames == 'ubuntu':
        distro_info = UbuntuDistroInfo()
    elif codenames == 'debian':
        distro_info = DebianDistroInfo()
    else:
        raise UnknownDistribution("distro-info doesn't know about %s"
                                  % codenames)

    # Expand the "any-stable" keyword to every supported release, minus the
    # development release and experimental (unless "devel" was also listed).
    if 'any-stable' in applicable_distributions:
        applicable_distributions.remove('any-stable')
        supported = set(distro_info.supported())
        if 'devel' not in applicable_distributions:
            try:
                supported -= set([distro_info.devel(), 'experimental'])
            # if there is no devel distro, just ignore it
            except DistroDataOutdated:
                supported -= set(['experimental'])
        applicable_distributions |= supported

    # Translate a bare "devel" keyword into the current development codename.
    if 'devel' in applicable_distributions and \
            'any-stable' not in applicable_distributions:
        # if any-stable is in there, it'll have done this already
        applicable_distributions.remove('devel')
        applicable_distributions.add(distro_info.devel())

    # Sanity-check the resulting suite list before using it.
    for codename in applicable_distributions:
        if codename not in distro_info.all:
            raise UnsupportedDistribution('Unknown release %s' % codename)

    distribution = changes.get("Distribution").strip()
    logger.debug("required-fields: Applying hook for %s" %
                 applicable_distributions)
    if distribution not in applicable_distributions:
        return

    # Every listed field must exist and be non-empty in the .changes file.
    for field in required_fields["fields"]:
        try:
            value = changes[field]
            if not value:
                raise FieldEmptyException(
                    "The field '%s' is required for upload to '%s', "
                    "but it is empty." % (field, distribution))
        except KeyError:
            raise FieldEmptyException(
                "The field '%s' is required for uploads to '%s', "
                "but it is missing." % (field, distribution))
| gpl-2.0 |
mrb/letsencrypt | letsencrypt/tests/colored_logging_test.py | 23 | 1197 | """Tests for letsencrypt.colored_logging."""
import logging
import StringIO
import unittest
from letsencrypt import le_util
class StreamHandlerTest(unittest.TestCase):
    """Tests for letsencrypt.colored_logging."""

    def setUp(self):
        from letsencrypt import colored_logging
        self.stream = StringIO.StringIO()
        # Pretend the stream is a terminal so the handler emits color codes.
        self.stream.isatty = lambda: True
        self.handler = colored_logging.StreamHandler(self.stream)

        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.handler)

    def test_format(self):
        # Below the red_level threshold: message is passed through unstyled.
        msg = 'I did a thing'
        self.logger.debug(msg)
        self.assertEqual(self.stream.getvalue(), '{0}\n'.format(msg))

    def test_format_and_red_level(self):
        # At/above red_level the message is wrapped in ANSI red codes.
        msg = 'I did another thing'
        self.handler.red_level = logging.DEBUG
        self.logger.debug(msg)

        self.assertEqual(self.stream.getvalue(),
                         '{0}{1}{2}\n'.format(le_util.ANSI_SGR_RED,
                                              msg,
                                              le_util.ANSI_SGR_RESET))


if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| apache-2.0 |
sander76/home-assistant | homeassistant/components/garmin_connect/config_flow.py | 3 | 2409 | """Config flow for Garmin Connect integration."""
import logging
from garminconnect import (
Garmin,
GarminConnectAuthenticationError,
GarminConnectConnectionError,
GarminConnectTooManyRequestsError,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ID, CONF_PASSWORD, CONF_USERNAME
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class GarminConnectConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Garmin Connect."""

    # Schema version of the config entry created by this flow.
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    async def _show_setup_form(self, errors=None):
        """Show the setup form to the user."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
            ),
            errors=errors or {},
        )

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        if user_input is None:
            return await self._show_setup_form()

        garmin_client = Garmin(user_input[CONF_USERNAME], user_input[CONF_PASSWORD])

        errors = {}
        try:
            # login() performs blocking network I/O; run it in the executor
            # to keep the event loop responsive.
            await self.hass.async_add_executor_job(garmin_client.login)
        except GarminConnectConnectionError:
            errors["base"] = "cannot_connect"
            return await self._show_setup_form(errors)
        except GarminConnectAuthenticationError:
            errors["base"] = "invalid_auth"
            return await self._show_setup_form(errors)
        except GarminConnectTooManyRequestsError:
            errors["base"] = "too_many_requests"
            return await self._show_setup_form(errors)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors["base"] = "unknown"
            return await self._show_setup_form(errors)

        # The account's full name doubles as the unique id; abort if an
        # entry for this account already exists.
        unique_id = garmin_client.get_full_name()
        await self.async_set_unique_id(unique_id)
        self._abort_if_unique_id_configured()

        return self.async_create_entry(
            title=unique_id,
            data={
                CONF_ID: unique_id,
                CONF_USERNAME: user_input[CONF_USERNAME],
                CONF_PASSWORD: user_input[CONF_PASSWORD],
            },
        )
| apache-2.0 |
zachcp/qiime | scripts/unweight_fasta.py | 15 | 2216 | #!/usr/bin/env python
# File created on 20 Jun 2011
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from skbio.parse.sequences import parse_fasta
from qiime.util import make_option
from qiime.util import parse_command_line_parameters
# Command-line metadata consumed by qiime.util.parse_command_line_parameters.
script_info = {}
script_info[
    'brief_description'] = "Transform fasta files with abundance weighting into unweighted"
script_info['script_description'] = '''E.g. makes 3 fasta records from a weighted input fasta file containing the following record:
>goodsample1_12_3 bc_val=20
AATGCTTGTCACATCGATGC
'''
script_info['script_usage'] = [("", '''make 3 fasta records from the following record:
>goodsample1_12_3 bc_val=20
AATGCTTGTCACATCGATGC
resulting in:
>goodsample_0
AATGCTTGTCACATCGATGC
>goodsample_1
AATGCTTGTCACATCGATGC
>goodsample_2
AATGCTTGTCACATCGATGC''', "%prog -i input.fna -o output.fna -l goodsample")]
script_info['output_description'] = "a .fasta file"
# Required CLI options: input fasta, output path, and the label prefix used
# for every emitted record.
script_info['required_options'] = [
    make_option(
        '-i',
        '--input_fasta',
        type='existing_filepath',
        help='the input fasta file'),
    make_option(
        '-o',
        '--output_file',
        type='new_filepath',
        help='the output fasta filepath'),
    make_option(
        '-l',
        '--label',
        type='string',
        help='sequence label used for all records. fasta label lines will look like: >label_423'),
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
    """Expand an abundance-weighted fasta file into unweighted records.

    Each input label is expected to end in ``_<abundance>`` in its first
    whitespace-separated token (e.g. ``goodsample1_12_3`` has abundance 3);
    that sequence is written out <abundance> times under sequential labels
    ``<opts.label>_<counter>``.
    """
    option_parser, opts, args =\
        parse_command_line_parameters(**script_info)

    seq_counter = 0
    # Fix: use context managers so both files are closed even on error
    # (the original opened both handles and never closed them).
    with open(opts.input_fasta, 'U') as input_fh, \
            open(opts.output_file, 'w') as seqsfna_fh:
        for label, seq in parse_fasta(input_fh):
            seq_abundance = int(label.split()[0].split('_')[-1])
            for _ in range(seq_abundance):
                # The counter is global across records, not per-record.
                seqsfna_fh.write('>' + opts.label + '_' + str(seq_counter) + '\n')
                seqsfna_fh.write(seq + '\n')
                seq_counter += 1


if __name__ == "__main__":
    main()
| gpl-2.0 |
nwjs/chromium.src | tools/perf/generate_legacy_perf_dashboard_json.py | 5 | 9577 | #!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generates legacy perf dashboard json from non-telemetry based perf tests.
Taken from chromium/build/scripts/slave/performance_log_processory.py
(https://goo.gl/03SQRk)
"""
import collections
import json
import math
import logging
import re
class LegacyResultsProcessor(object):
"""Class for any log processor expecting standard data to be graphed.
The log will be parsed looking for any lines of the forms:
<*>RESULT <graph_name>: <trace_name>= <value> <units>
or
<*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] <units>
or
<*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>
For example,
*RESULT vm_final_browser: OneTab= 8488 kb
RESULT startup: ref= [167.00,148.00,146.00,142.00] ms
RESULT TabCapturePerformance_foo: Capture= {30.7, 1.45} ms
The leading * is optional; it indicates that the data from that line should
be considered "important", which may mean for example that it's graphed by
default.
If multiple values are given in [], their mean and (sample) standard
deviation will be written; if only one value is given, that will be written.
A trailing comma is permitted in the list of values.
NOTE: All lines except for RESULT lines are ignored, including the Avg and
Stddev lines output by Telemetry!
Any of the <fields> except <value> may be empty, in which case the
not-terribly-useful defaults will be used. The <graph_name> and <trace_name>
should not contain any spaces, colons (:) nor equals-signs (=). Furthermore,
the <trace_name> will be used on the waterfall display, so it should be kept
short. If the trace_name ends with '_ref', it will be interpreted as a
reference value, and shown alongside the corresponding main value on the
waterfall.
Semantic note: The terms graph and chart are used interchangeably here.
"""
RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
r' ?(?P<UNITS>.+))?')
# TODO(eyaich): Determine if this format is still used by any perf tests
HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')
  def __init__(self):
    # A dict of Graph objects, by name.
    self._graphs = {}
    # A dict mapping output file names to lists of lines in a file.
    self._output = {}
    # Percentile levels for summarizing sample lists.
    # NOTE(review): not referenced in the visible part of this class --
    # presumably consumed by the statistics helpers defined later; verify.
    self._percentiles = [.1, .25, .5, .75, .90, .95, .99]
  class Trace(object):
    """Encapsulates data for one trace. Here, this means one point."""

    def __init__(self):
      # True when the log line was prefixed with '*'.
      self.important = False
      # Raw sample values parsed from the log.
      self.values = []
      # Summary statistics over |values|.
      self.mean = 0.0
      self.stddev = 0.0

    def __str__(self):
      """Human-readable 'mean+/-stddev'; stddev is omitted when zero."""
      result = _FormatHumanReadable(self.mean)
      if self.stddev:
        result += '+/-%s' % _FormatHumanReadable(self.stddev)
      return result
  class Graph(object):
    """Encapsulates a set of points that should appear on the same graph."""

    def __init__(self):
      # Unit string shared by all traces on this graph (e.g. 'ms', 'kb').
      self.units = None
      # Maps trace name -> Trace.
      self.traces = {}

    def IsImportant(self):
      """A graph is considered important if any of its traces is important."""
      for trace in self.traces.itervalues():
        if trace.important:
          return True
      return False

    def BuildTracesDict(self):
      """Returns a dictionary mapping trace names to [value, stddev]."""
      traces_dict = {}
      for name, trace in self.traces.items():
        # Values are stringified for the dashboard JSON format.
        traces_dict[name] = [str(trace.mean), str(trace.stddev)]
      return traces_dict
  def GenerateJsonResults(self, filename):
    """Parses a perf log file and returns the graph JSON for its results."""
    # Iterate through the file and process each output line
    with open(filename) as f:
      for line in f.readlines():
        self.ProcessLine(line)
    # After all results have been seen, generate the graph json data
    return self.GenerateGraphJson()
def _PrependLog(self, filename, data):
"""Prepends some data to an output file."""
self._output[filename] = data + self._output.get(filename, [])
  def ProcessLine(self, line):
    """Processes one result line, and updates the state accordingly."""
    results_match = self.RESULTS_REGEX.search(line)
    histogram_match = self.HISTOGRAM_REGEX.search(line)
    if results_match:
      self._ProcessResultLine(results_match)
    elif histogram_match:
      # HISTOGRAM lines are recognized but parsing them is unimplemented.
      raise Exception("Error: Histogram results parsing not supported yet")
def _ProcessResultLine(self, line_match):
  """Processes a line that matches the standard RESULT line format.

  Accumulates the parsed value(s) into the Graph/Trace maps on self,
  creating the graph and trace entries on first sight.

  Args:
    line_match: A MatchObject as returned by re.search.
  """
  match_dict = line_match.groupdict()
  graph_name = match_dict['GRAPH'].strip()
  trace_name = match_dict['TRACE'].strip()

  # Fetch-or-create the graph and trace being updated.
  graph = self._graphs.get(graph_name, self.Graph())
  graph.units = (match_dict['UNITS'] or '').strip()
  trace = graph.traces.get(trace_name, self.Trace())
  # NOTE(review): the 'VALUE' group comes from RESULTS_REGEX (defined above
  # this chunk); the branches below imply it may be a scalar, a '[list]',
  # or a '{mean,stddev}' pair -- confirm against the regex definition.
  value = match_dict['VALUE']
  trace.important = match_dict['IMPORTANT'] or False

  # Compute the mean and standard deviation for a list or a histogram,
  # or the numerical value of a scalar value.
  if value.startswith('['):
    try:
      value_list = [float(x) for x in value.strip('[],').split(',')]
    except ValueError:
      # Report, but ignore, corrupted data lines. (Lines that are so badly
      # broken that they don't even match the RESULTS_REGEX won't be
      # detected.)
      logging.warning("Bad test output: '%s'" % value.strip())
      return
    trace.values += value_list
    trace.mean, trace.stddev, filedata = self._CalculateStatistics(
        trace.values, trace_name)
    assert filedata is not None
    for filename in filedata:
      self._PrependLog(filename, filedata[filename])
  elif value.startswith('{'):
    # Pre-computed {mean, stddev} pair: no recalculation needed.
    stripped = value.strip('{},')
    try:
      trace.mean, trace.stddev = [float(x) for x in stripped.split(',')]
    except ValueError:
      logging.warning("Bad test output: '%s'" % value.strip())
      return
  else:
    # Scalar value: append and recompute running statistics.
    try:
      trace.values.append(float(value))
      trace.mean, trace.stddev, filedata = self._CalculateStatistics(
          trace.values, trace_name)
      assert filedata is not None
      for filename in filedata:
        self._PrependLog(filename, filedata[filename])
    except ValueError:
      logging.warning("Bad test output: '%s'" % value.strip())
      return

  graph.traces[trace_name] = trace
  self._graphs[graph_name] = graph
def GenerateGraphJson(self):
  """Serialises all graphs seen so far.

  Returns:
    A JSON string mapping each graph name to a chart dict containing
    'traces' (name -> [mean, stddev] strings), 'units', and - when any
    trace is important - a sorted 'important' list of trace names.
  """
  charts = {}
  # Use items() rather than Python-2-only iteritems(): consistent with
  # Graph.BuildTracesDict and also valid on Python 3.
  for graph_name, graph in self._graphs.items():
    traces = graph.BuildTracesDict()

    # Traces should contain exactly two elements: [mean, stddev].
    for _, trace in traces.items():
      assert len(trace) == 2

    graph_dict = collections.OrderedDict([
        ('traces', traces),
        ('units', str(graph.units)),
    ])

    # Include a sorted list of important trace names if there are any.
    important = [t for t in graph.traces.keys() if graph.traces[t].important]
    if important:
      graph_dict['important'] = sorted(important)

    charts[graph_name] = graph_dict
  return json.dumps(charts)
# _CalculateStatistics needs to be a member function.
# pylint: disable=R0201
# Unused argument value_list.
# pylint: disable=W0613
def _CalculateStatistics(self, value_list, trace_name):
  """Computes the mean and population standard deviation of a value list.

  Subclasses may override this to use a different spread measure, or to
  produce extra per-file output.

  Args:
    value_list: the list of values to use in the calculation
    trace_name: the trace that produced the data (unused here; subclasses
        may use it)

  Returns:
    A 3-tuple of (mean, standard deviation, filedata-dict). The dict is
    empty in this base implementation.
  """
  count = len(value_list)
  if count == 0:
    return 0.0, 0.0, {}
  mean = sum(value_list) / float(count)
  variance = sum((v - mean) ** 2 for v in value_list) / count
  return mean, math.sqrt(variance), {}
def _FormatHumanReadable(number):
  """Formats a float into three significant figures, using metric suffixes.

  Only m, k, and M prefixes (for 1/1000, 1000, and 1,000,000) are used.
  Examples:
    0.0387    => 38.7m
    1.1234    => 1.12
    10866     => 10.8k
    682851200 => 683M
  """
  metric_prefixes = {-3: 'm', 0: '', 3: 'k', 6: 'M'}
  # Round to three significant figures via scientific notation.
  scientific = '%.2e' % float(number)     # e.g. '6.83e+05'
  e_idx = scientific.find('e')
  digits = float(scientific[:e_idx])      # e.g. 6.83
  exponent = int(scientific[e_idx + 1:])  # e.g. 5
  # Shift digits up until the exponent is a multiple of 3 no greater than 6
  # (the largest supported prefix), ...
  while exponent % 3 or exponent > 6:
    digits *= 10
    exponent -= 1
  # ... and down until it is no less than -3 (the smallest prefix).
  while exponent < -3:
    digits /= 10
    exponent += 1
  if digits >= 100:
    # Don't append a meaningless '.0' to an integer number.
    digits = int(digits)  # pylint: disable=redefined-variable-type
  # Exponent is now divisible by 3, between -3 and 6 inclusive.
  return '%s%s' % (digits, metric_prefixes[exponent])
| bsd-3-clause |
sridhar912/tsr-py-faster-rcnn | caffe-fast-rcnn/examples/pycaffe/layers/pyloss.py | 38 | 1223 | import caffe
import numpy as np
class EuclideanLossLayer(caffe.Layer):
    """Python port of the C++ EuclideanLossLayer.

    Computes loss = sum((bottom[0] - bottom[1])**2) / (2 * N), demonstrating
    the class interface for developing layers in Python.
    """

    def setup(self, bottom, top):
        # The layer takes exactly a (prediction, target) pair.
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")

    def reshape(self, bottom, top):
        # Both inputs must carry the same number of elements.
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # Buffer for the element-wise difference, matching the input shape.
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # The loss output is a scalar.
        top[0].reshape(1)

    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        top[0].data[...] = np.sum(np.square(self.diff)) / bottom[0].num / 2.

    def backward(self, top, propagate_down, bottom):
        for i in (0, 1):
            if not propagate_down[i]:
                continue
            # Gradient is +diff/N w.r.t. bottom[0] and -diff/N w.r.t.
            # bottom[1].
            sign = 1 - 2 * i
            bottom[i].diff[...] = sign * self.diff / bottom[i].num
| mit |
ajdawson/iris | lib/iris/fileformats/grib/_save_rules.py | 4 | 42383 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Grib save implementation.
This module replaces the deprecated
:mod:`iris.fileformats.grib.grib_save_rules`. It is a private module
with no public API. It is invoked from
:meth:`iris.fileformats.grib.save_grib2`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import warnings
import cf_units
import gribapi
import numpy as np
import numpy.ma as ma
import iris
import iris.exceptions
from iris.coord_systems import GeogCS, RotatedGeogCS, TransverseMercator
from iris.fileformats.grib import grib_phenom_translation as gptx
from iris.fileformats.grib._load_convert import (_STATISTIC_TYPE_NAMES,
_TIME_RANGE_UNITS)
from iris.util import is_regular, regular_step
# Invert code tables from :mod:`iris.fileformats.grib._load_convert`:
# loading maps GRIB code -> name, while saving needs name -> GRIB code.
_STATISTIC_TYPE_NAMES = {val: key for key, val in
                         _STATISTIC_TYPE_NAMES.items()}
_TIME_RANGE_UNITS = {val: key for key, val in _TIME_RANGE_UNITS.items()}
def fixup_float32_as_int32(value):
    """
    Workaround for use when the ECMWF GRIB API treats an IEEE 32-bit
    floating-point value as a signed, 4-byte integer.

    Returns the integer value which will result in the on-disk
    representation corresponding to the IEEE 32-bit floating-point
    value.

    """
    # Reinterpret the float32 bit pattern as an unsigned 32-bit integer.
    bit_pattern = int(np.array(value, dtype='f4').view(dtype='u4'))
    if bit_pattern < 0x80000000:
        return bit_pattern
    # Convert from two's-complement to sign-and-magnitude.
    # NB. Because of the silly representation of negative
    # integers in GRIB2, there is no value we can pass to
    # grib_set that will result in the bit pattern 0x80000000.
    # But since that bit pattern corresponds to a floating
    # point value of negative-zero, we can safely treat it as
    # positive-zero instead.
    return 0x80000000 - bit_pattern
def fixup_int32_as_uint32(value):
    """
    Workaround for use when the ECMWF GRIB API treats a signed, 4-byte
    integer value as an unsigned, 4-byte integer.

    Returns the unsigned integer value which will result in the on-disk
    representation corresponding to the signed, 4-byte integer value.

    """
    value = int(value)
    if not (-0x7fffffff <= value <= 0x7fffffff):
        raise ValueError(
            '{} out of range -2147483647 to 2147483647.'.format(value))
    # Negative values are stored as sign-and-magnitude: the magnitude with
    # the top bit set.
    return 0x80000000 - value if value < 0 else value
def ensure_set_int32_value(grib, key, value):
    """
    Ensure the workaround function :func:`fixup_int32_as_uint32` is applied as
    necessary to problem keys.

    First tries to set the key directly; if the gribapi rejects the signed
    value, retries with its sign-and-magnitude (unsigned) equivalent.

    """
    try:
        gribapi.grib_set(grib, key, value)
    except gribapi.GribInternalError:
        # The gribapi treated the key as unsigned; re-encode and retry.
        value = fixup_int32_as_uint32(value)
        gribapi.grib_set(grib, key, value)
###############################################################################
#
# Constants
#
###############################################################################
# Reference Flag Table 3.3
# Bit position (counting from MSB=1) used in 'resolutionAndComponentFlags'
# to signal grid-relative (rather than true-north) wind components.
_RESOLUTION_AND_COMPONENTS_GRID_WINDS_BIT = 3  # NB "bit5", from MSB=1.

# Reference Regulation 92.1.6
# Angles in GRIB2 are stored in units of 1e-6 degree.
_DEFAULT_DEGREES_UNITS = 1.0e-6
###############################################################################
#
# Identification Section 1
#
###############################################################################
def centre(cube, grib):
    """Set the originating centre/sub-centre keys (currently hard-coded)."""
    # TODO: read centre from cube
    gribapi.grib_set_long(grib, "centre", 74)  # UKMO
    gribapi.grib_set_long(grib, "subCentre", 0)  # exeter is not in the spec
def reference_time(cube, grib):
    """Encode the reference date/time and its significance code."""
    # Set the reference time.
    # (analysis, forecast start, verify time, obs time, etc)
    try:
        fp_coord = cube.coord("forecast_period")
    except iris.exceptions.CoordinateNotFoundError:
        fp_coord = None

    # Only the reference time and its meaning are used here; the forecast
    # period components of the tuples are encoded by set_forecast_time.
    if fp_coord is not None:
        rt, rt_meaning, _, _ = _non_missing_forecast_period(cube)
    else:
        rt, rt_meaning, _, _ = _missing_forecast_period(cube)

    gribapi.grib_set_long(grib, "significanceOfReferenceTime", rt_meaning)
    gribapi.grib_set_long(
        grib, "dataDate", "%04d%02d%02d" % (rt.year, rt.month, rt.day))
    gribapi.grib_set_long(
        grib, "dataTime", "%02d%02d" % (rt.hour, rt.minute))

    # TODO: Set the calendar, when we find out what happened to the proposal!
    # http://tinyurl.com/oefqgv6
    # I was sure it was approved for pre-operational use but it's not there.
def identification(cube, grib):
    """Set the Section 1 (identification) keys of the message."""
    centre(cube, grib)
    reference_time(cube, grib)

    # operational product, operational test, research product, etc
    # (missing for now)
    gribapi.grib_set_long(grib, "productionStatusOfProcessedData", 255)

    # Code table 1.4
    # analysis, forecast, processed satellite, processed radar,
    if cube.coords('realization'):
        # assume realization will always have 1 and only 1 point
        # as cubes saving to GRIB2 a 2D horizontal slices
        if cube.coord('realization').points[0] != 0:
            # Code 4: perturbed forecast product.
            gribapi.grib_set_long(grib, "typeOfProcessedData", 4)
        else:
            # Code 3: control forecast product.
            gribapi.grib_set_long(grib, "typeOfProcessedData", 3)
    else:
        # Code 2: analysis and forecast products.
        gribapi.grib_set_long(grib, "typeOfProcessedData", 2)
###############################################################################
#
# Grid Definition Section 3
#
###############################################################################
def shape_of_the_earth(cube, grib):
    """Encode the 'shape of the earth' keys from the cube's coord system."""
    # assume latlon
    cs = cube.coord(dimensions=[0]).coord_system

    # Initially set shape_of_earth keys to missing (255 for byte, -1 for long).
    gribapi.grib_set_long(grib, "scaleFactorOfRadiusOfSphericalEarth", 255)
    gribapi.grib_set_long(grib, "scaledValueOfRadiusOfSphericalEarth", -1)
    gribapi.grib_set_long(grib, "scaleFactorOfEarthMajorAxis", 255)
    gribapi.grib_set_long(grib, "scaledValueOfEarthMajorAxis", -1)
    gribapi.grib_set_long(grib, "scaleFactorOfEarthMinorAxis", 255)
    gribapi.grib_set_long(grib, "scaledValueOfEarthMinorAxis", -1)

    if isinstance(cs, GeogCS):
        ellipsoid = cs
    else:
        ellipsoid = cs.ellipsoid
        if ellipsoid is None:
            msg = "Could not determine shape of the earth from coord system "\
                  "of horizontal grid."
            raise iris.exceptions.TranslationError(msg)

    # Spherical earth.
    if ellipsoid.inverse_flattening == 0.0:
        # shapeOfTheEarth=1: sphere with radius specified by producer.
        gribapi.grib_set_long(grib, "shapeOfTheEarth", 1)
        gribapi.grib_set_long(grib, "scaleFactorOfRadiusOfSphericalEarth", 0)
        gribapi.grib_set_long(grib, "scaledValueOfRadiusOfSphericalEarth",
                              ellipsoid.semi_major_axis)
    # Oblate spheroid earth.
    else:
        # shapeOfTheEarth=7: oblate spheroid with axes specified in metres.
        gribapi.grib_set_long(grib, "shapeOfTheEarth", 7)
        gribapi.grib_set_long(grib, "scaleFactorOfEarthMajorAxis", 0)
        gribapi.grib_set_long(grib, "scaledValueOfEarthMajorAxis",
                              ellipsoid.semi_major_axis)
        gribapi.grib_set_long(grib, "scaleFactorOfEarthMinorAxis", 0)
        gribapi.grib_set_long(grib, "scaledValueOfEarthMinorAxis",
                              ellipsoid.semi_minor_axis)
def grid_dims(x_coord, y_coord, grib):
    """Set the grid dimension keys Ni/Nj from the coordinate lengths."""
    gribapi.grib_set_long(grib, "Ni", x_coord.shape[0])
    gribapi.grib_set_long(grib, "Nj", y_coord.shape[0])
def latlon_first_last(x_coord, y_coord, grib):
    """Set the first/last grid-point latitude and longitude keys.

    Values are encoded as integer millionths of a degree; any coordinate
    bounds are ignored (with a warning).
    """
    if x_coord.has_bounds() or y_coord.has_bounds():
        warnings.warn("Ignoring xy bounds")

# XXX Pending #1125
#    gribapi.grib_set_double(grib, "latitudeOfFirstGridPointInDegrees",
#                            float(y_coord.points[0]))
#    gribapi.grib_set_double(grib, "latitudeOfLastGridPointInDegrees",
#                            float(y_coord.points[-1]))
#    gribapi.grib_set_double(grib, "longitudeOfFirstGridPointInDegrees",
#                            float(x_coord.points[0]))
#    gribapi.grib_set_double(grib, "longitudeOfLastGridPointInDegrees",
#                            float(x_coord.points[-1]))
# WORKAROUND
    # Longitudes are normalised to [0, 360).
    gribapi.grib_set_long(grib, "latitudeOfFirstGridPoint",
                          int(y_coord.points[0]*1000000))
    gribapi.grib_set_long(grib, "latitudeOfLastGridPoint",
                          int(y_coord.points[-1]*1000000))
    gribapi.grib_set_long(grib, "longitudeOfFirstGridPoint",
                          int((x_coord.points[0] % 360)*1000000))
    gribapi.grib_set_long(grib, "longitudeOfLastGridPoint",
                          int((x_coord.points[-1] % 360)*1000000))
def dx_dy(x_coord, y_coord, grib):
    """Set the constant grid-spacing keys (Di/Dj) in degrees.

    Assumes both coordinates are regularly spaced (see
    :func:`iris.util.regular_step`).
    """
    x_step = regular_step(x_coord)
    y_step = regular_step(y_coord)
    # Direction signs are carried by the scanning-mode flags, so only the
    # magnitudes are stored here.
    gribapi.grib_set(grib, "DxInDegrees", float(abs(x_step)))
    gribapi.grib_set(grib, "DyInDegrees", float(abs(y_step)))
def scanning_mode_flags(x_coord, y_coord, grib):
    """Set the i/j scanning-direction flags from the coordinate point order."""
    # A positive difference between the first two points means the axis
    # scans in the positive direction.
    gribapi.grib_set_long(grib, "iScansPositively",
                          int(x_coord.points[1] - x_coord.points[0] > 0))
    gribapi.grib_set_long(grib, "jScansPositively",
                          int(y_coord.points[1] - y_coord.points[0] > 0))
def horizontal_grid_common(cube, grib):
    """Set the Section 3 keys shared by all supported grid templates."""
    # Grib encoding of the sequences of X and Y points.
    y_coord = cube.coord(dimensions=[0])
    x_coord = cube.coord(dimensions=[1])
    shape_of_the_earth(cube, grib)
    grid_dims(x_coord, y_coord, grib)
    scanning_mode_flags(x_coord, y_coord, grib)
def latlon_points_regular(cube, grib):
    """Set first/last point and step keys for a regular lat/lon grid."""
    y_coord = cube.coord(dimensions=[0])
    x_coord = cube.coord(dimensions=[1])
    latlon_first_last(x_coord, y_coord, grib)
    dx_dy(x_coord, y_coord, grib)
def latlon_points_irregular(cube, grib):
    """Set explicit per-point latitude/longitude arrays for irregular grids."""
    y_coord = cube.coord(dimensions=[0])
    x_coord = cube.coord(dimensions=[1])

    # Distinguish between true-north and grid-oriented vectors.
    is_grid_wind = cube.name() in ('x_wind', 'y_wind', 'grid_eastward_wind',
                                   'grid_northward_wind')
    # Encode in bit "5" of 'resolutionAndComponentFlags' (other bits unused).
    component_flags = 0
    if is_grid_wind:
        component_flags |= 2 ** _RESOLUTION_AND_COMPONENTS_GRID_WINDS_BIT
    gribapi.grib_set(grib, 'resolutionAndComponentFlags', component_flags)

    # Record the X and Y coordinate values.
    # NOTE: there is currently a bug in the gribapi which means that the size
    # of the longitudes array does not equal 'Nj', as it should.
    # See : https://software.ecmwf.int/issues/browse/SUP-1096
    # So, this only works at present if the x and y dimensions are **equal**.
    # Values are rounded to integer millionths of a degree.
    lon_values = x_coord.points / _DEFAULT_DEGREES_UNITS
    lat_values = y_coord.points / _DEFAULT_DEGREES_UNITS
    gribapi.grib_set_array(grib, 'longitudes',
                           np.array(np.round(lon_values), dtype=np.int64))
    gribapi.grib_set_array(grib, 'latitudes',
                           np.array(np.round(lat_values), dtype=np.int64))
def rotated_pole(cube, grib):
    """Encode the rotated-pole keys from the cube's coordinate system.

    Raises a TranslationError for a rotated prime meridian, which is not
    supported.
    """
    # Grib encoding of a rotated pole coordinate system.
    cs = cube.coord(dimensions=[0]).coord_system

    if cs.north_pole_grid_longitude != 0.0:
        raise iris.exceptions.TranslationError(
            'Grib save does not yet support Rotated-pole coordinates with '
            'a rotated prime meridian.')
# XXX Pending #1125
#    gribapi.grib_set_double(grib, "latitudeOfSouthernPoleInDegrees",
#                            float(cs.n_pole.latitude))
#    gribapi.grib_set_double(grib, "longitudeOfSouthernPoleInDegrees",
#                            float(cs.n_pole.longitude))
#    gribapi.grib_set_double(grib, "angleOfRotationInDegrees", 0)
# WORKAROUND
    # GRIB records the *southern* pole, hence the sign flip and the 180
    # degree longitude offset; values are in millionths of a degree.
    latitude = cs.grid_north_pole_latitude / _DEFAULT_DEGREES_UNITS
    longitude = (((cs.grid_north_pole_longitude + 180) % 360) /
                 _DEFAULT_DEGREES_UNITS)
    gribapi.grib_set(grib, "latitudeOfSouthernPole", - int(round(latitude)))
    gribapi.grib_set(grib, "longitudeOfSouthernPole", int(round(longitude)))
    gribapi.grib_set(grib, "angleOfRotation", 0)
def grid_definition_template_0(cube, grib):
    """
    Set keys within the provided grib message based on
    Grid Definition Template 3.0.

    Template 3.0 is used to represent "latitude/longitude (or equidistant
    cylindrical, or Plate Carree)".
    The coordinates are regularly spaced, true latitudes and longitudes.

    """
    # Constant resolution, aka 'regular' true lat-lon grid.
    gribapi.grib_set_long(grib, "gridDefinitionTemplateNumber", 0)
    horizontal_grid_common(cube, grib)
    latlon_points_regular(cube, grib)
def grid_definition_template_1(cube, grib):
    """
    Set keys within the provided grib message based on
    Grid Definition Template 3.1.

    Template 3.1 is used to represent "rotated latitude/longitude (or
    equidistant cylindrical, or Plate Carree)".
    The coordinates are regularly spaced, rotated latitudes and longitudes.

    """
    # Constant resolution, aka 'regular' rotated lat-lon grid.
    gribapi.grib_set_long(grib, "gridDefinitionTemplateNumber", 1)

    # Record details of the rotated coordinate system.
    rotated_pole(cube, grib)

    # Encode the lat/lon points.
    horizontal_grid_common(cube, grib)
    latlon_points_regular(cube, grib)
def grid_definition_template_5(cube, grib):
    """
    Set keys within the provided grib message based on
    Grid Definition Template 3.5.

    Template 3.5 is used to represent "variable resolution rotated
    latitude/longitude".
    The coordinates are irregularly spaced, rotated latitudes and longitudes.

    """
    # NOTE: we must set Ni=Nj=1 before establishing the template.
    # Without this, setting "gridDefinitionTemplateNumber" = 5 causes an
    # immediate error.
    # See: https://software.ecmwf.int/issues/browse/SUP-1095
    # This is acceptable, as the subsequent call to 'horizontal_grid_common'
    # will set these to the correct horizontal dimensions
    # (by calling 'grid_dims').
    gribapi.grib_set(grib, "Ni", 1)
    gribapi.grib_set(grib, "Nj", 1)
    gribapi.grib_set(grib, "gridDefinitionTemplateNumber", 5)

    # Record details of the rotated coordinate system.
    rotated_pole(cube, grib)
    # Encode the lat/lon points.
    horizontal_grid_common(cube, grib)
    latlon_points_irregular(cube, grib)
def grid_definition_template_12(cube, grib):
    """
    Set keys within the provided grib message based on
    Grid Definition Template 3.12.

    Template 3.12 is used to represent a Transverse Mercator grid.

    """
    gribapi.grib_set(grib, "gridDefinitionTemplateNumber", 12)

    # Retrieve some information from the cube.
    y_coord = cube.coord(dimensions=[0])
    x_coord = cube.coord(dimensions=[1])
    cs = y_coord.coord_system

    # Normalise the coordinate values to centimetres - the resolution
    # used in the GRIB message.
    def points_in_cm(coord):
        points = coord.units.convert(coord.points, 'cm')
        points = np.around(points).astype(int)
        return points
    y_cm = points_in_cm(y_coord)
    x_cm = points_in_cm(x_coord)

    # Set some keys specific to GDT12.
    # Encode the horizontal points.

    # NB. Since we're already in centimetres, our tolerance for
    # discrepancy in the differences is 1.
    def step(points):
        # Reject coordinates whose spacing varies by more than 1 cm.
        diffs = points[1:] - points[:-1]
        mean_diff = np.mean(diffs).astype(points.dtype)
        if not np.allclose(diffs, mean_diff, atol=1):
            msg = ('Irregular coordinates not supported for transverse '
                   'Mercator.')
            raise iris.exceptions.TranslationError(msg)
        return int(mean_diff)

    gribapi.grib_set(grib, 'Di', abs(step(x_cm)))
    gribapi.grib_set(grib, 'Dj', abs(step(y_cm)))
    horizontal_grid_common(cube, grib)

    # GRIBAPI expects unsigned ints in X1, X2, Y1, Y2 but it should accept
    # signed ints, so work around this.
    # See https://software.ecmwf.int/issues/browse/SUP-1101
    ensure_set_int32_value(grib, 'Y1', int(y_cm[0]))
    ensure_set_int32_value(grib, 'Y2', int(y_cm[-1]))
    ensure_set_int32_value(grib, 'X1', int(x_cm[0]))
    ensure_set_int32_value(grib, 'X2', int(x_cm[-1]))

    # Lat and lon of reference point are measured in millionths of a degree.
    gribapi.grib_set(grib, "latitudeOfReferencePoint",
                     cs.latitude_of_projection_origin / _DEFAULT_DEGREES_UNITS)
    gribapi.grib_set(grib, "longitudeOfReferencePoint",
                     cs.longitude_of_central_meridian / _DEFAULT_DEGREES_UNITS)

    # Convert a value in metres into the closest integer number of
    # centimetres.
    def m_to_cm(value):
        return int(round(value * 100))

    # False easting and false northing are measured in units of (10^-2)m.
    gribapi.grib_set(grib, 'XR', m_to_cm(cs.false_easting))
    gribapi.grib_set(grib, 'YR', m_to_cm(cs.false_northing))

    # GRIBAPI expects a signed int for scaleFactorAtReferencePoint
    # but it should accept a float, so work around this.
    # See https://software.ecmwf.int/issues/browse/SUP-1100
    value = cs.scale_factor_at_central_meridian
    key_type = gribapi.grib_get_native_type(grib,
                                            "scaleFactorAtReferencePoint")
    if key_type is not float:
        value = fixup_float32_as_int32(value)
    gribapi.grib_set(grib, "scaleFactorAtReferencePoint", value)
def grid_definition_section(cube, grib):
    """
    Set keys within the grid definition section of the provided grib message,
    based on the properties of the cube.

    Chooses a Grid Definition Template from the cube's horizontal coordinate
    system and regularity: GDT 3.0 (regular lat-lon), 3.1/3.5
    (regular/irregular rotated lat-lon) or 3.12 (transverse Mercator).
    """
    x_coord = cube.coord(dimensions=[1])
    y_coord = cube.coord(dimensions=[0])
    cs = x_coord.coord_system  # N.B. already checked same cs for x and y.
    regular_x_and_y = is_regular(x_coord) and is_regular(y_coord)

    if isinstance(cs, GeogCS):
        if not regular_x_and_y:
            raise iris.exceptions.TranslationError(
                'Saving an irregular latlon grid to GRIB (PDT3.4) is not '
                'yet supported.')

        grid_definition_template_0(cube, grib)

    elif isinstance(cs, RotatedGeogCS):
        # Rotated coordinate system cases.
        # Choose between GDT 3.1 and 3.5 according to coordinate regularity.
        if regular_x_and_y:
            grid_definition_template_1(cube, grib)
        else:
            grid_definition_template_5(cube, grib)

    elif isinstance(cs, TransverseMercator):
        # Transverse Mercator coordinate system (template 3.12).
        grid_definition_template_12(cube, grib)

    else:
        raise ValueError('Grib saving is not supported for coordinate system: '
                         '{}'.format(cs))
###############################################################################
#
# Product Definition Section 4
#
###############################################################################
def set_discipline_and_parameter(cube, grib):
    """Set the discipline/category/number keys from the cube's phenomenon.

    Falls back to 'missing' (255) for all three keys, with a warning, when
    no GRIB2 translation is known for the cube's name.
    """
    # NOTE: for now, can match by *either* standard_name or long_name.
    # This allows workarounds for data with no identified standard_name.
    grib2_info = gptx.cf_phenom_to_grib2_info(cube.standard_name,
                                              cube.long_name)
    if grib2_info is not None:
        gribapi.grib_set(grib, "discipline", grib2_info.discipline)
        gribapi.grib_set(grib, "parameterCategory", grib2_info.category)
        gribapi.grib_set(grib, "parameterNumber", grib2_info.number)
    else:
        gribapi.grib_set(grib, "discipline", 255)
        gribapi.grib_set(grib, "parameterCategory", 255)
        gribapi.grib_set(grib, "parameterNumber", 255)
        warnings.warn('Unable to determine Grib2 parameter code for cube.\n'
                      'discipline, parameterCategory and parameterNumber '
                      'have been set to "missing".')
def _non_missing_forecast_period(cube):
    """
    Return GRIB time parameters for a cube that has a 'forecast_period'
    coordinate.

    Returns a 4-tuple of (reference_time datetime, reference-time
    significance code, forecast period value, forecast-period unit code).
    """
    # Calculate "model start time" to use as the reference time.
    fp_coord = cube.coord("forecast_period")

    # Convert fp and t to hours so we can subtract to calculate R.
    cf_fp_hrs = fp_coord.units.convert(fp_coord.points[0], 'hours')
    t_coord = cube.coord("time").copy()
    hours_since = cf_units.Unit("hours since epoch",
                                calendar=t_coord.units.calendar)
    t_coord.convert_units(hours_since)

    rt_num = t_coord.points[0] - cf_fp_hrs
    rt = hours_since.num2date(rt_num)
    rt_meaning = 1  # "start of forecast"

    # Forecast period
    if fp_coord.units == cf_units.Unit("hours"):
        grib_time_code = 1
    elif fp_coord.units == cf_units.Unit("minutes"):
        grib_time_code = 0
    elif fp_coord.units == cf_units.Unit("seconds"):
        grib_time_code = 13
    else:
        raise iris.exceptions.TranslationError(
            "Unexpected units for 'forecast_period' : %s" % fp_coord.units)

    # When the time coordinate is bounded, the forecast period is taken
    # from the start of the period, so forecast_period must be bounded too.
    if not t_coord.has_bounds():
        fp = fp_coord.points[0]
    else:
        if not fp_coord.has_bounds():
            raise iris.exceptions.TranslationError(
                "bounds on 'time' coordinate requires bounds on"
                " 'forecast_period'.")
        fp = fp_coord.bounds[0][0]

    if fp - int(fp):
        warnings.warn("forecast_period encoding problem: "
                      "scaling required.")
    fp = int(fp)

    # Turn negative forecast times into grib negative numbers?
    from iris.fileformats.grib import hindcast_workaround
    if hindcast_workaround and fp < 0:
        msg = "Encoding negative forecast period from {} to ".format(fp)
        fp = 2**31 + abs(fp)
        msg += "{}".format(np.int32(fp))
        warnings.warn(msg)

    return rt, rt_meaning, fp, grib_time_code
def _missing_forecast_period(cube):
    """
    Returns a reference time and significance code together with a forecast
    period and corresponding units type code.

    Used when the cube has no 'forecast_period' coordinate: the forecast
    period is derived from 'forecast_reference_time' when present, or
    assumed to be zero hours at observation time otherwise.
    """
    t_coord = cube.coord("time")

    if cube.coords('forecast_reference_time'):
        # Make copies and convert them to common "hours since" units.
        hours_since = cf_units.Unit('hours since epoch',
                                    calendar=t_coord.units.calendar)
        frt_coord = cube.coord('forecast_reference_time').copy()
        frt_coord.convert_units(hours_since)
        t_coord = t_coord.copy()
        t_coord.convert_units(hours_since)
        # Extract values.
        t = t_coord.bounds[0, 0] if t_coord.has_bounds() else t_coord.points[0]
        frt = frt_coord.points[0]
        # Calculate GRIB parameters.
        rt = frt_coord.units.num2date(frt)
        rt_meaning = 1  # Forecast reference time.
        fp = t - frt
        integer_fp = int(fp)
        if integer_fp != fp:
            msg = 'Truncating floating point forecast period {} to ' \
                  'integer value {}'
            warnings.warn(msg.format(fp, integer_fp))
        fp = integer_fp
        fp_meaning = 1  # Hours
    else:
        # With no forecast period or forecast reference time set assume a
        # reference time significance of "Observation time" and set the
        # forecast period to 0h.
        t = t_coord.bounds[0, 0] if t_coord.has_bounds() else t_coord.points[0]
        rt = t_coord.units.num2date(t)
        rt_meaning = 3  # Observation time
        fp = 0
        fp_meaning = 1  # Hours

    return rt, rt_meaning, fp, fp_meaning
def set_forecast_time(cube, grib):
    """
    Set the forecast time keys based on the forecast_period coordinate. In
    the absence of a forecast_period and forecast_reference_time,
    the forecast time is set to zero.

    """
    try:
        fp_coord = cube.coord("forecast_period")
    except iris.exceptions.CoordinateNotFoundError:
        fp_coord = None

    # Only the forecast period and its unit code are used here; the
    # reference-time components are encoded by reference_time().
    if fp_coord is not None:
        _, _, fp, grib_time_code = _non_missing_forecast_period(cube)
    else:
        _, _, fp, grib_time_code = _missing_forecast_period(cube)

    gribapi.grib_set(grib, "indicatorOfUnitOfTimeRange", grib_time_code)
    gribapi.grib_set(grib, "forecastTime", fp)
def set_fixed_surfaces(cube, grib):
    """Set the fixed-surface (vertical level) keys from the cube's coords.

    Recognises pressure, altitude, height and air_potential_temperature
    coordinates; any other z-axis coordinate raises a TranslationError, and
    no vertical coordinate at all is encoded as 'surface' level.
    """
    # Look for something we can export
    v_coord = grib_v_code = output_unit = None

    # pressure
    if cube.coords("air_pressure") or cube.coords("pressure"):
        grib_v_code = 100
        output_unit = cf_units.Unit("Pa")
        v_coord = (cube.coords("air_pressure") or cube.coords("pressure"))[0]

    # altitude
    elif cube.coords("altitude"):
        grib_v_code = 102
        output_unit = cf_units.Unit("m")
        v_coord = cube.coord("altitude")

    # height
    elif cube.coords("height"):
        grib_v_code = 103
        output_unit = cf_units.Unit("m")
        v_coord = cube.coord("height")

    elif cube.coords("air_potential_temperature"):
        grib_v_code = 107
        output_unit = cf_units.Unit('K')
        v_coord = cube.coord("air_potential_temperature")

    # unknown / absent
    else:
        # check for *ANY* height coords at all...
        v_coords = cube.coords(axis='z')
        if v_coords:
            # There are vertical coordinate(s), but we don't understand them...
            v_coords_str = ' ,'.join(["'{}'".format(c.name())
                                      for c in v_coords])
            raise iris.exceptions.TranslationError(
                'The vertical-axis coordinate(s) ({}) '
                'are not recognised or handled.'.format(v_coords_str))

    # What did we find?
    if v_coord is None:
        # No vertical coordinate: record as 'surface' level (levelType=1).
        # NOTE: may *not* be truly correct, but seems to be common practice.
        # Still under investigation :
        # See https://github.com/SciTools/iris/issues/519
        gribapi.grib_set(grib, "typeOfFirstFixedSurface", 1)
        gribapi.grib_set(grib, "scaleFactorOfFirstFixedSurface", 0)
        gribapi.grib_set(grib, "scaledValueOfFirstFixedSurface", 0)
        # Set secondary surface = 'missing'.
        gribapi.grib_set(grib, "typeOfSecondFixedSurface", -1)
        gribapi.grib_set(grib, "scaleFactorOfSecondFixedSurface", 255)
        gribapi.grib_set(grib, "scaledValueOfSecondFixedSurface", -1)
    elif not v_coord.has_bounds():
        # No second surface
        output_v = v_coord.units.convert(v_coord.points[0], output_unit)
        if output_v - abs(output_v):
            warnings.warn("Vertical level encoding problem: scaling required.")
        output_v = int(output_v)

        gribapi.grib_set(grib, "typeOfFirstFixedSurface", grib_v_code)
        gribapi.grib_set(grib, "scaleFactorOfFirstFixedSurface", 0)
        gribapi.grib_set(grib, "scaledValueOfFirstFixedSurface", output_v)
        gribapi.grib_set(grib, "typeOfSecondFixedSurface", -1)
        gribapi.grib_set(grib, "scaleFactorOfSecondFixedSurface", 255)
        gribapi.grib_set(grib, "scaledValueOfSecondFixedSurface", -1)
    else:
        # bounded : set lower+upper surfaces
        output_v = v_coord.units.convert(v_coord.bounds[0], output_unit)
        if output_v[0] - abs(output_v[0]) or output_v[1] - abs(output_v[1]):
            warnings.warn("Vertical level encoding problem: scaling required.")
        gribapi.grib_set(grib, "typeOfFirstFixedSurface", grib_v_code)
        gribapi.grib_set(grib, "typeOfSecondFixedSurface", grib_v_code)
        gribapi.grib_set(grib, "scaleFactorOfFirstFixedSurface", 0)
        gribapi.grib_set(grib, "scaleFactorOfSecondFixedSurface", 0)
        gribapi.grib_set(grib, "scaledValueOfFirstFixedSurface",
                         int(output_v[0]))
        gribapi.grib_set(grib, "scaledValueOfSecondFixedSurface",
                         int(output_v[1]))
def set_time_range(time_coord, grib):
    """
    Set the time range keys in the specified message
    based on the bounds of the provided time coordinate.

    Requires a scalar (length-one) time coordinate with exactly two bounds;
    the range is always encoded in hours.
    """
    if len(time_coord.points) != 1:
        msg = 'Expected length one time coordinate, got {} points'
        raise ValueError(msg.format(len(time_coord.points)))

    if time_coord.nbounds != 2:
        msg = 'Expected time coordinate with two bounds, got {} bounds'
        raise ValueError(msg.format(time_coord.nbounds))

    # Set type to hours and convert period to this unit.
    gribapi.grib_set(grib, "indicatorOfUnitForTimeRange",
                     _TIME_RANGE_UNITS['hours'])
    hours_since_units = cf_units.Unit('hours since epoch',
                                      calendar=time_coord.units.calendar)
    start_hours, end_hours = time_coord.units.convert(time_coord.bounds[0],
                                                      hours_since_units)
    # Cast from np.float to Python int. The lengthOfTimeRange key is a
    # 4 byte integer so we cast to highlight truncation of any floating
    # point value. The grib_api will do the cast from float to int, but it
    # cannot handle numpy floats.
    time_range_in_hours = end_hours - start_hours
    integer_hours = int(time_range_in_hours)
    if integer_hours != time_range_in_hours:
        msg = 'Truncating floating point lengthOfTimeRange {} to ' \
              'integer value {}'
        warnings.warn(msg.format(time_range_in_hours, integer_hours))
    gribapi.grib_set(grib, "lengthOfTimeRange", integer_hours)
def set_time_increment(cell_method, grib):
    """
    Set the time increment keys in the specified message
    based on the provided cell method.

    Only hour-based intervals are understood; anything else is encoded as
    an increment of 0 with a 'missing' units type.
    """
    # Type of time increment, e.g incrementing forecast period, incrementing
    # forecast reference time, etc. Set to missing, but we could use the
    # cell method coord to infer a value (see code table 4.11).
    gribapi.grib_set(grib, "typeOfTimeIncrement", 255)

    # Default values for the time increment value and units type.
    inc = 0
    units_type = 255
    # Attempt to determine time increment from cell method intervals string.
    intervals = cell_method.intervals
    if intervals is not None and len(intervals) == 1:
        interval, = intervals
        try:
            inc, units = interval.split()
            inc = float(inc)
            if units in ('hr', 'hour', 'hours'):
                units_type = _TIME_RANGE_UNITS['hours']
            else:
                raise ValueError('Unable to parse units of interval')
        except ValueError:
            # Problem interpreting the interval string.
            inc = 0
            units_type = 255
        else:
            # Cast to int as timeIncrement key is a 4 byte integer.
            integer_inc = int(inc)
            if integer_inc != inc:
                warnings.warn('Truncating floating point timeIncrement {} to '
                              'integer value {}'.format(inc, integer_inc))
            inc = integer_inc

    gribapi.grib_set(grib, "indicatorOfUnitForTimeIncrement", units_type)
    gribapi.grib_set(grib, "timeIncrement", inc)
def _cube_is_time_statistic(cube):
"""
Test whether we can identify this cube as a statistic over time.
At present, accept anything whose latest cell method operates over a single
coordinate that "looks like" a time factor (i.e. some specific names).
In particular, we recognise the coordinate names defined in
:py:mod:`iris.coord_categorisation`.
"""
# The *only* relevant information is in cell_methods, as coordinates or
# dimensions of aggregation may no longer exist. So it's not possible to
# be definitive, but we handle *some* useful cases.
# In other cases just say "no", which is safe even when not ideal.
# Identify a single coordinate from the latest cell_method.
if not cube.cell_methods:
return False
latest_coordnames = cube.cell_methods[-1].coord_names
if len(latest_coordnames) != 1:
return False
coord_name = latest_coordnames[0]
# Define accepted time names, including those from coord_categorisations.
recognised_time_names = ['time', 'year', 'month', 'day', 'weekday',
'season']
# Accept it if the name is recognised.
# Currently does *not* recognise related names like 'month_number' or
# 'years', as that seems potentially unsafe.
return coord_name in recognised_time_names
def product_definition_template_common(cube, grib):
    """
    Set keys within the provided grib message that are common across
    all of the supported product definition templates.

    Covers parameter identification, generating-process keys (set to
    missing), forecast time, and the fixed-surface (vertical) keys.
    """
    set_discipline_and_parameter(cube, grib)

    # Various missing values.
    gribapi.grib_set(grib, "typeOfGeneratingProcess", 255)
    gribapi.grib_set(grib, "backgroundProcess", 255)
    gribapi.grib_set(grib, "generatingProcessIdentifier", 255)

    # Generic time handling.
    set_forecast_time(cube, grib)

    # Handle vertical coords.
    set_fixed_surfaces(cube, grib)
def product_definition_template_0(cube, grib):
    """
    Set keys within the provided grib message based on Product
    Definition Template 4.0.

    Template 4.0 is used to represent an analysis or forecast at
    a horizontal level at a point in time.

    """
    # Template number first, then the keys shared by all templates.
    gribapi.grib_set_long(grib, "productDefinitionTemplateNumber", 0)
    product_definition_template_common(cube, grib)
def product_definition_template_8(cube, grib):
    """
    Set keys within the provided grib message based on Product
    Definition Template 4.8.

    Template 4.8 is used to represent an aggregation over a time
    interval.

    """
    gribapi.grib_set(grib, "productDefinitionTemplateNumber", 8)
    # All remaining keys are shared with template 4.11.
    _product_definition_template_8_and_11(cube, grib)
def product_definition_template_11(cube, grib):
    """
    Set keys within the provided grib message based on Product
    Definition Template 4.11.

    Template 4.11 is used to represent an aggregation over a time
    interval for a member of an ensemble.

    Raises:
        ValueError: if the cube does not carry a length-one
            'realization' coordinate identifying the ensemble member.
    """
    gribapi.grib_set(grib, "productDefinitionTemplateNumber", 11)
    # Template 4.11 requires exactly one ensemble member to be identified.
    if not (cube.coords('realization') and
            len(cube.coord('realization').points) == 1):
        # NOTE: message previously read "...with onepoint is required" due
        # to a missing space at the implicit string concatenation.
        raise ValueError("A cube 'realization' coordinate with one "
                         "point is required, but not present")
    gribapi.grib_set(grib, "perturbationNumber",
                     int(cube.coord('realization').points[0]))
    # no encoding at present in Iris, set to missing
    gribapi.grib_set(grib, "numberOfForecastsInEnsemble", 255)
    gribapi.grib_set(grib, "typeOfEnsembleForecast", 255)
    _product_definition_template_8_and_11(cube, grib)
def _product_definition_template_8_and_11(cube, grib):
    """
    Set keys within the provided grib message based on common aspects of
    Product Definition Templates 4.8 and 4.11.

    Templates 4.8 and 4.11 are used to represent aggregations over a time
    interval.

    Raises:
        ValueError: if the time coordinate is not length-one with exactly
            two bounds, or if there is not exactly one 'time' cell method
            operating over the single 'time' coordinate.
    """
    product_definition_template_common(cube, grib)

    # Check for time coordinate.
    time_coord = cube.coord('time')

    if len(time_coord.points) != 1:
        msg = 'Expected length one time coordinate, got {} points'
        raise ValueError(msg.format(time_coord.points))

    if time_coord.nbounds != 2:
        msg = 'Expected time coordinate with two bounds, got {} bounds'
        raise ValueError(msg.format(time_coord.nbounds))

    # Check that there is one and only one cell method related to the
    # time coord.
    time_cell_methods = [cell_method for cell_method in cube.cell_methods if
                         'time' in cell_method.coord_names]
    if not time_cell_methods:
        raise ValueError("Expected a cell method with a coordinate name "
                         "of 'time'")
    if len(time_cell_methods) > 1:
        raise ValueError("Cannot handle multiple 'time' cell methods")
    cell_method, = time_cell_methods

    if len(cell_method.coord_names) > 1:
        raise ValueError("Cannot handle multiple coordinate names in "
                         "the time related cell method. Expected ('time',), "
                         "got {!r}".format(cell_method.coord_names))

    # Extract the datetime-like object corresponding to the end of
    # the overall processing interval.
    end = time_coord.units.num2date(time_coord.bounds[0, -1])

    # Set the associated keys for the end of the interval (octets 35-41
    # in section 4).
    gribapi.grib_set(grib, "yearOfEndOfOverallTimeInterval", end.year)
    gribapi.grib_set(grib, "monthOfEndOfOverallTimeInterval", end.month)
    gribapi.grib_set(grib, "dayOfEndOfOverallTimeInterval", end.day)
    gribapi.grib_set(grib, "hourOfEndOfOverallTimeInterval", end.hour)
    gribapi.grib_set(grib, "minuteOfEndOfOverallTimeInterval", end.minute)
    gribapi.grib_set(grib, "secondOfEndOfOverallTimeInterval", end.second)

    # Only one time range specification. If there were a series of aggregations
    # (e.g. the mean of an accumulation) one might set this to a higher value,
    # but we currently only handle a single time related cell method.
    gribapi.grib_set(grib, "numberOfTimeRange", 1)
    gribapi.grib_set(grib, "numberOfMissingInStatisticalProcess", 0)

    # Type of statistical process (see code table 4.10)
    statistic_type = _STATISTIC_TYPE_NAMES.get(cell_method.method, 255)
    gribapi.grib_set(grib, "typeOfStatisticalProcessing", statistic_type)

    # Period over which statistical processing is performed.
    set_time_range(time_coord, grib)

    # Time increment i.e. interval of cell method (if any)
    set_time_increment(cell_method, grib)
def product_definition_section(cube, grib):
    """
    Set keys within the product definition section of the provided
    grib message based on the properties of the cube.

    Template selection:
      * unbounded time coordinate -> template 4.0 (point-in-time forecast)
      * time statistic + realization coord -> template 4.11 (ensemble)
      * time statistic, no realization -> template 4.8

    Raises :class:`iris.exceptions.TranslationError` when no template fits.
    """
    if not cube.coord("time").has_bounds():
        # forecast (template 4.0)
        product_definition_template_0(cube, grib)
    elif _cube_is_time_statistic(cube):
        if cube.coords('realization'):
            # time processed (template 4.11)
            pdt = product_definition_template_11
        else:
            # time processed (template 4.8)
            pdt = product_definition_template_8
        try:
            pdt(cube, grib)
        except ValueError as e:
            # Re-raise with context so the caller knows which cube failed
            # and why it could not be expressed as a statistic.
            raise ValueError('Saving to GRIB2 failed: the cube is not suitable'
                             ' for saving as a time processed statistic GRIB'
                             ' message. {}'.format(e))
    else:
        # Don't know how to handle this kind of data
        msg = 'A suitable product template could not be deduced'
        raise iris.exceptions.TranslationError(msg)
###############################################################################
#
# Data Representation Section 5
#
###############################################################################
def data_section(cube, grib):
    """
    Set the data keys (values, bitmap/missing value) of the grib message
    from the cube's data, converting units where a GRIB2 parameter
    mapping is known.
    """
    # Masked data?
    if isinstance(cube.data, ma.core.MaskedArray):
        # What missing value shall we use?
        if not np.isnan(cube.data.fill_value):
            # Use the data's fill value.
            fill_value = float(cube.data.fill_value)
        else:
            # We can't use the data's fill value if it's NaN,
            # the GRIB API doesn't like it.
            # Calculate an MDI outside the data range.
            # (Renamed locals: the originals shadowed builtins min/max.)
            data_min, data_max = cube.data.min(), cube.data.max()
            fill_value = data_min - (data_max - data_min) * 0.1
        # Prepare the unmasked data array, using fill_value as the MDI.
        data = cube.data.filled(fill_value)
    else:
        fill_value = None
        data = cube.data

    # units scaling
    grib2_info = gptx.cf_phenom_to_grib2_info(cube.standard_name,
                                              cube.long_name)
    if grib2_info is None:
        # for now, just allow this
        warnings.warn('Unable to determine Grib2 parameter code for cube.\n'
                      'Message data may not be correctly scaled.')
    else:
        if cube.units != grib2_info.units:
            data = cube.units.convert(data, grib2_info.units)
            if fill_value is not None:
                fill_value = cube.units.convert(fill_value, grib2_info.units)

    if fill_value is None:
        # Disable missing values in the grib message.
        gribapi.grib_set(grib, "bitmapPresent", 0)
    else:
        # Enable missing values in the grib message.
        gribapi.grib_set(grib, "bitmapPresent", 1)
        gribapi.grib_set_double(grib, "missingValue", fill_value)
    gribapi.grib_set_double_array(grib, "values", data.flatten())

    # todo: check packing accuracy?
    # print("packingError", gribapi.getb_get_double(grib, "packingError"))
###############################################################################
def gribbability_check(cube):
    "We always need the following things for grib saving."

    # Both horizontal dimensions must carry a coordinate system,
    # and those systems must agree.
    systems = [cube.coord(dimensions=[dim]).coord_system for dim in (0, 1)]
    if any(system is None for system in systems):
        raise iris.exceptions.TranslationError("CoordSystem not present")
    if systems[0] != systems[1]:
        raise iris.exceptions.TranslationError("Inconsistent CoordSystems")

    # A time coordinate is mandatory.
    if not cube.coords("time"):
        raise iris.exceptions.TranslationError("time coord not found")
def run(cube, grib):
    """
    Set the keys of the grib message based on the contents of the cube.

    Args:

    * cube:
        An instance of :class:`iris.cube.Cube`.

    * grib:
        ID of a grib message in memory. This is typically the return value of
        :func:`gribapi.grib_new_from_samples`.

    """
    # Fail fast if the cube lacks the essentials (coord systems, time).
    gribbability_check(cube)

    # Section 1 - Identification Section.
    identification(cube, grib)

    # Section 3 - Grid Definition Section (Grid Definition Template)
    grid_definition_section(cube, grib)

    # Section 4 - Product Definition Section (Product Definition Template)
    product_definition_section(cube, grib)

    # Section 5 - Data Representation Section (Data Representation Template)
    data_section(cube, grib)
| gpl-3.0 |
maartenq/ansible | test/sanity/code-smell/shebang.py | 9 | 3132 | #!/usr/bin/env python
import os
import stat
import sys
def main():
    """Validate shebang lines and execute bits for the files named on the
    command line (or stdin), printing one ``path:line:col: message``
    diagnostic per violation."""
    # Shebangs permitted on non-module scripts anywhere in the tree.
    allowed = set([
        b'#!/bin/bash -eu',
        b'#!/bin/bash -eux',
        b'#!/bin/bash',
        b'#!/bin/sh',
        b'#!/usr/bin/env bash',
        b'#!/usr/bin/env fish',
        b'#!/usr/bin/env pwsh',
        b'#!/usr/bin/env python',
        b'#!/usr/bin/make -f',
    ])

    # Required shebang per module file extension.
    module_shebangs = {
        '': b'#!/usr/bin/python',
        '.py': b'#!/usr/bin/python',
        '.ps1': b'#!powershell',
    }

    # Files exempt from the checks (e.g. deliberate Windows line endings).
    skip = set([
        'test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1',
        'test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1',
        'test/utils/shippable/timing.py',
    ])

    for path in sys.argv[1:] or sys.stdin.read().splitlines():
        if path in skip:
            continue

        with open(path, 'rb') as path_fd:
            shebang = path_fd.readline().strip()
            mode = os.stat(path).st_mode
            # Any of the user/group/other execute bits counts as executable.
            executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode

            if not shebang or not shebang.startswith(b'#!'):
                if executable:
                    print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))
                continue

            is_module = False

            if path.startswith('lib/ansible/modules/'):
                is_module = True
            elif path.startswith('lib/') or path.startswith('test/runner/lib/'):
                # Library code is imported, never executed directly.
                if executable:
                    print('%s:%d:%d: should not be executable' % (path, 0, 0))

                if shebang:
                    print('%s:%d:%d: should not have a shebang' % (path, 0, 0))

                continue
            elif path.startswith('test/integration/targets/'):
                dirname = os.path.dirname(path)

                if dirname.endswith('/library') or dirname in (
                        # non-standard module library directories
                        'test/integration/targets/module_precedence/lib_no_extension',
                        'test/integration/targets/module_precedence/lib_with_extension',
                ):
                    is_module = True

            if is_module:
                # Modules must not be executable and must use the exact
                # shebang registered for their extension.
                if executable:
                    print('%s:%d:%d: module should not be executable' % (path, 0, 0))

                ext = os.path.splitext(path)[1]
                expected_shebang = module_shebangs.get(ext)
                expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])

                if expected_shebang:
                    if shebang == expected_shebang:
                        continue

                    print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
                else:
                    print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
            else:
                # Everything else must use one of the whitelisted shebangs.
                if shebang not in allowed:
                    print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))


if __name__ == '__main__':
    main()
| gpl-3.0 |
sss/calibre-at-bzr | src/calibre/gui2/viewer/table_popup.py | 7 | 2960 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import (QDialog, QDialogButtonBox, QVBoxLayout, QApplication,
QSize, QIcon, Qt)
from PyQt4.QtWebKit import QWebView
from calibre.gui2 import gprefs, error_dialog
class TableView(QDialog):
    """Dialog that renders a single HTML table in a QWebView, with
    zoom in/out buttons and persisted window geometry."""

    def __init__(self, parent, font_magnification_step):
        QDialog.__init__(self, parent)
        # Zoom factor delta applied per click of the zoom buttons.
        self.font_magnification_step = font_magnification_step
        dw = QApplication.instance().desktop()
        self.avail_geom = dw.availableGeometry(parent)
        self.view = QWebView(self)
        self.bb = bb = QDialogButtonBox(QDialogButtonBox.Close)
        bb.accepted.connect(self.accept)
        bb.rejected.connect(self.reject)
        self.zi_button = zi = bb.addButton(_('Zoom &in'), bb.ActionRole)
        self.zo_button = zo = bb.addButton(_('Zoom &out'), bb.ActionRole)
        zi.setIcon(QIcon(I('plus.png')))
        zo.setIcon(QIcon(I('minus.png')))
        zi.clicked.connect(self.zoom_in)
        zo.clicked.connect(self.zoom_out)
        self.l = l = QVBoxLayout()
        self.setLayout(l)
        l.addWidget(self.view)
        l.addWidget(bb)

    def zoom_in(self):
        # Unbounded upward zoom.
        self.view.setZoomFactor(self.view.zoomFactor() +
                                self.font_magnification_step)

    def zoom_out(self):
        # Clamp so the zoom factor never drops to/below zero.
        self.view.setZoomFactor(max(0.1, self.view.zoomFactor()
                                    - self.font_magnification_step))

    def __call__(self, html, baseurl):
        """Load *html* (resolved against *baseurl*) and show the dialog,
        restoring any previously saved geometry."""
        self.view.setHtml(
            '<!DOCTYPE html><html><body bgcolor="white">%s<body></html>'%html,
            baseurl)
        geom = self.avail_geom
        self.resize(QSize(int(geom.width()/2.5), geom.height()-50))
        geom = gprefs.get('viewer_table_popup_geometry', None)
        if geom is not None:
            self.restoreGeometry(geom)
        self.setWindowTitle(_('View Table'))
        self.show()

    def done(self, e):
        # Persist the window geometry before the dialog closes.
        gprefs['viewer_table_popup_geometry'] = bytearray(self.saveGeometry())
        return QDialog.done(self, e)
class TablePopup(object):
    """Factory/manager for :class:`TableView` pop-ups, keeping each open
    dialog alive until it is closed."""

    def __init__(self, parent):
        self.parent = parent
        # Open dialogs; referenced here so Qt does not garbage collect them.
        self.dialogs = []

    def __call__(self, html, baseurl, font_magnification_step):
        if not html:
            return error_dialog(self.parent, _('No table found'),
                _('No table was found'), show=True)
        dialog = TableView(self.parent, font_magnification_step)
        self.dialogs.append(dialog)
        dialog.finished.connect(self.cleanup, type=Qt.QueuedConnection)
        dialog(html, baseurl)

    def cleanup(self):
        # Drop references to any dialogs that have been closed.
        for dialog in list(self.dialogs):
            if not dialog.isVisible():
                self.dialogs.remove(dialog)
| gpl-3.0 |
Patrikkk/TShock | scripts/gpltext.py | 4 | 2626 | ''' TShock, a server mod for Terraria
Copyright (C) 2011-2019 Pryaxis & TShock Contributors
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import os
import glob
# File extensions that should carry the GPL header.
extensions = {'.cs', '.py'}
# Root directory to scan.
path = "./"
# Raw strings so the regex escapes (\*, \s, \n, \d) reach the re module
# verbatim instead of being (mis)interpreted as string escapes.
pattern = r"/\*\s?\n?TShock, a server mod for Terraria(\n|.)*\*/"
pypattern = r"'''\s?\n?TShock, a server mod for Terraria(\n|.)*'''"
# Copyright year written when refreshing an existing header.
year = "2019"
filename = "./README.md"
# C-style header text used for .cs files.
text = "/*\n\
TShock, a server mod for Terraria\n\
Copyright (C) 2011-2019 Pryaxis & TShock Contributors\n\
\n\
This program is free software: you can redistribute it and/or modify\n\
it under the terms of the GNU General Public License as published by\n\
the Free Software Foundation, either version 3 of the License, or\n\
(at your option) any later version.\n\
\n\
This program is distributed in the hope that it will be useful,\n\
but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
GNU General Public License for more details.\n\
\n\
You should have received a copy of the GNU General Public License\n\
along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
*/\n\
\n\
"
# Python variant: same text with the /* */ delimiters swapped for '''.
pytext = re.sub(r"\*/", "'''", text)
pytext = re.sub(r"/\*", "'''", pytext)
def changeText(filename):
    """Insert the GPL header into *filename*, or refresh the copyright
    year if a header is already present."""
    with open(filename, 'r') as f:
        content = f.read()

    # Pick the header flavour matching the file type.
    if filename.endswith('.py'):
        header, header_re = pytext, pypattern
    else:
        header, header_re = text, pattern

    if re.search(header_re, content):
        # Header already there: just bump the year range.
        content = re.sub(r"Copyright \(C\) 2011-[\d]{4}",
                         "Copyright (C) 2011-%s" % year, content)
    else:
        content = header + content

    with open(filename, 'w') as f:
        f.write(content)
def getFiles(path):
    """Recursively walk *path*, applying :func:`changeText` to every file
    whose extension is listed in ``extensions``.

    Designer-generated C# files (``*.Designer.cs``) are skipped.
    """
    for name in os.listdir(path):
        full_path = path + name
        # Bug fix: the original tested os.path.isdir(name), i.e. the bare
        # entry name relative to the *current* directory, so recursion only
        # worked when cwd == path. Test the joined path instead.
        if os.path.isdir(full_path):
            getFiles(full_path + '/')
        else:
            for ext in extensions:
                if name.endswith(ext):
                    if name.endswith('.Designer.cs'):
                        break
                    print (full_path)
                    changeText(full_path)
                    break


getFiles(path)
cloakedcode/CouchPotatoServer | libs/sqlalchemy/util/langhelpers.py | 17 | 28261 | # util/langhelpers.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from compat import update_wrapper, set_types, threading, callable, inspect_getfullargspec, py3k_warning
from sqlalchemy import exc
def _unique_symbols(used, *bases):
    """Yield, for each name in *bases*, a symbol not present in *used*
    (trying ``base``, then ``base0``..``base999``), adding each result to
    the used set as it goes.

    Raises NameError if 1001 candidates for a base are all taken.
    """
    used = set(used)
    for base in bases:
        # Candidate stream: "base", "base0", "base1", ... "base999".
        pool = itertools.chain((base,),
                               itertools.imap(lambda i: base + str(i),
                                              xrange(1000)))
        for sym in pool:
            if sym not in used:
                used.add(sym)
                yield sym
                break
        else:
            raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
    """A signature-matching decorator factory.

    ``target(fn, *args, **kw)`` is wrapped in a generated lambda whose
    signature exactly matches ``fn``'s, so introspection on the decorated
    function still reports the original argument spec.
    """
    def decorate(fn):
        if not inspect.isfunction(fn):
            raise Exception("not a decoratable function")
        spec = inspect_getfullargspec(fn)
        # Pick local names that cannot collide with fn's argument names.
        names = tuple(spec[0]) + spec[1:3] + (fn.func_name,)
        targ_name, fn_name = _unique_symbols(names, 'target', 'fn')

        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))

        # Build a lambda mirroring fn's signature that forwards to target.
        code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % (
                metadata)
        decorated = eval(code, {targ_name:target, fn_name:fn})
        decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults
        return update_wrapper(decorated, fn)
    return update_wrapper(decorate, target)
def get_cls_kwargs(cls):
    """Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments. If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed to
    pass along unrecognized keywords to it's base classes, and the collection
    process is repeated recursively on each of the bases.

    Uses a subset of inspect.getargspec() to cut down on method overhead.
    No anonymous tuple arguments please !

    """
    # Start from the first class in the MRO that actually defines __init__.
    for c in cls.__mro__:
        if '__init__' in c.__dict__:
            stack = set([c])
            break
    else:
        return []

    args = set()
    while stack:
        class_ = stack.pop()
        ctr = class_.__dict__.get('__init__', False)
        if (not ctr or
                not isinstance(ctr, types.FunctionType) or
                not isinstance(ctr.func_code, types.CodeType)):
            # Uninspectable __init__ (e.g. C-implemented): recurse into bases.
            stack.update(class_.__bases__)
            continue

        # this is shorthand for
        # names, _, has_kw, _ = inspect.getargspec(ctr)

        names, has_kw = inspect_func_args(ctr)
        args.update(names)
        if has_kw:
            # **kwargs present: base-class __init__s may consume them too.
            stack.update(class_.__bases__)
    args.discard('self')
    return args
try:
    # Fast path: read argument info straight off the code object,
    # avoiding the overhead of inspect.getargspec().
    from inspect import CO_VARKEYWORDS

    def inspect_func_args(fn):
        co = fn.func_code
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        # CO_VARKEYWORDS flag set => fn declares a **kwargs catch-all.
        has_kw = bool(co.co_flags & CO_VARKEYWORDS)
        return args, has_kw
except ImportError:
    # Fallback for interpreters without CO_VARKEYWORDS.
    def inspect_func_args(fn):
        names, _, has_kw, _ = inspect.getargspec(fn)
        return names, bool(has_kw)
def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.

    Uses getargspec so is safe to call for methods, functions,
    etc.

    """
    # Element 0 of the getargspec tuple is the list of named arguments.
    return inspect.getargspec(func)[0]
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    A enhanced variant of inspect.formatargspec to support code generation.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax. Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}

    """
    if callable(fn):
        spec = inspect_getfullargspec(fn)
    else:
        # we accept an existing argspec...
        spec = fn
    args = inspect.formatargspec(*spec)
    if spec[0]:
        # First declared positional arg.
        self_arg = spec[0][0]
    elif spec[1]:
        # No positionals; fall back to indexing the *varargs tuple.
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None
    # Py3K
    #apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2], None, spec[4])
    #num_defaults = 0
    #if spec[3]:
    #    num_defaults += len(spec[3])
    #if spec[4]:
    #    num_defaults += len(spec[4])
    #name_args = spec[0] + spec[4]
    # Py2K
    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
    num_defaults = 0
    if spec[3]:
        num_defaults += len(spec[3])
    name_args = spec[0]
    # end Py2K

    if num_defaults:
        # Trailing args with defaults are re-emitted as name=name keywords.
        defaulted_vals = name_args[0-num_defaults:]
    else:
        defaulted_vals = ()

    apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg,
                    apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # Strip the surrounding parentheses for inline code generation.
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods

    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    try:
        return format_argspec_plus(method, grouped=grouped)
    except TypeError:
        # Introspection failed: synthesize a generic signature.
        self_arg = 'self'
        if method is object.__init__:
            args = grouped and '(self)' or 'self'
        else:
            args = (grouped and '(self, *args, **kwargs)'
                    or 'self, *args, **kwargs')
        return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods

    Wraps inspect.getargspec with error handling for typical __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    try:
        return inspect.getargspec(method)
    except TypeError:
        # C-implemented callables cannot be introspected; fake a spec.
        if method is object.__init__:
            return (['self'], None, None, None)
        else:
            return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not required."""

    # Unbound methods (im_self is None) are unwrapped to their plain
    # function; everything else passes through untouched.
    if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self:
        return func_or_cls.im_func
    else:
        return func_or_cls
def generic_repr(obj):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.

    Arguments that still hold their default value are omitted from the
    output; attributes that cannot be read are silently skipped.
    """
    def genargs():
        try:
            (args, vargs, vkw, defaults) = inspect.getargspec(obj.__init__)
        except TypeError:
            # Uninspectable __init__ (e.g. C-implemented): emit nothing.
            return

        default_len = defaults and len(defaults) or 0

        if not default_len:
            for arg in args[1:]:
                yield repr(getattr(obj, arg, None))
            if vargs is not None and hasattr(obj, vargs):
                yield ', '.join(repr(val) for val in getattr(obj, vargs))
        else:
            # Positional args first, then keyword args that differ from
            # their declared defaults.
            for arg in args[1:-default_len]:
                yield repr(getattr(obj, arg, None))
            for (arg, defval) in zip(args[-default_len:], defaults):
                try:
                    val = getattr(obj, arg, None)
                    if val != defval:
                        yield '%s=%r' % (arg, val)
                except Exception:
                    # Was a bare "except:"; narrowed so KeyboardInterrupt /
                    # SystemExit are no longer swallowed. Attribute access
                    # or comparison failures are still skipped best-effort.
                    pass

    return "%s(%s)" % (obj.__class__.__name__, ", ".join(genargs()))
class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.

    """
    def __init__(self, meth):
        # Store the bound instance and method name rather than the bound
        # method object itself, so the result can be pickled.
        self.target = meth.im_self
        self.name = meth.__name__

    def __call__(self, *arg, **kw):
        # Re-resolve the method on each call.
        return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.

    Traverses diamond hierarchies.

    Fibs slightly: subclasses of builtin types are not returned. Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.

    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.

    """
    # Py2K
    if isinstance(cls, types.ClassType):
        return list()
    # end Py2K
    hier = set([cls])
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        # Py2K
        if isinstance(c, types.ClassType):
            continue
        # Walk upwards through bases not yet seen (skipping old-style ones).
        for b in (_ for _ in c.__bases__
                  if _ not in hier and not isinstance(_, types.ClassType)):
        # end Py2K
        # Py3K
        #for b in (_ for _ in c.__bases__
        #          if _ not in hier):
            process.append(b)
            hier.add(b)
        # Py3K
        #if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
        #    continue
        # Py2K
        if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'):
            continue
        # end Py2K
        # Walk downwards through subclasses not yet seen.
        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)
def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().

    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.

    """
    for key in dir(cls):
        # Walk the MRO and yield the value from the first class dict that
        # defines the key, so descriptors are returned raw, not invoked.
        for klass in cls.__mro__:
            if key in klass.__dict__:
                yield (key, klass.__dict__[key])
                break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
                                 name='self.proxy', from_instance=None):
    """Automates delegation of __specials__ for a proxying type.

    Generates, for each dunder method of ``from_cls`` missing on
    ``into_cls`` (or for each name in ``only``), a forwarding method that
    delegates to the expression given by ``name``.
    """

    if only:
        dunders = only
    else:
        if skip is None:
            skip = ('__slots__', '__del__', '__getattribute__',
                    '__metaclass__', '__getstate__', '__setstate__')
        # Only dunders not already defined on the target class.
        dunders = [m for m in dir(from_cls)
                   if (m.startswith('__') and m.endswith('__') and
                       not hasattr(into_cls, m) and m not in skip)]
    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, '__call__'):
                continue
            fn = getattr(fn, 'im_func', fn)
        except AttributeError:
            continue
        try:
            # Mirror the source signature; drop 'self' for the delegated call.
            spec = inspect.getargspec(fn)
            fn_args = inspect.formatargspec(spec[0])
            d_args = inspect.formatargspec(spec[0][1:])
        except TypeError:
            fn_args = '(self, *args, **kw)'
            d_args = '(*args, **kw)'

        py = ("def %(method)s%(fn_args)s: "
              "return %(name)s.%(method)s%(d_args)s" % locals())

        env = from_instance is not None and {name: from_instance} or {}
        exec py in env
        try:
            env[method].func_defaults = fn.func_defaults
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""

    # Unwrap bound/unbound methods to the underlying function before
    # comparing by identity.
    # Py3K
    #return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2)
    # Py2K
    return getattr(meth1, 'im_func', meth1) is getattr(meth2, 'im_func', meth2)
    # end Py2K
def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient. Methods present on ``obj`` that
    are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned. In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type. All public methods of cls are considered the
      interface. An ``obj`` instance of cls will always pass, ignoring
      ``required``..
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient. As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    # complies: >= means "all required present"; > means "at least one
    # interface member present" (the strict superset over the empty set).
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not type(obj) is dict:
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    # Promote each dict entry to a static method on the shell class.
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))
class memoized_property(object):
    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        # Mirror the wrapped function's metadata so the descriptor
        # presents itself like the attribute it computes.
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        # Compute once, then shadow the descriptor via the instance dict
        # so subsequent lookups never reach __get__ again.
        value = self.fget(obj)
        obj.__dict__[self.__name__] = value
        return value
class memoized_instancemethod(object):
    """Decorate a method memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.

    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self

        def oneshot(*args, **kw):
            # First call: compute the result, then replace ourselves on the
            # instance with a constant-returning stub carrying our metadata.
            result = self.fget(obj, *args, **kw)

            def memo(*a, **kw):
                return result

            memo.__name__ = self.__name__
            memo.__doc__ = self.__doc__
            obj.__dict__[self.__name__] = memo
            return result

        oneshot.__name__ = self.__name__
        oneshot.__doc__ = self.__doc__
        return oneshot
def reset_memoized(instance, name):
    """Clear the memoized value *name* on *instance*, if present, so the
    next access recomputes it."""
    instance.__dict__.pop(name, None)
class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        # Names of every memoized member registered with this group.
        self.attributes = list(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for name in self.attributes:
            stash.pop(name, None)

    def __call__(self, fn):
        # Register and wrap as a memoized property.
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        # Register and wrap as a memoized instance method.
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
class importlater(object):
    """Deferred import object.

    e.g.::

        somesubmod = importlater("mypackage.somemodule", "somesubmod")

    is equivalent to::

        from mypackage.somemodule import somesubmod

    except evaluted upon attribute access to "somesubmod".

    importlater() currently requires that resolve_all() be
    called, typically at the bottom of a package's __init__.py.
    This is so that __import__ still called only at
    module import time, and not potentially within
    a non-main thread later on.

    """

    # Registry of instances whose __import__ has not yet been performed.
    _unresolved = set()

    def __init__(self, path, addtl=None):
        self._il_path = path
        self._il_addtl = addtl
        importlater._unresolved.add(self)

    @classmethod
    def resolve_all(cls):
        # Perform the actual __import__ for every pending instance.
        for m in list(importlater._unresolved):
            m._resolve()

    @property
    def _full_path(self):
        # Dotted path this object stands in for, for error messages.
        if self._il_addtl:
            return self._il_path + "." + self._il_addtl
        else:
            return self._il_path

    @memoized_property
    def module(self):
        # The resolved module/attribute; cached after first access.
        if self in importlater._unresolved:
            raise ImportError(
                "importlater.resolve_all() hasn't been called")

        m = self._initial_import
        if self._il_addtl:
            m = getattr(m, self._il_addtl)
        else:
            # __import__ returns the top-level package; walk down.
            for token in self._il_path.split(".")[1:]:
                m = getattr(m, token)
        return m

    def _resolve(self):
        importlater._unresolved.discard(self)
        if self._il_addtl:
            self._initial_import = __import__(
                self._il_path, globals(), locals(),
                [self._il_addtl])
        else:
            self._initial_import = __import__(self._il_path)

    def __getattr__(self, key):
        # Any attribute access triggers resolution of the target module.
        if key == 'module':
            raise ImportError("Could not resolve module %s"
                              % self._full_path)
        try:
            attr = getattr(self.module, key)
        except AttributeError:
            raise AttributeError(
                "Module %s has no attribute '%s'" %
                (self._full_path, key)
            )
        # Cache on the instance so __getattr__ is bypassed next time.
        self.__dict__[key] = attr
        return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce *obj* to a boolean, accepting the usual truth-word strings.

    String input is stripped and lowercased first; an unrecognized string
    raises ValueError.  Non-string input is simply passed through bool().
    """
    if not isinstance(obj, (str, unicode)):
        return bool(obj)
    normalized = obj.strip().lower()
    if normalized in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if normalized in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError("String is not true/false: %r" % normalized)
def bool_or_str(*text):
    """Return a callable that evaluates a string as a boolean via asbool(),
    except that any value listed in *text* is passed through unchanged.
    """
    def bool_or_value(obj):
        return obj if obj in text else asbool(obj)
    return bool_or_value
def asint(value):
    """Coerce *value* to an integer, passing None through untouched."""
    return None if value is None else int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw' with a non-None value that is not
    already of type 'type\_', coerce it in place.  When 'flexi_bool' is
    True, coercion to bool goes through asbool() so the string '0' is
    considered false.
    """
    if key not in kw:
        return
    value = kw[key]
    if value is None or type(value) is type_:
        return
    if type_ is bool and flexi_bool:
        kw[key] = asbool(value)
    else:
        kw[key] = type_(value)
def constructor_copy(obj, cls, **kw):
    """Instantiate *cls*, pulling constructor arguments from obj.__dict__.

    Names matching the constructor signature of ``cls`` (as reported by
    get_cls_kwargs) that exist on *obj* are copied in, overriding any
    same-named entries already present in **kw.
    """
    for name in get_cls_kwargs(cls):
        if name in obj.__dict__:
            kw[name] = obj.__dict__[name]
    return cls(**kw)
def counter():
    """Return a threadsafe counter function."""
    lock = threading.Lock()
    # Start counting at 1; the 'L' suffix keeps this a Python 2 long.
    counter = itertools.count(1L)
    # avoid the 2to3 "next" transformation...
    def _next():
        # Guard the shared iterator so concurrent callers each get a
        # distinct value.
        lock.acquire()
        try:
            return counter.next()
        finally:
            lock.release()
    return _next
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, '__emulates__'):
        emulated = specimen.__emulates__
        # canonicalize set vs sets.Set to a standard: the builtin set
        if emulated is not None and issubclass(emulated, set_types):
            return set
        return emulated

    # Classes are tested with issubclass, instances with isinstance.
    isa = issubclass if isinstance(specimen, type) else isinstance
    if isa(specimen, list):
        return list
    if isa(specimen, set_types):
        return set
    if isa(specimen, dict):
        return dict

    # Fall back to duck typing on each type's characteristic mutator.
    if hasattr(specimen, 'append'):
        return list
    if hasattr(specimen, 'add'):
        return set
    if hasattr(specimen, 'set'):
        return dict
    return default
def assert_arg_type(arg, argtype, name):
    """Return *arg* if it is an instance of *argtype* (a type or tuple of
    types); otherwise raise ArgumentError naming the offending argument.
    """
    if not isinstance(arg, argtype):
        if isinstance(argtype, tuple):
            raise exc.ArgumentError(
                "Argument '%s' is expected to be one of type %s, got '%s'" %
                (name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
        raise exc.ArgumentError(
            "Argument '%s' is expected to be of type '%s', got '%s'" %
            (name, argtype, type(arg)))
    return arg
def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""
    # NOTE(review): the "# Py2K"/"# end Py2K" markers below appear to be
    # consumed by the project's 2to3 preprocessing — preserve them verbatim.
    # Py3K
    #if hasattr(dictlike, 'items'):
    #    return dictlike.items()
    # Py2K
    if hasattr(dictlike, 'iteritems'):
        return dictlike.iteritems()
    elif hasattr(dictlike, 'items'):
        return iter(dictlike.items())
    # end Py2K

    # No items()-style protocol: fall back to a per-key getter plus some
    # way of enumerating the keys.
    getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
    if getter is None:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)

    if hasattr(dictlike, 'iterkeys'):
        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)
        return iterator()
    elif hasattr(dictlike, 'keys'):
        return iter((key, getter(key)) for key in dictlike.keys())
    else:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)
class classproperty(property):
    """Like @property, but the getter operates on the class rather than
    on instances.

    This decorator is currently special when using the declarative module;
    note that :class:`~.sqlalchemy.ext.declarative.declared_attr` should be
    used for this purpose with declarative.
    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(self, obj, owner):
        # Always invoke the getter with the owning class, never the instance.
        return self.fget(owner)
class _symbol(object):
def __init__(self, name, doc=None):
"""Construct a new named symbol."""
assert isinstance(name, str)
self.name = name
if doc:
self.__doc__ = doc
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return "<symbol '%s>" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    <symbol 'foo>

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  Symbols are also singletons:
    repeated calls of symbol('name') all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This is strictly
    so that Sphinx autoattr picks up the docstring we want (it doesn't
    appear to pick up the in-module docstring if the datamember is in a
    different module - autoattribute also blows up completely).  If Sphinx
    fixes/improves this then we would no longer need ``doc`` here.
    """
    # Interned instances keyed by name.
    symbols = {}
    _lock = threading.Lock()

    def __new__(cls, name, doc=None):
        # Look up (and possibly create) under the lock so concurrent
        # callers always receive the same instance.
        cls._lock.acquire()
        try:
            existing = cls.symbols.get(name)
            if existing is not None:
                return existing
            created = _symbol(name, doc)
            cls.symbols[name] = created
            return created
        finally:
            symbol._lock.release()
# Module-wide sequence number handed out by set_creation_order().
_creation_order = 1

def set_creation_order(instance):
    """Stamp *instance* with a '_creation_order' sequence number.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).
    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order = _creation_order + 1
def warn_exception(func, *args, **kwargs):
    """Execute *func*, converting any exception raised into a warning."""
    try:
        return func(*args, **kwargs)
    except:  # deliberately bare: every failure becomes a warning
        exc_type, exc_value = sys.exc_info()[0:2]
        warn("%s('%s') ignored" % (exc_type, exc_value))
def warn(msg, stacklevel=3):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as the category.

    .. note::

       This function is swapped out when the test suite runs, with a
       compatible version that uses warnings.warn_explicit, so that the
       warnings registry can be controlled.
    """
    if not isinstance(msg, basestring):
        warnings.warn(msg, stacklevel=stacklevel)
    else:
        warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
# Lines pointing into sqlalchemy's own source tree.
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
# Lines originating from the unittest/unittest2 harness.
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')

def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
        a list of traceback lines as returned by ``traceback.format_stack()``
    :param exclude_prefix:
        a regular expression object matching lines to skip at beginning of ``tb``
    :param exclude_suffix:
        a regular expression object matching lines to skip at end of ``tb``
    """
    first = 0
    last = len(tb) - 1
    # Advance past leading test-harness frames.
    while first <= last and exclude_prefix.search(tb[first]):
        first += 1
    # Back up over trailing frames inside sqlalchemy itself.
    while first <= last and exclude_suffix.search(tb[last]):
        last -= 1
    return tb[first:last + 1]
# Convenient alias for the type of None, for isinstance() checks.
NoneType = type(None)
| gpl-3.0 |
yiakwy/numpy | numpy/core/code_generators/generate_umath.py | 57 | 31090 | from __future__ import division, print_function
import os
import re
import struct
import sys
import textwrap
sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
# Names of the C-level PyUFunc identity constants recorded for each ufunc's
# reduction identity.
Zero = "PyUFunc_Zero"
One = "PyUFunc_One"
None_ = "PyUFunc_None"
ReorderableNone = "PyUFunc_ReorderableNone"
# Sentinel value to specify using the full type description in the
# function name
class FullTypeDescr(object):
    pass
class FuncNameSuffix(object):
    """Marker wrapping the suffix to append when generating function names."""

    def __init__(self, suffix):
        self.suffix = suffix
class TypeDescription(object):
    """Type signature for a ufunc.

    Attributes
    ----------
    type : str
        Character representing the nominal type.
    func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
        The string representing the expression to insert into the data
        array, if any.
    in_ : str or None, optional
        The typecode(s) of the inputs.
    out : str or None, optional
        The typecode(s) of the outputs.
    astype : dict or None, optional
        If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y
        instead of PyUFunc_x_x/PyUFunc_xx_x.
    """

    def __init__(self, type, f=None, in_=None, out=None, astype=None):
        self.type = type
        self.func_data = f
        self.astype_dict = astype if astype is not None else {}
        # 'P' in an explicit signature is a placeholder for the nominal type.
        self.in_ = in_.replace('P', type) if in_ is not None else None
        self.out = out.replace('P', type) if out is not None else None

    def finish_signature(self, nin, nout):
        # Default any unspecified signature to the nominal type repeated,
        # then sanity-check lengths against the ufunc's arity.
        if self.in_ is None:
            self.in_ = self.type * nin
        assert len(self.in_) == nin
        if self.out is None:
            self.out = self.type * nout
        assert len(self.out) == nout
        self.astype = self.astype_dict.get(self.type, None)
# Per-typecode templates for the C math-library function names; typecodes
# not listed use the bare name unchanged.
_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl',
                  F='nc_%sf', D='nc_%s', G='nc_%sl')

def build_func_data(types, f):
    """Expand base function name *f* into per-typecode C function names."""
    return [_fdata_map.get(code, '%s') % (f,) for code in types]
def TD(types, f=None, astype=None, in_=None, out=None):
    """Build one TypeDescription per typecode in *types*.

    ``f``, ``in_`` and ``out`` may each be a single value applied to every
    typecode, or a sequence matched elementwise against *types*.
    """
    if f is None:
        func_data = (None,) * len(types)
    elif isinstance(f, str):
        func_data = build_func_data(types, f)
    else:
        assert len(f) == len(types)
        func_data = f
    if isinstance(in_, str):
        in_ = (in_,) * len(types)
    elif in_ is None:
        in_ = (None,) * len(types)
    if isinstance(out, str):
        out = (out,) * len(types)
    elif out is None:
        out = (None,) * len(types)
    return [TypeDescription(t, f=fd, in_=i, out=o, astype=astype)
            for t, fd, i, o in zip(types, func_data, in_, out)]
class Ufunc(object):
    """Description of a ufunc.

    Attributes
    ----------
    nin : number of input arguments
    nout : number of output arguments
    identity : identity element for a two-argument function
    docstring : docstring for the ufunc
    type_descriptions : list of TypeDescription objects
    """

    def __init__(self, nin, nout, identity, docstring, typereso,
                 *type_descriptions):
        self.nin = nin
        self.nout = nout
        # A missing identity maps onto the PyUFunc_None C constant.
        self.identity = None_ if identity is None else identity
        self.docstring = docstring
        self.typereso = typereso
        # Flatten the variadic groups into one list, then let each
        # description fill in its defaulted signature.
        self.type_descriptions = []
        for group in type_descriptions:
            self.type_descriptions.extend(group)
        for td in self.type_descriptions:
            td.finish_signature(self.nin, self.nout)
# String-handling utilities to avoid locale-dependence.
import string
if sys.version_info[0] < 3:
    # Python 2: translation table for str.translate().
    UPPER_TABLE = string.maketrans(string.ascii_lowercase,
                                   string.ascii_uppercase)
else:
    # Python 3: translation table for bytes.translate().
    UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
                                  bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
    """Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper()
    such that we can avoid changing behavior with changing locales.  In
    particular, Turkish has distinct dotted and dotless variants of the
    Latin letter "I" in both lowercase and uppercase, so "i".upper() != "I"
    in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.lib.utils import english_upper
    >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_upper(s)
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
#each entry in defdict is a Ufunc object.

#name: [string of chars for which it is defined,
#       string of characters using func interface,
#       tuple of strings giving funcs for data,
#       (in, out), or (instr, outstr) giving the signature as character codes,
#       identity,
#       docstring,
#       output specification (optional)
#       ]

# Maps each dtype character code to the name used in generated C symbols.
chartoname = {'?': 'bool',
              'b': 'byte',
              'B': 'ubyte',
              'h': 'short',
              'H': 'ushort',
              'i': 'int',
              'I': 'uint',
              'l': 'long',
              'L': 'ulong',
              'q': 'longlong',
              'Q': 'ulonglong',
              'e': 'half',
              'f': 'float',
              'd': 'double',
              'g': 'longdouble',
              'F': 'cfloat',
              'D': 'cdouble',
              'G': 'clongdouble',
              'M': 'datetime',
              'm': 'timedelta',
              'O': 'OBJECT',
              # '.' is like 'O', but calls a method of the object instead
              # of a function
              'P': 'OBJECT',
              }
# Named groups of dtype character codes used to declare which types each
# ufunc supports.  NOTE(review): ``all`` shadows the builtin of the same
# name within this module.
all = '?bBhHiIlLqQefdgFDGOMm'
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
times = 'Mm'
timedeltaonly = 'm'
intsO = ints + O
bints = '?' + ints
bintsO = bints + O
flts = 'efdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
nobool = all[1:]
noobj = all[:-3]+all[-2:]
nobool_or_obj = all[1:-3]+all[-2:]
nobool_or_datetime = all[1:-2]+all[-1:]
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
nocmplxO = nocmplx+O
nocmplxP = nocmplx+P
notimes_or_obj = bints + inexact
nodatetime_or_obj = bints + inexact
# Find which code corresponds to int64.
int64 = ''
uint64 = ''
for code in 'bhilq':
    # The first signed integer code whose C type is 8 bytes wide is taken
    # as int64; its upper-cased code is the matching unsigned type.
    if struct.calcsize(code) == 8:
        int64 = code
        uint64 = english_upper(code)
        break
# This dictionary describes all the ufunc implementations, generating
# all the function names and their corresponding ufunc signatures.  TD is
# an object which expands a list of character codes into an array of
# TypeDescriptions.
# Each value is a Ufunc(nin, nout, identity, docstring, type_resolver,
# *type_descriptions); make_arrays()/make_ufuncs() below turn this table
# into C source.
defdict = {
'add':
    Ufunc(2, 1, Zero,
          docstrings.get('numpy.core.umath.add'),
          'PyUFunc_AdditionTypeResolver',
          TD(notimes_or_obj),
          [TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
           TypeDescription('m', FullTypeDescr, 'mm', 'm'),
           TypeDescription('M', FullTypeDescr, 'mM', 'M'),
          ],
          TD(O, f='PyNumber_Add'),
          ),
'subtract':
    Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
          docstrings.get('numpy.core.umath.subtract'),
          'PyUFunc_SubtractionTypeResolver',
          TD(notimes_or_obj),
          [TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
           TypeDescription('m', FullTypeDescr, 'mm', 'm'),
           TypeDescription('M', FullTypeDescr, 'MM', 'm'),
          ],
          TD(O, f='PyNumber_Subtract'),
          ),
'multiply':
    Ufunc(2, 1, One,
          docstrings.get('numpy.core.umath.multiply'),
          'PyUFunc_MultiplicationTypeResolver',
          TD(notimes_or_obj),
          [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
           TypeDescription('m', FullTypeDescr, 'qm', 'm'),
           TypeDescription('m', FullTypeDescr, 'md', 'm'),
           TypeDescription('m', FullTypeDescr, 'dm', 'm'),
          ],
          TD(O, f='PyNumber_Multiply'),
          ),
'divide':
    Ufunc(2, 1, None, # One is only a unit to the right, not the left
          docstrings.get('numpy.core.umath.divide'),
          'PyUFunc_DivisionTypeResolver',
          TD(intfltcmplx),
          [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
           TypeDescription('m', FullTypeDescr, 'md', 'm'),
           TypeDescription('m', FullTypeDescr, 'mm', 'd'),
          ],
          TD(O, f='PyNumber_Divide'),
          ),
'floor_divide':
    Ufunc(2, 1, None, # One is only a unit to the right, not the left
          docstrings.get('numpy.core.umath.floor_divide'),
          'PyUFunc_DivisionTypeResolver',
          TD(intfltcmplx),
          [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
           TypeDescription('m', FullTypeDescr, 'md', 'm'),
           #TypeDescription('m', FullTypeDescr, 'mm', 'd'),
          ],
          TD(O, f='PyNumber_FloorDivide'),
          ),
'true_divide':
    Ufunc(2, 1, None, # One is only a unit to the right, not the left
          docstrings.get('numpy.core.umath.true_divide'),
          'PyUFunc_DivisionTypeResolver',
          TD('bBhH', out='d'),
          TD('iIlLqQ', out='d'),
          TD(flts+cmplx),
          [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
           TypeDescription('m', FullTypeDescr, 'md', 'm'),
           TypeDescription('m', FullTypeDescr, 'mm', 'd'),
          ],
          TD(O, f='PyNumber_TrueDivide'),
          ),
'conjugate':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.conjugate'),
          None,
          TD(ints+flts+cmplx),
          TD(P, f='conjugate'),
          ),
'fmod':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.fmod'),
          None,
          TD(ints),
          TD(flts, f='fmod', astype={'e':'f'}),
          TD(P, f='fmod'),
          ),
'square':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.square'),
          None,
          TD(ints+inexact),
          TD(O, f='Py_square'),
          ),
'reciprocal':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.reciprocal'),
          None,
          TD(ints+inexact),
          TD(O, f='Py_reciprocal'),
          ),
# This is no longer used as numpy.ones_like, however it is
# still used by some internal calls.
'_ones_like':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath._ones_like'),
          'PyUFunc_OnesLikeTypeResolver',
          TD(noobj),
          TD(O, f='Py_get_one'),
          ),
'power':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.power'),
          None,
          TD(ints),
          TD(inexact, f='pow', astype={'e':'f'}),
          TD(O, f='npy_ObjectPower'),
          ),
'absolute':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.absolute'),
          'PyUFunc_AbsoluteTypeResolver',
          TD(bints+flts+timedeltaonly),
          TD(cmplx, out=('f', 'd', 'g')),
          TD(O, f='PyNumber_Absolute'),
          ),
'_arg':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath._arg'),
          None,
          TD(cmplx, out=('f', 'd', 'g')),
          ),
'negative':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.negative'),
          'PyUFunc_NegativeTypeResolver',
          TD(bints+flts+timedeltaonly),
          TD(cmplx, f='neg'),
          TD(O, f='PyNumber_Negative'),
          ),
'sign':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.sign'),
          'PyUFunc_SimpleUnaryOperationTypeResolver',
          TD(nobool_or_datetime),
          ),
'greater':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.greater'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'greater_equal':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.greater_equal'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'less':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.less'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'less_equal':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.less_equal'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'equal':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.equal'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'not_equal':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.not_equal'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(all, out='?'),
          ),
'logical_and':
    Ufunc(2, 1, One,
          docstrings.get('numpy.core.umath.logical_and'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(nodatetime_or_obj, out='?'),
          TD(O, f='npy_ObjectLogicalAnd'),
          ),
'logical_not':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.logical_not'),
          None,
          TD(nodatetime_or_obj, out='?'),
          TD(O, f='npy_ObjectLogicalNot'),
          ),
'logical_or':
    Ufunc(2, 1, Zero,
          docstrings.get('numpy.core.umath.logical_or'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(nodatetime_or_obj, out='?'),
          TD(O, f='npy_ObjectLogicalOr'),
          ),
'logical_xor':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.logical_xor'),
          'PyUFunc_SimpleBinaryComparisonTypeResolver',
          TD(nodatetime_or_obj, out='?'),
          TD(P, f='logical_xor'),
          ),
'maximum':
    Ufunc(2, 1, ReorderableNone,
          docstrings.get('numpy.core.umath.maximum'),
          'PyUFunc_SimpleBinaryOperationTypeResolver',
          TD(noobj),
          TD(O, f='npy_ObjectMax')
          ),
'minimum':
    Ufunc(2, 1, ReorderableNone,
          docstrings.get('numpy.core.umath.minimum'),
          'PyUFunc_SimpleBinaryOperationTypeResolver',
          TD(noobj),
          TD(O, f='npy_ObjectMin')
          ),
'fmax':
    Ufunc(2, 1, ReorderableNone,
          docstrings.get('numpy.core.umath.fmax'),
          'PyUFunc_SimpleBinaryOperationTypeResolver',
          TD(noobj),
          TD(O, f='npy_ObjectMax')
          ),
'fmin':
    Ufunc(2, 1, ReorderableNone,
          docstrings.get('numpy.core.umath.fmin'),
          'PyUFunc_SimpleBinaryOperationTypeResolver',
          TD(noobj),
          TD(O, f='npy_ObjectMin')
          ),
'logaddexp':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.logaddexp'),
          None,
          TD(flts, f="logaddexp", astype={'e':'f'})
          ),
'logaddexp2':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.logaddexp2'),
          None,
          TD(flts, f="logaddexp2", astype={'e':'f'})
          ),
'bitwise_and':
    Ufunc(2, 1, One,
          docstrings.get('numpy.core.umath.bitwise_and'),
          None,
          TD(bints),
          TD(O, f='PyNumber_And'),
          ),
'bitwise_or':
    Ufunc(2, 1, Zero,
          docstrings.get('numpy.core.umath.bitwise_or'),
          None,
          TD(bints),
          TD(O, f='PyNumber_Or'),
          ),
'bitwise_xor':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.bitwise_xor'),
          None,
          TD(bints),
          TD(O, f='PyNumber_Xor'),
          ),
'invert':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.invert'),
          None,
          TD(bints),
          TD(O, f='PyNumber_Invert'),
          ),
'left_shift':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.left_shift'),
          None,
          TD(ints),
          TD(O, f='PyNumber_Lshift'),
          ),
'right_shift':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.right_shift'),
          None,
          TD(ints),
          TD(O, f='PyNumber_Rshift'),
          ),
'degrees':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.degrees'),
          None,
          TD(fltsP, f='degrees', astype={'e':'f'}),
          ),
'rad2deg':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.rad2deg'),
          None,
          TD(fltsP, f='rad2deg', astype={'e':'f'}),
          ),
'radians':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.radians'),
          None,
          TD(fltsP, f='radians', astype={'e':'f'}),
          ),
'deg2rad':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.deg2rad'),
          None,
          TD(fltsP, f='deg2rad', astype={'e':'f'}),
          ),
'arccos':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arccos'),
          None,
          TD(inexact, f='acos', astype={'e':'f'}),
          TD(P, f='arccos'),
          ),
'arccosh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arccosh'),
          None,
          TD(inexact, f='acosh', astype={'e':'f'}),
          TD(P, f='arccosh'),
          ),
'arcsin':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arcsin'),
          None,
          TD(inexact, f='asin', astype={'e':'f'}),
          TD(P, f='arcsin'),
          ),
'arcsinh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arcsinh'),
          None,
          TD(inexact, f='asinh', astype={'e':'f'}),
          TD(P, f='arcsinh'),
          ),
'arctan':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arctan'),
          None,
          TD(inexact, f='atan', astype={'e':'f'}),
          TD(P, f='arctan'),
          ),
'arctanh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.arctanh'),
          None,
          TD(inexact, f='atanh', astype={'e':'f'}),
          TD(P, f='arctanh'),
          ),
'cos':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.cos'),
          None,
          TD(inexact, f='cos', astype={'e':'f'}),
          TD(P, f='cos'),
          ),
'sin':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.sin'),
          None,
          TD(inexact, f='sin', astype={'e':'f'}),
          TD(P, f='sin'),
          ),
'tan':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.tan'),
          None,
          TD(inexact, f='tan', astype={'e':'f'}),
          TD(P, f='tan'),
          ),
'cosh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.cosh'),
          None,
          TD(inexact, f='cosh', astype={'e':'f'}),
          TD(P, f='cosh'),
          ),
'sinh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.sinh'),
          None,
          TD(inexact, f='sinh', astype={'e':'f'}),
          TD(P, f='sinh'),
          ),
'tanh':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.tanh'),
          None,
          TD(inexact, f='tanh', astype={'e':'f'}),
          TD(P, f='tanh'),
          ),
'exp':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.exp'),
          None,
          TD(inexact, f='exp', astype={'e':'f'}),
          TD(P, f='exp'),
          ),
'exp2':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.exp2'),
          None,
          TD(inexact, f='exp2', astype={'e':'f'}),
          TD(P, f='exp2'),
          ),
'expm1':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.expm1'),
          None,
          TD(inexact, f='expm1', astype={'e':'f'}),
          TD(P, f='expm1'),
          ),
'log':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.log'),
          None,
          TD(inexact, f='log', astype={'e':'f'}),
          TD(P, f='log'),
          ),
'log2':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.log2'),
          None,
          TD(inexact, f='log2', astype={'e':'f'}),
          TD(P, f='log2'),
          ),
'log10':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.log10'),
          None,
          TD(inexact, f='log10', astype={'e':'f'}),
          TD(P, f='log10'),
          ),
'log1p':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.log1p'),
          None,
          TD(inexact, f='log1p', astype={'e':'f'}),
          TD(P, f='log1p'),
          ),
'sqrt':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.sqrt'),
          None,
          TD(inexactvec),
          TD(inexact, f='sqrt', astype={'e':'f'}),
          TD(P, f='sqrt'),
          ),
'cbrt':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.cbrt'),
          None,
          TD(flts, f='cbrt', astype={'e':'f'}),
          TD(P, f='cbrt'),
          ),
'ceil':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.ceil'),
          None,
          TD(flts, f='ceil', astype={'e':'f'}),
          TD(P, f='ceil'),
          ),
'trunc':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.trunc'),
          None,
          TD(flts, f='trunc', astype={'e':'f'}),
          TD(P, f='trunc'),
          ),
'fabs':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.fabs'),
          None,
          TD(flts, f='fabs', astype={'e':'f'}),
          TD(P, f='fabs'),
          ),
'floor':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.floor'),
          None,
          TD(flts, f='floor', astype={'e':'f'}),
          TD(P, f='floor'),
          ),
'rint':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.rint'),
          None,
          TD(inexact, f='rint', astype={'e':'f'}),
          TD(P, f='rint'),
          ),
'arctan2':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.arctan2'),
          None,
          TD(flts, f='atan2', astype={'e':'f'}),
          TD(P, f='arctan2'),
          ),
'remainder':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.remainder'),
          None,
          TD(intflt),
          TD(O, f='PyNumber_Remainder'),
          ),
'hypot':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.hypot'),
          None,
          TD(flts, f='hypot', astype={'e':'f'}),
          TD(P, f='hypot'),
          ),
'isnan':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.isnan'),
          None,
          TD(inexact, out='?'),
          ),
'isinf':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.isinf'),
          None,
          TD(inexact, out='?'),
          ),
'isfinite':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.isfinite'),
          None,
          TD(inexact, out='?'),
          ),
'signbit':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.signbit'),
          None,
          TD(flts, out='?'),
          ),
'copysign':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.copysign'),
          None,
          TD(flts),
          ),
'nextafter':
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.nextafter'),
          None,
          TD(flts),
          ),
'spacing':
    Ufunc(1, 1, None,
          docstrings.get('numpy.core.umath.spacing'),
          None,
          TD(flts),
          ),
'modf':
    Ufunc(1, 2, None,
          docstrings.get('numpy.core.umath.modf'),
          None,
          TD(flts),
          ),
'ldexp' :
    Ufunc(2, 1, None,
          docstrings.get('numpy.core.umath.ldexp'),
          None,
          [TypeDescription('e', None, 'ei', 'e'),
           TypeDescription('f', None, 'fi', 'f'),
           TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'),
           TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'),
           TypeDescription('d', None, 'di', 'd'),
           TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'),
           TypeDescription('g', None, 'gi', 'g'),
           TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'),
          ],
          ),
'frexp' :
    Ufunc(1, 2, None,
          docstrings.get('numpy.core.umath.frexp'),
          None,
          [TypeDescription('e', None, 'e', 'ei'),
           TypeDescription('f', None, 'f', 'fi'),
           TypeDescription('d', None, 'd', 'di'),
           TypeDescription('g', None, 'g', 'gi'),
          ],
          )
}
if sys.version_info[0] >= 3:
    # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators
    # (Python 3 has no separate classic-division ufunc).
    del defdict['divide']
def indent(st, spaces):
    """Indent every line of *st* by *spaces* spaces, then strip any
    trailing run of spaces at the end of the result."""
    pad = ' ' * spaces
    padded = pad + st.replace('\n', '\n' + pad)
    return re.sub(r' +$', r'', padded)
# Suffixes of the generic PyUFunc_* loop helpers, keyed by typecode:
# one-input/one-output loops ...
chartotype1 = {'e': 'e_e',
               'f': 'f_f',
               'd': 'd_d',
               'g': 'g_g',
               'F': 'F_F',
               'D': 'D_D',
               'G': 'G_G',
               'O': 'O_O',
               'P': 'O_O_method'}

# ... and two-input/one-output loops.
chartotype2 = {'e': 'ee_e',
               'f': 'ff_f',
               'd': 'dd_d',
               'g': 'gg_g',
               'F': 'FF_F',
               'D': 'DD_D',
               'G': 'GG_G',
               'O': 'OO_O',
               'P': 'OO_O_method'}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
# 3) add function.

def make_arrays(funcdict):
    # Returns two C source fragments: the static declarations of the
    # per-ufunc functions/data/signatures arrays (code1), and the
    # assignments that patch generic PyUFunc_* loops into them (code2).
    # functions array contains an entry for every type implemented NULL
    # should be placed where PyUfunc_ style function will be filled in
    # later
    code1list = []
    code2list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        funclist = []
        datalist = []
        siglist = []
        k = 0
        sub = 0

        if uf.nin > 1:
            assert uf.nin == 2
            thedict = chartotype2  # two inputs and one output
        else:
            thedict = chartotype1  # one input and one output

        for t in uf.type_descriptions:
            if (t.func_data not in (None, FullTypeDescr) and
                    not isinstance(t.func_data, FuncNameSuffix)):
                # Generic loop + data pointer: the real function slot is
                # filled in at runtime, so emit NULL here.
                funclist.append('NULL')
                astype = ''
                if not t.astype is None:
                    astype = '_As_%s' % thedict[t.astype]
                astr = ('%s_functions[%d] = PyUFunc_%s%s;' %
                           (name, k, thedict[t.type], astype))
                code2list.append(astr)
                if t.type == 'O':
                    astr = ('%s_data[%d] = (void *) %s;' %
                               (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                elif t.type == 'P':
                    # 'P' loops look up a method by name on the object.
                    datalist.append('(void *)"%s"' % t.func_data)
                else:
                    astr = ('%s_data[%d] = (void *) %s;' %
                               (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                    #datalist.append('(void *)%s' % t.func_data)
                sub += 1
            elif t.func_data is FullTypeDescr:
                # Specialized loop named after the full in/out signature.
                tname = english_upper(chartoname[t.type])
                datalist.append('(void *)NULL')
                funclist.append(
                        '%s_%s_%s_%s' % (tname, t.in_, t.out, name))
            elif isinstance(t.func_data, FuncNameSuffix):
                # Specialized loop with an extra name suffix.
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append(
                        '%s_%s_%s' % (tname, name, t.func_data.suffix))
            else:
                # Default: one specialized loop per nominal type.
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append('%s_%s' % (tname, name))

            for x in t.in_ + t.out:
                siglist.append('NPY_%s' % (english_upper(chartoname[x]),))

            k += 1

        funcnames = ', '.join(funclist)
        signames = ', '.join(siglist)
        datanames = ', '.join(datalist)
        code1list.append("static PyUFuncGenericFunction %s_functions[] = {%s};"
                         % (name, funcnames))
        code1list.append("static void * %s_data[] = {%s};"
                         % (name, datanames))
        code1list.append("static char %s_signatures[] = {%s};"
                         % (name, signames))
    return "\n".join(code1list), "\n".join(code2list)
def make_ufuncs(funcdict):
    # Returns the C source that constructs each ufunc object with
    # PyUFunc_FromFuncAndData and installs it into the module dictionary.
    code3list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        mlist = []
        # Escape the Python docstring so it can be embedded as a C string
        # literal.
        docstring = textwrap.dedent(uf.docstring).strip()
        if sys.version_info[0] < 3:
            docstring = docstring.encode('string-escape')
            docstring = docstring.replace(r'"', r'\"')
        else:
            docstring = docstring.encode('unicode-escape').decode('ascii')
            docstring = docstring.replace(r'"', r'\"')
            # XXX: I don't understand why the following replace is not
            # necessary in the python 2 case.
            docstring = docstring.replace(r"'", r"\'")
        # Split the docstring because some compilers (like MS) do not like big
        # string literal in C code. We split at endlines because textwrap.wrap
        # do not play well with \n
        docstring = '\\n\"\"'.join(docstring.split(r"\n"))
        mlist.append(\
r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
%d, %d, %s, "%s",
"%s", 0);""" % (name, name, name,
                len(uf.type_descriptions),
                uf.nin, uf.nout,
                uf.identity,
                name, docstring))
        # Optionally wire in a custom type-resolution function.
        if uf.typereso != None:
            mlist.append(
                r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
        mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
        mlist.append(r"""Py_DECREF(f);""")
        code3list.append('\n'.join(mlist))
    return '\n'.join(code3list)
def make_code(funcdict, filename):
    # Assemble the three generated sections into the final C source text
    # for __umath_generated.c.
    code1, code2 = make_arrays(funcdict)
    code3 = make_ufuncs(funcdict)
    code2 = indent(code2, 4)
    code3 = indent(code3, 4)
    code = r"""
/** Warning this file is autogenerated!!!
Please make changes to the code generator program (%s)
**/
%s
static void
InitOperators(PyObject *dictionary) {
PyObject *f;
%s
%s
}
""" % (filename, code1, code2, code3)
    return code;
if __name__ == "__main__":
    # Regenerate the ufunc table C source from this generator script.
    filename = __file__
    code = make_code(defdict, filename)
    # Use a context manager so the output file is closed even if the
    # write fails partway through (the original leaked the handle on
    # error).
    with open('__umath_generated.c', 'w') as fid:
        fid.write(code)
| bsd-3-clause |
danakj/chromium | third_party/cython/src/Cython/Compiler/Nodes.py | 86 | 331600 | #
# Parse tree nodes
#
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t)
import sys, os, copy
from itertools import chain
import Builtin
from Errors import error, warning, InternalError, CompileError
import Naming
import PyrexTypes
import TypeSlots
from PyrexTypes import py_object_type, error_type
from Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from Code import UtilityCode
from StringEncoding import EncodedString, escape_byte_string, split_string_literal
import Options
import DebugFlags
from Cython.Utils import cached_function
# Cached length of the absolute working-directory path; computed lazily.
absolute_path_length = 0

def relative_position(pos):
    """
    Return (relative filename, line number) for a position tuple.

    We embed the relative filename in the generated C file, since we
    don't want to have to regenerate and compile all the source code
    whenever the Python install directory moves (which could happen,
    e.g,. when distributing binaries.)

    INPUT:
        a position tuple -- (absolute filename, line number column position)
    OUTPUT:
        relative filename
        line number

    AUTHOR: William Stein
    """
    global absolute_path_length
    if not absolute_path_length:
        # Lazily cache the prefix length of the current working directory.
        absolute_path_length = len(os.path.abspath(os.getcwd()))
    source_desc, line = pos[0], pos[1]
    # Strip the working-directory prefix (plus the path separator).
    relative_name = source_desc.get_filenametable_entry()[absolute_path_length + 1:]
    return (relative_name, line)
def embed_position(pos, docstring):
    """Prepend a 'File: ... (starting at line ...)' line to *docstring*.

    Returns the docstring unchanged when position embedding is disabled.
    The result keeps the original docstring's encoding when the filename
    can be represented in it, and falls back to unicode otherwise.
    """
    if not Options.embed_pos_in_docstring:
        return docstring
    pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
    if docstring is None:
        # No docstring at all: the position line becomes the (unicode) docstring.
        return EncodedString(pos_line)

    # Make sure the filename can be encoded in the docstring's encoding;
    # otherwise the combined docstring must become a unicode string.
    doc_encoding = docstring.encoding
    if doc_encoding is not None:
        try:
            pos_line.encode(doc_encoding)
        except UnicodeEncodeError:
            doc_encoding = None

    if docstring:
        result = EncodedString(pos_line + u'\n' + docstring)
    else:
        # Empty docstring: reuse the string encoding of the original docstring.
        result = EncodedString(pos_line)
    result.encoding = doc_encoding
    return result
def write_func_call(func, codewriter_class):
    """Wrap *func* so that calls receiving a *codewriter_class* instance as
    their second positional argument emit begin/end markers into the code
    buffer around the call. All other calls pass straight through."""
    def f(*args, **kwds):
        if len(args) <= 1 or not isinstance(args[1], codewriter_class):
            # Not a (node, code, ...) call: nothing to annotate.
            return func(*args, **kwds)
        # Annotate the generated code with this function call,
        # but only if new code is actually produced by it.
        node, code = args[:2]
        marker = ' /* %s -> %s.%s %s */' % (
            ' ' * code.call_level,
            node.__class__.__name__,
            func.__name__,
            node.pos[1:])
        pristine = code.buffer.stream.tell()
        code.putln(marker)
        start = code.buffer.stream.tell()
        code.call_level += 4
        res = func(*args, **kwds)
        code.call_level -= 4
        if start == code.buffer.stream.tell():
            # The call emitted nothing: roll back the opening marker.
            code.buffer.stream.seek(pristine)
        else:
            code.putln(marker.replace('->', '<-'))
        return res
    return f
class VerboseCodeWriter(type):
    # Set this as a metaclass to trace function calls in code.
    # This slows down code generation and makes much larger files.

    def __new__(cls, name, bases, attrs):
        from types import FunctionType
        from Code import CCodeWriter
        # Replace every plain function with a call-tracing wrapper.
        wrapped_attrs = {}
        for attr_name, attr_value in attrs.items():
            if isinstance(attr_value, FunctionType):
                attr_value = write_func_call(attr_value, CCodeWriter)
            wrapped_attrs[attr_name] = attr_value
        return super(VerboseCodeWriter, cls).__new__(cls, name, bases, wrapped_attrs)
class CheckAnalysers(type):
    """Metaclass to check that type analysis functions return a node.
    """
    # Names of the analysis methods whose return value is checked.
    methods = set(['analyse_types',
                   'analyse_expressions',
                   'analyse_target_types'])

    def __new__(cls, name, bases, attrs):
        from types import FunctionType
        def check(name, func):
            # Wrap *func* so that a None result is reported on stdout;
            # analysis methods are expected to return a (possibly new) node.
            def call(*args, **kwargs):
                retval = func(*args, **kwargs)
                if retval is None:
                    print name, args, kwargs
                return retval
            return call

        # Only wrap the known analysis methods; leave everything else alone.
        attrs = dict(attrs)
        for mname, m in attrs.items():
            if isinstance(m, FunctionType) and mname in cls.methods:
                attrs[mname] = check(mname, m)
        return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
class Node(object):
    #  pos         (string, int, int)   Source file position
    #  is_name     boolean              Is a NameNode
    #  is_literal  boolean              Is a ConstNode

    #__metaclass__ = CheckAnalysers
    if DebugFlags.debug_trace_code_generation:
        # Trace every code-generation call (see VerboseCodeWriter above).
        __metaclass__ = VerboseCodeWriter

    # Default flags; subclasses override the ones that apply to them.
    is_name = 0
    is_none = 0
    is_nonecheck = 0
    is_literal = 0
    is_terminator = 0
    temps = None

    # All descendants should set child_attrs to a list of the attributes
    # containing nodes considered "children" in the tree. Each such attribute
    # can either contain a single node or a list of nodes. See Visitor.py.
    child_attrs = None

    # Control-flow state, filled in by flow analysis.
    cf_state = None

    # This may be an additional (or 'actual') type that will be checked when
    # this node is coerced to another type. This could be useful to set when
    # the actual type to which it can coerce is known, but you want to leave
    # the type a py_object_type
    coercion_type = None

    def __init__(self, pos, **kw):
        # Store the position and any keyword attributes directly on the node.
        self.pos = pos
        self.__dict__.update(kw)

    gil_message = "Operation"

    nogil_check = None

    def gil_error(self, env=None):
        # Report use of a GIL-requiring operation inside a nogil section.
        error(self.pos, "%s not allowed without gil" % self.gil_message)

    cpp_message = "Operation"

    def cpp_check(self, env):
        # Report an error unless compiling in C++ mode.
        if not env.is_cpp():
            self.cpp_error()

    def cpp_error(self):
        error(self.pos, "%s only allowed in c++" % self.cpp_message)

    def clone_node(self):
        """Clone the node. This is defined as a shallow copy, except for member lists
        amongst the child attributes (from get_child_accessors) which are also
        copied. Lists containing child nodes are thus seen as a way for the node
        to hold multiple children directly; the list is not treated as a separate
        level in the tree."""
        result = copy.copy(self)
        for attrname in result.child_attrs:
            value = getattr(result, attrname)
            if isinstance(value, list):
                # Copy the list itself so the clone owns it (elements are shared).
                setattr(result, attrname, [x for x in value])
        return result

    #
    #  There are 3 phases of parse tree processing, applied in order to
    #  all the statements in a given scope-block:
    #
    #  (0) analyse_declarations
    #        Make symbol table entries for all declarations at the current
    #        level, both explicit (def, cdef, etc.) and implicit (assignment
    #        to an otherwise undeclared name).
    #
    #  (1) analyse_expressions
    #        Determine the result types of expressions and fill in the
    #        'type' attribute of each ExprNode. Insert coercion nodes into the
    #        tree where needed to convert to and from Python objects.
    #        Allocate temporary locals for intermediate results. Fill
    #        in the 'result_code' attribute of each ExprNode with a C code
    #        fragment.
    #
    #  (2) generate_code
    #        Emit C code for all declarations, statements and expressions.
    #        Recursively applies the 3 processing phases to the bodies of
    #        functions.
    #

    def analyse_declarations(self, env):
        # No declarations by default; subclasses override as needed.
        pass

    def analyse_expressions(self, env):
        raise InternalError("analyse_expressions not implemented for %s" % \
            self.__class__.__name__)

    def generate_code(self, code):
        raise InternalError("generate_code not implemented for %s" % \
            self.__class__.__name__)

    def annotate(self, code):
        # mro does the wrong thing
        if isinstance(self, BlockNode):
            self.body.annotate(code)

    def end_pos(self):
        """Return (and cache in _end_pos) the maximum end position of this
        node and all of its children."""
        try:
            return self._end_pos
        except AttributeError:
            pos = self.pos
            if not self.child_attrs:
                self._end_pos = pos
                return pos
            for attr in self.child_attrs:
                child = getattr(self, attr)
                # Sometimes lists, sometimes nodes
                if child is None:
                    pass
                elif isinstance(child, list):
                    for c in child:
                        pos = max(pos, c.end_pos())
                else:
                    pos = max(pos, child.end_pos())
            self._end_pos = pos
            return pos

    def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
        """Debug helper method that returns a recursive string representation of this node.
        """
        if cutoff == 0:
            return "<...nesting level cutoff...>"
        if encountered is None:
            encountered = set()
        if id(self) in encountered:
            # Break reference cycles in the dump output.
            return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
        encountered.add(id(self))

        def dump_child(x, level):
            if isinstance(x, Node):
                return x.dump(level, filter_out, cutoff-1, encountered)
            elif isinstance(x, list):
                return "[%s]" % ", ".join([dump_child(item, level) for item in x])
            else:
                return repr(x)

        attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
        if len(attrs) == 0:
            return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
        else:
            indent = "  " * level
            res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
            for key, value in attrs:
                res += "%s  %s: %s\n" % (indent, key, dump_child(value, level + 1))
            res += "%s>" % indent
            return res

    def dump_pos(self, mark_column=False, marker='(#)'):
        """Debug helper method that returns the source code context of this node as a string.
        """
        if not self.pos:
            return u''
        source_desc, line, col = self.pos
        contents = source_desc.get_lines(encoding='ASCII',
                                         error_handling='ignore')
        # line numbers start at 1
        lines = contents[max(0, line-3):line]
        current = lines[-1]
        if mark_column:
            current = current[:col] + marker + current[col:]
        lines[-1] = current.rstrip() + u'  # <<<<<<<<<<<<<<\n'
        lines += contents[line:line+2]
        return u'"%s":%d:%d\n%s\n' % (
            source_desc.get_escaped_description(), line, col, u''.join(lines))
class CompilerDirectivesNode(Node):
    """
    Sets compiler directives for the children nodes.

    directives     {string:value}  A dictionary holding the right value for
                                   *all* possible directives.
    body           Node
    """

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        saved = env.directives
        env.directives = self.directives
        self.body.analyse_declarations(env)
        env.directives = saved

    def analyse_expressions(self, env):
        saved = env.directives
        env.directives = self.directives
        self.body = self.body.analyse_expressions(env)
        env.directives = saved
        return self

    def generate_function_definitions(self, env, code):
        saved_env_directives = env.directives
        saved_code_directives = code.globalstate.directives
        # NOTE(review): only the code-generation directives are switched here;
        # env.directives is saved and restored but never replaced -- confirm intended.
        code.globalstate.directives = self.directives
        self.body.generate_function_definitions(env, code)
        env.directives = saved_env_directives
        code.globalstate.directives = saved_code_directives

    def generate_execution_code(self, code):
        saved = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.generate_execution_code(code)
        code.globalstate.directives = saved

    def annotate(self, code):
        saved = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.annotate(code)
        code.globalstate.directives = saved
class BlockNode(object):
    # Mixin class for nodes representing a declaration block.

    def generate_cached_builtins_decls(self, env, code):
        """Emit declarations for builtins that were cached but not yet
        declared, then empty the pending list in place."""
        pending = env.global_scope().undeclared_cached_builtins
        for builtin_entry in pending:
            code.globalstate.add_cached_builtin_decl(builtin_entry)
        # Clear in place: the global scope keeps a reference to this list.
        del pending[:]

    def generate_lambda_definitions(self, env, code):
        """Generate function definitions for every lambda in this scope."""
        for lambda_node in env.lambda_defs:
            lambda_node.generate_function_definitions(env, code)
class StatListNode(Node):
    # stats     a list of StatNode

    child_attrs = ["stats"]

    @staticmethod
    def create_analysed(pos, env, *args, **kw):
        # No node-specific analysis necessary.
        return StatListNode(pos, *args, **kw)

    def analyse_declarations(self, env):
        for child_stat in self.stats:
            child_stat.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.stats = [child_stat.analyse_expressions(env)
                      for child_stat in self.stats]
        return self

    def generate_function_definitions(self, env, code):
        for child_stat in self.stats:
            child_stat.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        for child_stat in self.stats:
            # Keep the C output annotated with the original source position.
            code.mark_pos(child_stat.pos)
            child_stat.generate_execution_code(code)

    def annotate(self, code):
        for child_stat in self.stats:
            child_stat.annotate(code)
class StatNode(Node):
    """
    Code generation for statements is split into the following subphases:

    (1) generate_function_definitions
          Emit C code for the definitions of any structs, unions,
          enums and functions defined in the current scope-block.

    (2) generate_execution_code
          Emit C code for executable statements.
    """

    def generate_function_definitions(self, env, code):
        # Nothing to emit by default; overridden by defining statements.
        pass

    def generate_execution_code(self, code):
        raise InternalError("generate_execution_code not implemented for %s" %
                            self.__class__.__name__)
class CDefExternNode(StatNode):
    #  include_file   string or None
    #  body           StatNode

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        """Analyse the extern block's declarations with in_cinclude set."""
        if self.include_file:
            env.add_include_file(self.include_file)
        saved_in_cinclude = env.in_cinclude
        env.in_cinclude = 1
        self.body.analyse_declarations(env)
        env.in_cinclude = saved_in_cinclude

    def analyse_expressions(self, env):
        # Extern blocks contain no executable expressions.
        return self

    def generate_execution_code(self, code):
        # Nothing executes at runtime for an extern block.
        pass

    def annotate(self, code):
        self.body.annotate(code)
class CDeclaratorNode(Node):
    """Part of a C declaration (abstract base for declarator nodes)."""
    # Part of a C declaration.
    #
    # Processing during analyse_declarations phase:
    #
    #   analyse
    #      Returns (name, type) pair where name is the
    #      CNameDeclaratorNode of the name being declared
    #      and type is the type it is being declared as.
    #
    #  calling_convention  string   Calling convention of CFuncDeclaratorNode
    #                               for which this is a base

    child_attrs = []

    calling_convention = ""

    def analyse_templates(self):
        # Only C++ functions have templates.
        return None
class CNameDeclaratorNode(CDeclaratorNode):
    #  name    string             The Cython name being declared
    #  cname   string or None     C name, if specified
    #  default ExprNode or None   the value assigned on declaration

    child_attrs = ['default']

    default = None

    def analyse(self, base_type, env, nonempty = 0):
        """Finish this name declarator: resolve the parser's name/type
        ambiguity, specialize fused types, and return (self, type)."""
        if nonempty and self.name == '':
            # The parser may have mistaken the argument name for a type.
            if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
                error(self.pos, "Missing argument name")
            elif base_type.is_void:
                error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
            else:
                # Reinterpret the "type" as the argument name, typed as object.
                self.name = base_type.declaration_code("", for_display=1, pyrex=1)
                base_type = py_object_type

        if base_type.is_fused and env.fused_to_specific:
            base_type = base_type.specialize(env.fused_to_specific)

        self.type = base_type
        return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
    #  base    CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        """Wrap *base_type* in a pointer type and continue with the base declarator."""
        if base_type.is_pyobject:
            error(self.pos, "Pointer base type cannot be a Python object")
        return self.base.analyse(
            PyrexTypes.c_ptr_type(base_type), env, nonempty = nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
    #  base    CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        """Wrap *base_type* in a C++ reference type and continue with the base declarator."""
        if base_type.is_pyobject:
            error(self.pos, "Reference base type cannot be a Python object")
        return self.base.analyse(
            PyrexTypes.c_ref_type(base_type), env, nonempty = nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
    #  base        CDeclaratorNode
    #  dimension   ExprNode

    child_attrs = ["base", "dimension"]

    def analyse(self, base_type, env, nonempty = 0):
        """Build an array type around *base_type* and continue analysis with
        the base declarator. For C++ class / function base types, the
        "dimension" is reinterpreted as template arguments instead."""
        if base_type.is_cpp_class or base_type.is_cfunction:
            from ExprNodes import TupleNode
            if isinstance(self.dimension, TupleNode):
                args = self.dimension.args
            else:
                # Single template argument: wrap in a one-element tuple.
                args = self.dimension,
            values = [v.analyse_as_type(env) for v in args]
            if None in values:
                ix = values.index(None)
                error(args[ix].pos, "Template parameter not a type")
                base_type = error_type
            else:
                base_type = base_type.specialize_here(self.pos, values)
            return self.base.analyse(base_type, env, nonempty = nonempty)
        if self.dimension:
            self.dimension = self.dimension.analyse_const_expression(env)
            if not self.dimension.type.is_int:
                error(self.dimension.pos, "Array dimension not integer")
            size = self.dimension.get_constant_c_result_code()
            if size is not None:
                try:
                    size = int(size)
                except ValueError:
                    # runtime constant?
                    pass
        else:
            # No dimension: incomplete array type.
            size = None
        if not base_type.is_complete():
            error(self.pos,
                  "Array element type '%s' is incomplete" % base_type)
        if base_type.is_pyobject:
            error(self.pos,
                  "Array element cannot be a Python object")
        if base_type.is_cfunction:
            error(self.pos,
                  "Array element cannot be a function")
        array_type = PyrexTypes.c_array_type(base_type, size)
        return self.base.analyse(array_type, env, nonempty = nonempty)
class CFuncDeclaratorNode(CDeclaratorNode):
    #  base             CDeclaratorNode
    #  args             [CArgDeclNode]
    #  templates        [TemplatePlaceholderType]
    #  has_varargs      boolean
    #  exception_value  ConstNode
    #  exception_check  boolean    True if PyErr_Occurred check needed
    #  nogil            boolean    Can be called without gil
    #  with_gil         boolean    Acquire gil around function body
    #  is_const_method  boolean    Whether this is a const method

    child_attrs = ["base", "args", "exception_value"]

    overridable = 0
    optional_arg_count = 0
    is_const_method = 0
    templates = None

    def analyse_templates(self):
        """Extract template placeholder types when the parser produced an
        array declarator (the ``func[T](...)`` syntax); returns the list of
        placeholders, or None if this is not a templated declarator."""
        if isinstance(self.base, CArrayDeclaratorNode):
            from ExprNodes import TupleNode, NameNode
            template_node = self.base.dimension
            if isinstance(template_node, TupleNode):
                template_nodes = template_node.args
            elif isinstance(template_node, NameNode):
                # A single template parameter.
                template_nodes = [template_node]
            else:
                error(template_node.pos, "Template arguments must be a list of names")
                return None
            self.templates = []
            for template in template_nodes:
                if isinstance(template, NameNode):
                    self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
                else:
                    error(template.pos, "Template arguments must be a list of names")
            # Strip the array declarator that carried the template list.
            self.base = self.base.base
            return self.templates
        else:
            return None

    # NOTE: the mutable default for directive_locals is safe here -- the dict
    # is only read (membership test and item lookup), never mutated.
    def analyse(self, return_type, env, nonempty = 0, directive_locals = {}):
        """Build the CFuncType for this declarator: analyse each argument,
        resolve the exception declaration, and hand the resulting function
        type on to the base declarator."""
        if nonempty:
            nonempty -= 1
        func_type_args = []
        for i, arg_node in enumerate(self.args):
            name_declarator, type = arg_node.analyse(env, nonempty = nonempty,
                                                     is_self_arg = (i == 0 and env.is_c_class_scope))
            name = name_declarator.name
            if name in directive_locals:
                # A cython.locals(...) decorator may override the arg type.
                type_node = directive_locals[name]
                other_type = type_node.analyse_as_type(env)
                if other_type is None:
                    error(type_node.pos, "Not a type")
                elif (type is not PyrexTypes.py_object_type
                      and not type.same_as(other_type)):
                    error(self.base.pos, "Signature does not agree with previous declaration")
                    error(type_node.pos, "Previous declaration here")
                else:
                    type = other_type
            if name_declarator.cname:
                error(self.pos,
                      "Function argument cannot have C name specification")
            if i == 0 and env.is_c_class_scope and type.is_unspecified:
                # fix the type of self
                type = env.parent_type
            # Turn *[] argument into **
            if type.is_array:
                type = PyrexTypes.c_ptr_type(type.base_type)
            # Catch attempted C-style func(void) decl
            if type.is_void:
                error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
            func_type_args.append(
                PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
            if arg_node.default:
                self.optional_arg_count += 1
            elif self.optional_arg_count:
                error(self.pos, "Non-default argument follows default argument")

        exc_val = None
        exc_check = 0
        if self.exception_check == '+':
            # C++ exception translation needs these standard headers.
            env.add_include_file('ios')         # for std::ios_base::failure
            env.add_include_file('new')         # for std::bad_alloc
            env.add_include_file('stdexcept')
            env.add_include_file('typeinfo')    # for std::bad_cast
        if (return_type.is_pyobject
                and (self.exception_value or self.exception_check)
                and self.exception_check != '+'):
            error(self.pos,
                  "Exception clause not allowed for function returning Python object")
        else:
            if self.exception_value:
                self.exception_value = self.exception_value.analyse_const_expression(env)
                if self.exception_check == '+':
                    exc_val_type = self.exception_value.type
                    if (not exc_val_type.is_error
                            and not exc_val_type.is_pyobject
                            and not (exc_val_type.is_cfunction
                                     and not exc_val_type.return_type.is_pyobject
                                     and not exc_val_type.args)):
                        error(self.exception_value.pos,
                              "Exception value must be a Python exception or cdef function with no arguments.")
                    exc_val = self.exception_value
                else:
                    # Non-'+' check: the exception value must be a constant
                    # of the function's return type.
                    self.exception_value = self.exception_value.coerce_to(
                        return_type, env).analyse_const_expression(env)
                    exc_val = self.exception_value.get_constant_c_result_code()
                    if exc_val is None:
                        raise InternalError(
                            "get_constant_c_result_code not implemented for %s" %
                            self.exception_value.__class__.__name__)
                    if not return_type.assignable_from(self.exception_value.type):
                        error(self.exception_value.pos,
                              "Exception value incompatible with function return type")
            exc_check = self.exception_check
        if return_type.is_cfunction:
            error(self.pos,
                  "Function cannot return a function")
        func_type = PyrexTypes.CFuncType(
            return_type, func_type_args, self.has_varargs,
            optional_arg_count = self.optional_arg_count,
            exception_value = exc_val, exception_check = exc_check,
            calling_convention = self.base.calling_convention,
            nogil = self.nogil, with_gil = self.with_gil, is_overridable = self.overridable,
            is_const_method = self.is_const_method,
            templates = self.templates)

        if self.optional_arg_count:
            if func_type.is_fused:
                # This is a bit of a hack... When we need to create specialized CFuncTypes
                # on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
                # struct
                def declare_opt_arg_struct(func_type, fused_cname):
                    self.declare_optional_arg_struct(func_type, env, fused_cname)

                func_type.declare_opt_arg_struct = declare_opt_arg_struct
            else:
                self.declare_optional_arg_struct(func_type, env)

        callspec = env.directives['callspec']
        if callspec:
            current = func_type.calling_convention
            if current and current != callspec:
                error(self.pos, "cannot have both '%s' and '%s' "
                      "calling conventions" % (current, callspec))
            func_type.calling_convention = callspec
        return self.base.analyse(func_type, env)

    def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
        """
        Declares the optional argument struct (the struct used to hold the
        values for optional arguments). For fused cdef functions, this is
        deferred as analyse_declarations is called only once (on the fused
        cdef function).
        """
        scope = StructOrUnionScope()
        # First member counts how many optional args were actually passed.
        arg_count_member = '%sn' % Naming.pyrex_prefix
        scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)

        # The trailing optional_arg_count args become struct members.
        for arg in func_type.args[len(func_type.args)-self.optional_arg_count:]:
            scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject = 1)

        struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)

        if fused_cname is not None:
            struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)

        op_args_struct = env.global_scope().declare_struct_or_union(
            name = struct_cname,
            kind = 'struct',
            scope = scope,
            typedef_flag = 0,
            pos = self.pos,
            cname = struct_cname)

        op_args_struct.defined_in_pxd = 1
        op_args_struct.used = 1

        func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
class CConstDeclaratorNode(CDeclaratorNode):
    #  base    CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        """Wrap *base_type* in a const type and continue with the base declarator."""
        if base_type.is_pyobject:
            error(self.pos, "Const base type cannot be a Python object")
        return self.base.analyse(
            PyrexTypes.c_const_type(base_type), env, nonempty = nonempty)
class CArgDeclNode(Node):
    # Item in a function declaration argument list.
    #
    # base_type      CBaseTypeNode
    # declarator     CDeclaratorNode
    # not_none       boolean            Tagged with 'not None'
    # or_none        boolean            Tagged with 'or None'
    # accept_none    boolean            Resolved boolean for not_none/or_none
    # default        ExprNode or None
    # default_value  PyObjectConst      constant for default value
    # annotation     ExprNode or None   Py3 function arg annotation
    # is_self_arg    boolean            Is the "self" arg of an extension type method
    # is_type_arg    boolean            Is the "class" arg of an extension type classmethod
    # is_kw_only     boolean            Is a keyword-only argument
    # is_dynamic     boolean            Non-literal arg stored inside CyFunction

    child_attrs = ["base_type", "declarator", "default", "annotation"]

    is_self_arg = 0
    is_type_arg = 0
    is_generic = 1
    kw_only = 0
    not_none = 0
    or_none = 0
    type = None
    name_declarator = None
    default_value = None
    annotation = None
    is_dynamic = 0

    def analyse(self, env, nonempty = 0, is_self_arg = False):
        """Analyse this argument declaration and return (name_declarator, type).
        If the type was already resolved earlier, the cached pair is returned."""
        if is_self_arg:
            self.base_type.is_self_arg = self.is_self_arg = True
        if self.type is None:
            # The parser may misinterpret names as types. We fix that here.
            if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
                if nonempty:
                    if self.base_type.is_basic_c_type:
                        # char, short, long called "int"
                        type = self.base_type.analyse(env, could_be_name = True)
                        arg_name = type.declaration_code("")
                    else:
                        arg_name = self.base_type.name
                    # Move the name from the type node to the declarator.
                    self.declarator.name = EncodedString(arg_name)
                    self.base_type.name = None
                    self.base_type.is_basic_c_type = False
                could_be_name = True
            else:
                could_be_name = False
            self.base_type.is_arg = True
            base_type = self.base_type.analyse(env, could_be_name = could_be_name)
            if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
                self.declarator.name = self.base_type.arg_name
            # The parser is unable to resolve the ambiguity of [] as part of the
            # type (e.g. in buffers) or empty declarator (as with arrays).
            # This only arises for empty multi-dimensional arrays.
            if (base_type.is_array
                    and isinstance(self.base_type, TemplatedTypeNode)
                    and isinstance(self.declarator, CArrayDeclaratorNode)):
                declarator = self.declarator
                while isinstance(declarator.base, CArrayDeclaratorNode):
                    declarator = declarator.base
                # Re-attach the array declarator parsed as part of the type.
                declarator.base = self.base_type.array_declarator
                base_type = base_type.base_type
            return self.declarator.analyse(base_type, env, nonempty = nonempty)
        else:
            return self.name_declarator, self.type

    def calculate_default_value_code(self, code):
        """Return the C code for this argument's default value, creating a
        module-level constant for non-literal defaults on first use."""
        if self.default_value is None:
            if self.default:
                if self.default.is_literal:
                    # will not output any code, just assign the result_code
                    self.default.generate_evaluation_code(code)
                    return self.type.cast_code(self.default.result())
                self.default_value = code.get_argument_default_const(self.type)
        return self.default_value

    def annotate(self, code):
        if self.default:
            self.default.annotate(code)

    def generate_assignment_code(self, code, target=None):
        """Emit C code assigning the (non-literal) default value to *target*
        (or to the default-value constant when no target is given)."""
        default = self.default
        if default is None or default.is_literal:
            # Literal defaults are emitted inline; nothing to assign here.
            return
        if target is None:
            target = self.calculate_default_value_code(code)
        default.generate_evaluation_code(code)
        default.make_owned_reference(code)
        result = default.result_as(self.type)
        code.putln("%s = %s;" % (target, result))
        if self.type.is_pyobject:
            code.put_giveref(default.result())
        default.generate_post_assignment_code(code)
        default.free_temps(code)
class CBaseTypeNode(Node):
    """Abstract base class for C base type nodes.

    During the analyse_declarations phase, ``analyse`` returns the type.
    """

    def analyse_as_type(self, env):
        # A base type node analysed "as a type" is simply analysed.
        return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
    # type            type

    child_attrs = []

    def analyse(self, env, could_be_name = False):
        # The type was determined earlier; just return it.
        return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
    #  name             string
    #  module_path      [string]   Qualifying name components
    #  is_basic_c_type  boolean
    #  signed           boolean
    #  longness         integer
    #  complex          boolean
    #  is_self_arg      boolean    Is self argument of C method
    #  ##is_type_arg    boolean    Is type argument of class method

    child_attrs = []
    arg_name = None   # in case the argument name was interpreted as a type

    module_path = []
    is_basic_c_type = False
    complex = False

    def analyse(self, env, could_be_name = False):
        # Return type descriptor.
        #print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
        type = None
        if self.is_basic_c_type:
            type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
            if not type:
                error(self.pos, "Unrecognised type modifier combination")
        elif self.name == "object" and not self.module_path:
            type = py_object_type
        elif self.name is None:
            if self.is_self_arg and env.is_c_class_scope:
                #print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
                type = env.parent_type
            ## elif self.is_type_arg and env.is_c_class_scope:
            ##     type = Builtin.type_type
            else:
                type = py_object_type
        else:
            if self.module_path:
                # Maybe it's a nested C++ class.
                scope = env
                for item in self.module_path:
                    entry = scope.lookup(item)
                    if entry is not None and entry.is_cpp_class:
                        scope = entry.type.scope
                    else:
                        # Not a C++ class path after all.
                        scope = None
                        break

                if scope is None:
                    # Maybe it's a cimport.
                    scope = env.find_imported_module(self.module_path, self.pos)
                    if scope:
                        scope.fused_to_specific = env.fused_to_specific
            else:
                scope = env

            if scope:
                if scope.is_c_class_scope:
                    # Type names live in the enclosing module scope.
                    scope = scope.global_scope()

                type = scope.lookup_type(self.name)
                if type is not None:
                    pass
                elif could_be_name:
                    # Not a known type: treat the name as an argument name.
                    if self.is_self_arg and env.is_c_class_scope:
                        type = env.parent_type
                    ## elif self.is_type_arg and env.is_c_class_scope:
                    ##     type = Builtin.type_type
                    else:
                        type = py_object_type
                    self.arg_name = EncodedString(self.name)
                else:
                    if self.templates:
                        if not self.name in self.templates:
                            error(self.pos, "'%s' is not a type identifier" % self.name)
                        type = PyrexTypes.TemplatePlaceholderType(self.name)
                    else:
                        error(self.pos, "'%s' is not a type identifier" % self.name)
        if self.complex:
            if not type.is_numeric or type.is_complex:
                error(self.pos, "can only complexify c numeric types")
            type = PyrexTypes.CComplexType(type)
            type.create_declaration_utility_code(env)
        elif type is Builtin.complex_type:
            # Special case: optimise builtin complex type into C's
            # double complex. The parser cannot do this (as for the
            # normal scalar types) as the user may have redeclared the
            # 'complex' type. Testing for the exact type here works.
            type = PyrexTypes.c_double_complex_type
            type.create_declaration_utility_code(env)
            self.complex = True
        if type:
            return type
        else:
            return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):
    # Represents a typed memoryview base type, e.g. ``int[:, ::1]``.

    name = 'memoryview'

    child_attrs = ['base_type_node', 'axes']

    def analyse(self, env, could_be_name = False):
        """Analyse the element type and axis specs; return the resulting
        MemoryViewSliceType (or an error type on invalid axes/dtype)."""
        base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        import MemoryView

        try:
            axes_specs = MemoryView.get_axes_specs(env, self.axes)
        except CompileError, e:
            # Invalid axis specification: report and yield an error type.
            error(e.position, e.message_only)
            self.type = PyrexTypes.ErrorType()
            return self.type

        if not MemoryView.validate_axes(self.pos, axes_specs):
            self.type = error_type
        else:
            MemoryView.validate_memslice_dtype(self.pos, base_type)
            self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
            self.use_memview_utilities(env)

        return self.type

    def use_memview_utilities(self, env):
        # Pull in the runtime support code for memoryviews.
        import MemoryView
        env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
    # For C++ classes that live inside other C++ classes.

    #  name         string
    #  base_type    CBaseTypeNode

    child_attrs = ['base_type']

    def analyse(self, env, could_be_name = None):
        """Look up ``self.name`` inside the scope of the enclosing C++ class type."""
        enclosing_type = self.base_type.analyse(env)
        if enclosing_type is PyrexTypes.error_type:
            # Error was already reported for the enclosing type.
            return PyrexTypes.error_type
        if not enclosing_type.is_cpp_class:
            error(self.pos, "'%s' is not a valid type scope" % enclosing_type)
            return PyrexTypes.error_type
        entry = enclosing_type.scope.lookup_here(self.name)
        if not entry or not entry.is_type:
            error(self.pos, "'%s.%s' is not a type identifier" % (enclosing_type, self.name))
            return PyrexTypes.error_type
        return entry.type
class TemplatedTypeNode(CBaseTypeNode):
    #  After parsing:
    #  positional_args  [ExprNode]        List of positional arguments
    #  keyword_args     DictNode          Keyword arguments
    #  base_type_node   CBaseTypeNode

    #  After analysis:
    #  type             PyrexTypes.BufferType or PyrexTypes.CppClassType  ...containing the right options

    child_attrs = ["base_type_node", "positional_args",
                   "keyword_args", "dtype_node"]

    dtype_node = None

    name = None

    def analyse(self, env, could_be_name = False, base_type = None):
        """Analyse a subscripted base type: a C++ template specialization,
        a Python buffer declaration, or a C array, depending on the base."""
        if base_type is None:
            base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        if base_type.is_cpp_class:
            # Templated class
            if self.keyword_args and self.keyword_args.key_value_pairs:
                error(self.pos, "c++ templates cannot take keyword arguments")
                self.type = PyrexTypes.error_type
            else:
                template_types = []
                for template_node in self.positional_args:
                    type = template_node.analyse_as_type(env)
                    if type is None:
                        error(template_node.pos, "unknown type in template argument")
                        return error_type
                    template_types.append(type)
                self.type = base_type.specialize_here(self.pos, template_types)

        elif base_type.is_pyobject:
            # Buffer
            import Buffer

            options = Buffer.analyse_buffer_options(
                self.pos,
                env,
                self.positional_args,
                self.keyword_args,
                base_type.buffer_defaults)

            if sys.version_info[0] < 3:
                # Py 2.x enforces byte strings as keyword arguments ...
                options = dict([ (name.encode('ASCII'), value)
                                 for name, value in options.items() ])

            self.type = PyrexTypes.BufferType(base_type, **options)

        else:
            # Array
            empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
            if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
                error(self.pos, "invalid array declaration")
                self.type = PyrexTypes.error_type
            else:
                # It would be nice to merge this class with CArrayDeclaratorNode,
                # but arrays are part of the declaration, not the type...
                if not self.positional_args:
                    dimension = None
                else:
                    dimension = self.positional_args[0]
                self.array_declarator = CArrayDeclaratorNode(self.pos,
                    base = empty_declarator,
                    dimension = dimension)
                self.type = self.array_declarator.analyse(base_type, env)[1]

        if self.type.is_fused and env.fused_to_specific:
            self.type = self.type.specialize(env.fused_to_specific)

        return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
    # A base type combined with a declarator, e.g. the type part of
    # a cast or sizeof expression.
    #
    # base_type   CBaseTypeNode
    # declarator  CDeclaratorNode

    child_attrs = ["base_type", "declarator"]

    def analyse(self, env, could_be_name = False):
        # Analyse the raw base type first, then let the declarator wrap it
        # (pointer/array/function decoration). Only the resulting type is
        # needed here; the declarator name is discarded.
        analysed_base = self.base_type.analyse(env, could_be_name)
        return self.declarator.analyse(analysed_base, env)[1]
class FusedTypeNode(CBaseTypeNode):
    """
    Represents a fused type in a ctypedef statement:

        ctypedef cython.fused_type(int, long, long long) integral

    name        str                     name of this fused type
    types       [CSimpleBaseTypeNode]   is the list of types to be fused
    """

    child_attrs = []

    def analyse_declarations(self, env):
        # Register the fused type under its typedef name.
        fused_type = self.analyse(env)
        entry = env.declare_typedef(self.name, fused_type, self.pos)

        # Omit the typedef declaration that self.declarator would produce
        entry.in_cinclude = True

    def analyse(self, env):
        # Resolve each constituent type node, rejecting non-types,
        # duplicates and nested fused types.
        resolved = []
        for type_node in self.types:
            member = type_node.analyse_as_type(env)
            if not member:
                error(type_node.pos, "Not a type")
                continue

            if member in resolved:
                error(type_node.pos, "Type specified multiple times")
            elif member.is_fused:
                error(type_node.pos, "Cannot fuse a fused type")
            else:
                resolved.append(member)

        return PyrexTypes.FusedType(resolved, name=self.name)
class CConstTypeNode(CBaseTypeNode):
    # A 'const' qualified base type.
    #
    # base_type     CBaseTypeNode

    child_attrs = ["base_type"]

    def analyse(self, env, could_be_name = False):
        # 'const' is only meaningful for C types; Python object
        # references cannot be const-qualified.
        underlying = self.base_type.analyse(env, could_be_name)
        if underlying.is_pyobject:
            error(self.pos,
                  "Const base type cannot be a Python object")
        return PyrexTypes.c_const_type(underlying)
class CVarDefNode(StatNode):
    #  C variable definition or forward/extern function declaration.
    #
    #  visibility    'private' or 'public' or 'extern'
    #  base_type     CBaseTypeNode
    #  declarators   [CDeclaratorNode]
    #  in_pxd        boolean
    #  api           boolean
    #  overridable   boolean        whether it is a cpdef
    #  modifiers     ['inline']
    #
    #  decorators    [cython.locals(...)] or None
    #  directive_locals { string : NameNode } locals defined by cython.locals(...)

    child_attrs = ["base_type", "declarators"]

    decorators = None
    directive_locals = None

    def analyse_declarations(self, env, dest_scope = None):
        # Declare every declarator of this statement in dest_scope
        # (defaults to the current env), dispatching between C function
        # declarations and plain variable declarations.
        if self.directive_locals is None:
            self.directive_locals = {}
        if not dest_scope:
            dest_scope = env
        self.dest_scope = dest_scope

        if self.declarators:
            templates = self.declarators[0].analyse_templates()
        else:
            templates = None
        if templates is not None:
            # Templated declarations are only supported for extern functions;
            # declare the template parameters in a dedicated scope.
            if self.visibility != 'extern':
                error(self.pos, "Only extern functions allowed")
            if len(self.declarators) > 1:
                error(self.declarators[1].pos, "Can't multiply declare template types")
            env = TemplateScope('func_template', env)
            env.directives = env.outer_scope.directives
            for template_param in templates:
                env.declare_type(template_param.name, template_param, self.pos)

        base_type = self.base_type.analyse(env)

        if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
                                                       env.is_module_scope):
            error(self.pos, "Fused types not allowed here")
            return error_type

        self.entry = None
        visibility = self.visibility

        for declarator in self.declarators:

            if (len(self.declarators) > 1
                    and not isinstance(declarator, CNameDeclaratorNode)
                    and env.directives['warn.multiple_declarators']):
                warning(declarator.pos,
                        "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). " +
                        "Each pointer declaration should be on its own line.", 1)

            if isinstance(declarator, CFuncDeclaratorNode):
                name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
            else:
                name_declarator, type = declarator.analyse(base_type, env)
            if not type.is_complete():
                # NOTE(review): precedence here is ((extern and is_array) or
                # is_memoryviewslice) — memoryview slices are always allowed
                # incomplete regardless of visibility; confirm this is intended.
                if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
                    error(declarator.pos,
                          "Variable type '%s' is incomplete" % type)
            if self.visibility == 'extern' and type.is_pyobject:
                error(declarator.pos,
                      "Python object cannot be declared extern")
            name = name_declarator.name
            cname = name_declarator.cname
            if name == '':
                error(declarator.pos, "Missing name in declaration.")
                return
            if type.is_cfunction:
                self.entry = dest_scope.declare_cfunction(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility, in_pxd = self.in_pxd,
                    api = self.api, modifiers = self.modifiers)
                if self.entry is not None:
                    self.entry.is_overridable = self.overridable
                    self.entry.directive_locals = copy.copy(self.directive_locals)
            else:
                # cython.locals(...) decorators only make sense on functions
                if self.directive_locals:
                    error(self.pos, "Decorators can only be followed by functions")
                self.entry = dest_scope.declare_var(name, type, declarator.pos,
                    cname=cname, visibility=visibility, in_pxd=self.in_pxd,
                    api=self.api, is_cdef=1)
                if Options.docstrings:
                    self.entry.doc = embed_position(self.pos, self.doc)
class CStructOrUnionDefNode(StatNode):
    #  name          string
    #  cname         string or None
    #  kind          "struct" or "union"
    #  typedef_flag  boolean
    #  visibility    "public" or "private"
    #  api           boolean
    #  in_pxd        boolean
    #  attributes    [CVarDefNode] or None
    #  entry         Entry
    #  packed        boolean

    child_attrs = ["attributes"]

    def declare(self, env, scope=None):
        # Declare the struct/union type entry itself; member declarations
        # are handled later in analyse_declarations.
        if self.visibility == 'extern' and self.packed and not scope:
            error(self.pos, "Cannot declare extern struct as 'packed'")
        self.entry = env.declare_struct_or_union(
            self.name, self.kind, scope, self.typedef_flag, self.pos,
            self.cname, visibility = self.visibility, api = self.api,
            packed = self.packed)

    def analyse_declarations(self, env):
        scope = None
        if self.attributes is not None:
            scope = StructOrUnionScope(self.name)
        self.declare(env, scope)
        if self.attributes is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for attr in self.attributes:
                attr.analyse_declarations(env, scope)
            if self.visibility != 'extern':
                for attr in scope.var_entries:
                    type = attr.type
                    # Strip array dimensions: a fixed-size array of the
                    # struct itself is equally self-referential.
                    while type.is_array:
                        type = type.base_type
                    if type == self.entry.type:
                        error(attr.pos, "Struct cannot contain itself as a member.")

    def analyse_expressions(self, env):
        # Pure declaration node: nothing to analyse at expression stage.
        return self

    def generate_execution_code(self, code):
        # Declarations emit no runtime code.
        pass
class CppClassNode(CStructOrUnionDefNode, BlockNode):

    #  name          string
    #  cname         string or None
    #  visibility    "extern"
    #  in_pxd        boolean
    #  attributes    [CVarDefNode] or None
    #  entry         Entry
    #  base_classes  [CBaseTypeNode]
    #  templates     [string] or None

    def declare(self, env):
        # Forward-declare the C++ class (no scope, no bases yet) so the
        # name is usable before analyse_declarations fills in the details.
        if self.templates is None:
            template_types = None
        else:
            template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
        self.entry = env.declare_cpp_class(
            self.name, None, self.pos,
            self.cname, base_classes = [], visibility = self.visibility, templates = template_types)

    def analyse_declarations(self, env):
        scope = None
        if self.attributes is not None:
            scope = CppClassScope(self.name, env, templates = self.templates)
        def base_ok(base_class):
            # Returns True for valid bases; the error path implicitly
            # returns None, which filter() treats as falsy and drops.
            if base_class.is_cpp_class or base_class.is_struct:
                return True
            else:
                error(self.pos, "Base class '%s' not a struct or class." % base_class)
        # NOTE(review): relies on Python 2 filter() returning a list;
        # under Python 3 this would be a lazy iterator.
        base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
        if self.templates is None:
            template_types = None
        else:
            template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
        self.entry = env.declare_cpp_class(
            self.name, scope, self.pos,
            self.cname, base_class_types, visibility = self.visibility, templates = template_types)
        if self.entry is None:
            return
        self.entry.is_cpp_class = 1
        if scope is not None:
            scope.type = self.entry.type
        defined_funcs = []
        if self.attributes is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for attr in self.attributes:
                attr.analyse_declarations(scope)
                if isinstance(attr, CFuncDefNode):
                    defined_funcs.append(attr)
                    if self.templates is not None:
                        attr.template_declaration = "template <typename %s>" % ", typename ".join(self.templates)
        # Wrap method definitions so the usual statement-list machinery
        # can drive code generation for them.
        self.body = StatListNode(self.pos, stats=defined_funcs)
        self.scope = scope

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(self.entry.type.scope)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(self.entry.type.scope, code)

    def generate_execution_code(self, code):
        self.body.generate_execution_code(code)

    def annotate(self, code):
        self.body.annotate(code)
class CEnumDefNode(StatNode):
    #  name           string or None
    #  cname          string or None
    #  items          [CEnumDefItemNode]
    #  typedef_flag   boolean
    #  visibility     "public" or "private"
    #  api            boolean
    #  in_pxd         boolean
    #  entry          Entry

    child_attrs = ["items"]

    def declare(self, env):
        self.entry = env.declare_enum(self.name, self.pos,
            cname = self.cname, typedef_flag = self.typedef_flag,
            visibility = self.visibility, api = self.api)

    def analyse_declarations(self, env):
        if self.items is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for item in self.items:
                item.analyse_declarations(env, self.entry)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Public/api enums are additionally exposed to Python: each value
        # is boxed with PyInt_FromLong and stored in the module dict.
        if self.visibility == 'public' or self.api:
            temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
            for item in self.entry.enum_values:
                code.putln("%s = PyInt_FromLong(%s); %s" % (
                    temp,
                    item.cname,
                    code.error_goto_if_null(temp, item.pos)))
                code.put_gotref(temp)
                code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
                    Naming.moddict_cname,
                    item.name,
                    temp,
                    code.error_goto(item.pos)))
                code.put_decref_clear(temp, PyrexTypes.py_object_type)
            code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
    #  name     string
    #  cname    string or None
    #  value    ExprNode or None

    child_attrs = ["value"]

    def analyse_declarations(self, env, enum_entry):
        # Evaluate an explicit enum value (coercing to C int if needed)
        # and register the item as a constant of the enum's type.
        if self.value:
            self.value = self.value.analyse_const_expression(env)
            if not self.value.type.is_int:
                self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
                self.value = self.value.analyse_const_expression(env)
        entry = env.declare_const(self.name, enum_entry.type,
            self.value, self.pos, cname = self.cname,
            visibility = enum_entry.visibility, api = enum_entry.api)
        enum_entry.enum_values.append(entry)
class CTypeDefNode(StatNode):
    # A 'ctypedef' statement.
    #
    # base_type    CBaseTypeNode
    # declarator   CDeclaratorNode
    # visibility   "public" or "private"
    # api          boolean
    # in_pxd       boolean

    child_attrs = ["base_type", "declarator"]

    def analyse_declarations(self, env):
        # Resolve the underlying type, then register the typedef name.
        underlying = self.base_type.analyse(env)
        name_declarator, declared_type = self.declarator.analyse(underlying, env)
        entry = env.declare_typedef(
            name_declarator.name, declared_type, self.pos,
            cname = name_declarator.cname,
            visibility = self.visibility, api = self.api)
        if declared_type.is_fused:
            # Fused typedefs must not emit a C-level typedef of their own.
            entry.in_cinclude = True
        if self.in_pxd and not env.in_cinclude:
            entry.defined_in_pxd = 1

    def analyse_expressions(self, env):
        # Pure declaration: nothing to do at expression-analysis time.
        return self

    def generate_execution_code(self, code):
        # Typedefs emit no runtime code.
        pass
class FuncDefNode(StatNode, BlockNode):
    #  Base class for function definition nodes.
    #
    #  return_type     PyrexType
    #  #filename        string        C name of filename string const
    #  entry           Symtab.Entry
    #  needs_closure   boolean        Whether or not this function has inner functions/classes/yield
    #  needs_outer_scope boolean      Whether or not this function requires outer scope
    #  pymethdef_required boolean     Force Python method struct generation
    #  directive_locals { string : ExprNode } locals defined by cython.locals(...)
    #  directive_returns [ExprNode] type defined by cython.returns(...)
    #  star_arg      PyArgDeclNode or None  * argument
    #  starstar_arg  PyArgDeclNode or None  ** argument
    #
    #  has_fused_arguments  boolean
    #       Whether this cdef function has fused parameters. This is needed
    #       by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
    #       with fused argument types with a FusedCFuncDefNode

    # Class-level defaults; transforms/subclasses override per instance.
    py_func = None
    needs_closure = False
    needs_outer_scope = False
    pymethdef_required = False
    is_generator = False
    is_generator_body = False
    modifiers = []
    has_fused_arguments = False
    star_arg = None
    starstar_arg = None
    is_cyfunction = False

    def analyse_default_values(self, env):
        # Type-analyse and coerce declared default values, and reject a
        # non-default positional argument following a defaulted one.
        default_seen = 0
        for arg in self.args:
            if arg.default:
                default_seen = 1
                if arg.is_generic:
                    arg.default = arg.default.analyse_types(env)
                    arg.default = arg.default.coerce_to(arg.type, env)
                else:
                    error(arg.pos,
                          "This argument cannot have a default value")
                    arg.default = None
            elif arg.kw_only:
                # keyword-only arguments may follow defaulted ones freely
                default_seen = 1
            elif default_seen:
                error(arg.pos, "Non-default argument following default argument")

    def align_argument_type(self, env, arg):
        # Reconcile an argument's declared type with a cython.locals(...)
        # directive for the same name; the directive wins if compatible.
        directive_locals = self.directive_locals
        type = arg.type
        if arg.name in directive_locals:
            type_node = directive_locals[arg.name]
            other_type = type_node.analyse_as_type(env)
            if other_type is None:
                error(type_node.pos, "Not a type")
            elif (type is not PyrexTypes.py_object_type
                      and not type.same_as(other_type)):
                error(arg.base_type.pos, "Signature does not agree with previous declaration")
                error(type_node.pos, "Previous declaration here")
            else:
                arg.type = other_type
        return arg

    def need_gil_acquisition(self, lenv):
        # Overridden by subclasses that must grab the GIL on entry.
        return 0

    def create_local_scope(self, env):
        # Build the function-local scope, using a ClosureScope when inner
        # functions/generators capture variables from this one.
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope
        if self.needs_closure:
            lenv = ClosureScope(name=self.entry.name,
                                outer_scope = genv,
                                parent_scope = env,
                                scope_name=self.entry.cname)
        else:
            lenv = LocalScope(name=self.entry.name,
                              outer_scope=genv,
                              parent_scope=env)
        lenv.return_type = self.return_type
        type = self.entry.type
        if type.is_cfunction:
            lenv.nogil = type.nogil and not type.with_gil
        self.local_scope = lenv
        lenv.directives = env.directives
        return lenv

    def generate_function_body(self, env, code):
        self.body.generate_execution_code(code)

    def generate_function_definitions(self, env, code):
        # Emit the complete C function: header, declarations, GIL/refnanny
        # setup, closure handling, argument processing, the body, and the
        # error/return cleanup sections. Statement order here mirrors the
        # layout of the generated C code and must not be rearranged.
        import Buffer
        if self.return_type.is_memoryviewslice:
            import MemoryView

        lenv = self.local_scope
        if lenv.is_closure_scope and not lenv.is_passthrough:
            outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
                                            Naming.outer_scope_cname)
        else:
            outer_scope_cname = Naming.outer_scope_cname
        lenv.mangle_closure_cnames(outer_scope_cname)

        # Generate closure function definitions
        self.body.generate_function_definitions(lenv, code)
        # generate lambda function definitions
        self.generate_lambda_definitions(lenv, code)

        is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
                             self.entry.scope.is_c_class_scope)
        is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
                                 self.entry.scope.is_c_class_scope)
        is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
        if is_buffer_slot:
            if 'cython_unused' not in self.modifiers:
                self.modifiers = self.modifiers + ['cython_unused']

        preprocessor_guard = self.get_preprocessor_guard()

        profile = code.globalstate.directives['profile']
        linetrace = code.globalstate.directives['linetrace']
        if (linetrace or profile) and lenv.nogil:
            warning(self.pos, "Cannot profile nogil function.", 1)
            profile = linetrace = False
        if profile or linetrace:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Profile", "Profile.c"))

        # Generate C code for header and body of function
        code.enter_cfunc_scope()
        code.return_from_error_cleanup_label = code.new_label()

        # ----- Top-level constants used by this function
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        # ----- Function header
        code.putln("")

        if preprocessor_guard:
            code.putln(preprocessor_guard)

        with_pymethdef = (self.needs_assignment_synthesis(env, code) or
                          self.pymethdef_required)
        if self.py_func:
            self.py_func.generate_function_header(code,
                with_pymethdef = with_pymethdef,
                proto_only=True)
        self.generate_function_header(code,
            with_pymethdef = with_pymethdef)
        # ----- Local variable declarations
        # Find function scope
        cenv = env
        while cenv.is_py_class_scope or cenv.is_c_class_scope:
            cenv = cenv.outer_scope
        if self.needs_closure:
            code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
            code.putln(";")
        elif self.needs_outer_scope:
            if lenv.is_passthrough:
                code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
                code.putln(";")
            code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
            code.putln(";")
        self.generate_argument_declarations(lenv, code)

        for entry in lenv.var_entries:
            if not (entry.in_closure or entry.is_arg):
                code.put_var_declaration(entry)

        # Initialize the return variable __pyx_r
        init = ""
        if not self.return_type.is_void:
            if self.return_type.is_pyobject:
                init = " = NULL"
            elif self.return_type.is_memoryviewslice:
                init = ' = ' + MemoryView.memslice_entry_init

            code.putln(
                "%s%s;" %
                    (self.return_type.declaration_code(Naming.retval_cname),
                     init))

        tempvardecl_code = code.insertion_point()
        self.generate_keyword_list(code)

        if profile or linetrace:
            code.put_trace_declarations()

        # ----- Extern library function declarations
        lenv.generate_library_function_declarations(code)

        # ----- GIL acquisition
        acquire_gil = self.acquire_gil

        # See if we need to acquire the GIL for variable declarations, or for
        # refnanny only

        # Profiling or closures are not currently possible for cdef nogil
        # functions, but check them anyway
        have_object_args = (self.needs_closure or self.needs_outer_scope or
                            profile or linetrace)
        for arg in lenv.arg_entries:
            if arg.type.is_pyobject:
                have_object_args = True
                break

        acquire_gil_for_var_decls_only = (
            lenv.nogil and lenv.has_with_gil_block and
            (have_object_args or lenv.buffer_entries))

        acquire_gil_for_refnanny_only = (
            lenv.nogil and lenv.has_with_gil_block and not
            acquire_gil_for_var_decls_only)

        use_refnanny = not lenv.nogil or lenv.has_with_gil_block

        if acquire_gil or acquire_gil_for_var_decls_only:
            code.put_ensure_gil()
        elif lenv.nogil and lenv.has_with_gil_block:
            code.declare_gilstate()

        # ----- set up refnanny
        if use_refnanny:
            tempvardecl_code.put_declare_refcount_context()
            code.put_setup_refcount_context(
                self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)

        # ----- Automatic lead-ins for certain special functions
        if is_getbuffer_slot:
            self.getbuffer_init(code)
        # ----- Create closure scope object
        if self.needs_closure:
            tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
            slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
            if not slot_func_cname:
                slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
            code.putln("%s = (%s)%s(%s, %s, NULL);" % (
                Naming.cur_scope_cname,
                lenv.scope_class.type.declaration_code(''),
                slot_func_cname,
                lenv.scope_class.type.typeptr_cname,
                Naming.empty_tuple))
            code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
            if is_getbuffer_slot:
                self.getbuffer_error_cleanup(code)

            if use_refnanny:
                code.put_finish_refcount_context()
                if acquire_gil or acquire_gil_for_var_decls_only:
                    code.put_release_ensured_gil()

            # FIXME: what if the error return value is a Python value?
            code.putln("return %s;" % self.error_value())
            code.putln("}")
            code.put_gotref(Naming.cur_scope_cname)
            # Note that it is unsafe to decref the scope at this point.
        if self.needs_outer_scope:
            if self.is_cyfunction:
                code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
                    outer_scope_cname,
                    cenv.scope_class.type.declaration_code(''),
                    Naming.self_cname))
            else:
                code.putln("%s = (%s) %s;" % (
                    outer_scope_cname,
                    cenv.scope_class.type.declaration_code(''),
                    Naming.self_cname))
            if lenv.is_passthrough:
                code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
            elif self.needs_closure:
                # inner closures own a reference to their outer parent
                code.put_incref(outer_scope_cname, cenv.scope_class.type)
                code.put_giveref(outer_scope_cname)
        # ----- Trace function call
        if profile or linetrace:
            # this looks a bit late, but if we don't get here due to a
            # fatal error before hand, it's not really worth tracing
            code.put_trace_call(self.entry.name, self.pos)
            code.funcstate.can_trace = True
        # ----- Fetch arguments
        self.generate_argument_parsing_code(env, code)
        # If an argument is assigned to in the body, we must
        # incref it to properly keep track of refcounts.
        is_cdef = isinstance(self, CFuncDefNode)
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or len(entry.cf_assignments) > 1) and
                        not entry.in_closure):
                    code.put_var_incref(entry)

            # Note: defaults are always incref-ed. For def functions, we
            #       acquire arguments from object conversion, so we have
            #       new references. If we are a cdef function, we need to
            #       incref our arguments
            elif (is_cdef and entry.type.is_memoryviewslice and
                      len(entry.cf_assignments) > 1):
                code.put_incref_memoryviewslice(entry.cname,
                                                have_gil=not lenv.nogil)
        for entry in lenv.var_entries:
            if entry.is_arg and len(entry.cf_assignments) > 1:
                code.put_var_incref(entry)

        # ----- Initialise local buffer auxiliary variables
        for entry in lenv.var_entries + lenv.arg_entries:
            if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
                Buffer.put_init_vars(entry, code)

        # ----- Check and convert arguments
        self.generate_argument_type_tests(code)
        # ----- Acquire buffer arguments
        for entry in lenv.arg_entries:
            if entry.type.is_buffer:
                Buffer.put_acquire_arg_buffer(entry, code, self.pos)

        if acquire_gil_for_var_decls_only:
            code.put_release_ensured_gil()

        # -------------------------
        # ----- Function body -----
        # -------------------------
        self.generate_function_body(env, code)

        code.mark_pos(self.pos)
        code.putln("")
        code.putln("/* function exit code */")

        # ----- Default return value
        if not self.body.is_terminator:
            if self.return_type.is_pyobject:
                #if self.return_type.is_extension_type:
                #    lhs = "(PyObject *)%s" % Naming.retval_cname
                #else:
                lhs = Naming.retval_cname
                code.put_init_to_py_none(lhs, self.return_type)
            else:
                val = self.return_type.default_value
                if val:
                    code.putln("%s = %s;" % (Naming.retval_cname, val))
        # ----- Error cleanup
        if code.error_label in code.labels_used:
            if not self.body.is_terminator:
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type, have_gil=not lenv.nogil)

            # Clean up buffers -- this calls a Python function
            # so need to save and restore error state
            buffers_present = len(lenv.buffer_entries) > 0
            # NOTE(review): memslice_entries is not referenced again below.
            memslice_entries = [e for e in lenv.entries.itervalues()
                                if e.type.is_memoryviewslice]
            if buffers_present:
                code.globalstate.use_utility_code(restore_exception_utility_code)
                code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
                code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
                for entry in lenv.buffer_entries:
                    Buffer.put_release_buffer_code(code, entry)
                    #code.putln("%s = 0;" % entry.cname)
                code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")

            if self.return_type.is_memoryviewslice:
                MemoryView.put_init_entry(Naming.retval_cname, code)
                err_val = Naming.retval_cname
            else:
                err_val = self.error_value()

            exc_check = self.caller_will_check_exceptions()
            if err_val is not None or exc_check:
                # TODO: Fix exception tracing (though currently unused by cProfile).
                # code.globalstate.use_utility_code(get_exception_tuple_utility_code)
                # code.put_trace_exception()

                if lenv.nogil and not lenv.has_with_gil_block:
                    code.putln("{")
                    code.put_ensure_gil()

                code.put_add_traceback(self.entry.qualified_name)

                if lenv.nogil and not lenv.has_with_gil_block:
                    code.put_release_ensured_gil()
                    code.putln("}")
            else:
                warning(self.entry.pos,
                        "Unraisable exception in function '%s'." %
                        self.entry.qualified_name, 0)
                code.put_unraisable(self.entry.qualified_name)
            default_retval = self.return_type.default_value
            if err_val is None and default_retval:
                err_val = default_retval
            if err_val is not None:
                code.putln("%s = %s;" % (Naming.retval_cname, err_val))

            if is_getbuffer_slot:
                self.getbuffer_error_cleanup(code)

            # If we are using the non-error cleanup section we should
            # jump past it if we have an error. The if-test below determine
            # whether this section is used.
            if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
                code.put_goto(code.return_from_error_cleanup_label)

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        for entry in lenv.buffer_entries:
            if entry.used:
                Buffer.put_release_buffer_code(code, entry)
        if is_getbuffer_slot:
            self.getbuffer_normal_cleanup(code)

        if self.return_type.is_memoryviewslice:
            # See if our return value is uninitialized on non-error return
            # import MemoryView
            # MemoryView.err_if_nogil_initialized_check(self.pos, env)
            cond = code.unlikely(self.return_type.error_condition(
                                                    Naming.retval_cname))
            code.putln(
                'if (%s) {' % cond)
            if env.nogil:
                code.put_ensure_gil()
            code.putln(
                    'PyErr_SetString('
                        'PyExc_TypeError,'
                        '"Memoryview return value is not initialized");')
            if env.nogil:
                code.put_release_ensured_gil()
            code.putln(
                '}')

        # ----- Return cleanup for both error and no-error return
        code.put_label(code.return_from_error_cleanup_label)

        for entry in lenv.var_entries:
            if not entry.used or entry.in_closure:
                continue

            if entry.type.is_memoryviewslice:
                code.put_xdecref_memoryviewslice(entry.cname,
                                                 have_gil=not lenv.nogil)
            elif entry.type.is_pyobject:
                if not entry.is_arg or len(entry.cf_assignments) > 1:
                    code.put_var_decref(entry)

        # Decref any increfed args
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or len(entry.cf_assignments) > 1) and
                        not entry.in_closure):
                    code.put_var_decref(entry)
            elif (entry.type.is_memoryviewslice and
                  (not is_cdef or len(entry.cf_assignments) > 1)):
                # decref slices of def functions and acquired slices from cdef
                # functions, but not borrowed slices from cdef functions.
                code.put_xdecref_memoryviewslice(entry.cname,
                                                 have_gil=not lenv.nogil)
        if self.needs_closure:
            code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)

        # ----- Return
        # This code is duplicated in ModuleNode.generate_module_init_func
        if not lenv.nogil:
            default_retval = self.return_type.default_value
            err_val = self.error_value()
            if err_val is None and default_retval:
                err_val = default_retval  # FIXME: why is err_val not used?
            if self.return_type.is_pyobject:
                code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))

        if self.entry.is_special and self.entry.name == "__hash__":
            # Returning -1 for __hash__ is supposed to signal an error
            # We do as Python instances and coerce -1 into -2.
            code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
                    Naming.retval_cname, Naming.retval_cname))

        if profile or linetrace:
            code.funcstate.can_trace = False
            if self.return_type.is_pyobject:
                code.put_trace_return(Naming.retval_cname)
            else:
                code.put_trace_return("Py_None")

        if not lenv.nogil:
            # GIL holding function
            code.put_finish_refcount_context()

        if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
            # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
            code.put_release_ensured_gil()

        if not self.return_type.is_void:
            code.putln("return %s;" % Naming.retval_cname)

        code.putln("}")

        if preprocessor_guard:
            code.putln("#endif /*!(%s)*/" % preprocessor_guard)

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)

        # ----- Python version
        code.exit_cfunc_scope()
        if self.py_func:
            self.py_func.generate_function_definitions(env, code)
        self.generate_wrapper_functions(code)

    def declare_argument(self, env, arg):
        # Declare one formal argument in the function's scope, rejecting
        # 'void' and incomplete (non-array, non-memoryview) types.
        if arg.type.is_void:
            error(arg.pos, "Invalid use of 'void'")
        elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
            error(arg.pos,
                  "Argument type '%s' is incomplete" % arg.type)
        return env.declare_arg(arg.name, arg.type, arg.pos)

    def generate_arg_type_test(self, arg, code):
        # Generate type test for one argument.
        if arg.type.typeobj_is_available():
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
            typeptr_cname = arg.type.typeptr_cname
            arg_code = "((PyObject *)%s)" % arg.entry.cname
            code.putln(
                'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
                    arg_code,
                    typeptr_cname,
                    arg.accept_none,
                    arg.name,
                    arg.type.is_builtin_type,
                    code.error_goto(arg.pos)))
        else:
            error(arg.pos, "Cannot test type of extern C class "
                "without type object name specification")

    def generate_arg_none_check(self, arg, code):
        # Generate None check for one argument.
        if arg.type.is_memoryviewslice:
            cname = "%s.memview" % arg.entry.cname
        else:
            cname = arg.entry.cname

        code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
        code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
            max(200, len(arg.name)), arg.name,
            code.error_goto(arg.pos)))
        code.putln('}')

    def generate_wrapper_functions(self, code):
        # Hook for subclasses that emit additional wrapper functions.
        pass

    def generate_execution_code(self, code):
        # Evaluate and store argument default values
        for arg in self.args:
            if not arg.is_dynamic:
                arg.generate_assignment_code(code)

    #
    # Special code for the __getbuffer__ function
    #

    def getbuffer_init(self, code):
        info = self.local_scope.arg_entries[1].cname
        # Python 3.0 betas have a bug in memoryview which makes it call
        # getbuffer with a NULL parameter. For now we work around this;
        # the following block should be removed when this bug is fixed.
        code.putln("if (%s != NULL) {" % info)
        code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
        code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
        code.putln("}")

    def getbuffer_error_cleanup(self, code):
        # Release the buffer's owner reference on the error path.
        info = self.local_scope.arg_entries[1].cname
        code.putln("if (%s != NULL && %s->obj != NULL) {"
                   % (info, info))
        code.put_gotref("%s->obj" % info)
        code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
                   % (info, info))
        code.putln("}")

    def getbuffer_normal_cleanup(self, code):
        # Undo the Py_None placeholder set up in getbuffer_init.
        info = self.local_scope.arg_entries[1].cname
        code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
        code.put_gotref("Py_None")
        code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
        code.putln("}")

    def get_preprocessor_guard(self):
        # Return the C preprocessor guard protecting this special method's
        # slot implementation, or None when no guard applies.
        if not self.entry.is_special:
            return None
        name = self.entry.name
        slot = TypeSlots.method_name_to_slot.get(name)
        if not slot:
            return None
        if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
            return None
        if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
            return None
        return slot.preprocessor_guard_code()
class CFuncDefNode(FuncDefNode):
# C function definition.
#
# modifiers ['inline']
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# cfunc_declarator the CFuncDeclarator of this function
# (this is also available through declarator or a
# base thereof)
# body StatListNode
# api boolean
# decorators [DecoratorNode] list of decorators
#
# with_gil boolean Acquire GIL around body
# type CFuncType
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
# template_declaration String or None Used for c++ class methods
# is_const_method whether this is a const method
child_attrs = ["base_type", "declarator", "body", "py_func"]
inline_in_pxd = False
decorators = None
directive_locals = None
directive_returns = None
override = None
template_declaration = None
is_const_method = False
def unqualified_name(self):
    # Bare function name, without module/class qualification.
    return self.entry.name
def analyse_declarations(self, env):
if self.directive_locals is None:
self.directive_locals = {}
self.directive_locals.update(env.directives['locals'])
if self.directive_returns is not None:
base_type = self.directive_returns.analyse_as_type(env)
if base_type is None:
error(self.directive_returns.pos, "Not a type")
base_type = PyrexTypes.error_type
else:
base_type = self.base_type.analyse(env)
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(base_type, env,
nonempty = 2 * (self.body is not None),
directive_locals = self.directive_locals)
else:
name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
if not type.is_cfunction:
error(self.pos,
"Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
self.type = type
type.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
self.cfunc_declarator = declarator
self.args = declarator.args
opt_arg_count = self.cfunc_declarator.optional_arg_count
if (self.visibility == 'public' or self.api) and opt_arg_count:
error(self.cfunc_declarator.pos,
"Function with optional arguments may not be declared "
"public or api")
if (type.exception_check == '+' and self.visibility != 'extern'):
warning(self.cfunc_declarator.pos,
"Only extern functions can throw C++ exceptions.")
for formal_arg, type_arg in zip(self.args, type.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
formal_arg.cname = type_arg.cname
self._validate_type_visibility(type_arg.type, type_arg.pos, env)
if type_arg.type.is_fused:
self.has_fused_arguments = True
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. "
"Consider using memoryview slices instead.")
elif 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
self._validate_type_visibility(type.return_type, self.pos, env)
name = name_declarator.name
cname = name_declarator.cname
type.is_const_method = self.is_const_method
self.entry = env.declare_cfunction(
name, type, self.pos,
cname = cname, visibility = self.visibility, api = self.api,
defining = self.body is not None, modifiers = self.modifiers)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos,
"Function cannot return an array")
if self.return_type.is_cpp_class:
self.return_type.check_nullary_constructor(self.pos, "used as a return value")
if self.overridable and not env.is_module_scope:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
self.declare_cpdef_wrapper(env)
self.create_local_scope(env)
def declare_cpdef_wrapper(self, env):
if self.overridable:
name = self.entry.name
py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
self.py_func = DefNode(pos = self.pos,
name = self.entry.name,
args = self.args,
star_arg = None,
starstar_arg = None,
doc = self.doc,
body = py_func_body,
is_wrapper = 1)
self.py_func.is_module_scope = env.is_module_scope
self.py_func.analyse_declarations(env)
self.entry.as_variable = self.py_func.entry
self.entry.used = self.entry.as_variable.used = True
# Reset scope entry the above cfunction
env.entries[name] = self.entry
if (not self.entry.is_final_cmethod and
(not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
def _validate_type_visibility(self, type, pos, env):
"""
Ensure that types used in cdef functions are public or api, or
defined in a C header.
"""
public_or_api = (self.visibility == 'public' or self.api)
entry = getattr(type, 'entry', None)
if public_or_api and entry and env.is_module_scope:
if not (entry.visibility in ('public', 'extern') or
entry.api or entry.in_cinclude):
error(pos, "Function declared public or api may not have "
"private types")
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
import ExprNodes
args = self.type.args
if omit_optional_args:
args = args[:len(args) - self.type.optional_arg_count]
arg_names = [arg.name for arg in args]
if is_module_scope:
cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
else:
self_arg = ExprNodes.NameNode(self.pos, name=arg_names[0])
cfunc = ExprNodes.AttributeNode(self.pos, obj=self_arg, attribute=self.entry.name)
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names[1-is_module_scope:]], wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
error(arg.pos, "Missing argument name")
self.declare_argument(env, arg)
def need_gil_acquisition(self, lenv):
return self.type.with_gil
def nogil_check(self, env):
type = self.type
with_gil = type.with_gil
if type.nogil and not with_gil:
if type.return_type.is_pyobject:
error(self.pos,
"Function with Python return type cannot be declared nogil")
for entry in self.local_scope.var_entries:
if entry.type.is_pyobject and not entry.in_with_gil_block:
error(self.pos, "Function declared nogil has Python locals or temporaries")
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
if self.py_func is not None:
# this will also analyse the default values
self.py_func = self.py_func.analyse_expressions(env)
else:
self.analyse_default_values(env)
self.acquire_gil = self.need_gil_acquisition(self.local_scope)
return self
def needs_assignment_synthesis(self, env, code=None):
return False
def generate_function_header(self, code, with_pymethdef, with_opt_args = 1, with_dispatch = 1, cname = None):
scope = self.local_scope
arg_decls = []
type = self.type
for arg in type.args[:len(type.args)-type.optional_arg_count]:
arg_decl = arg.declaration_code()
entry = scope.lookup(arg.name)
if not entry.cf_used:
arg_decl = 'CYTHON_UNUSED %s' % arg_decl
arg_decls.append(arg_decl)
if with_dispatch and self.overridable:
dispatch_arg = PyrexTypes.c_int_type.declaration_code(
Naming.skip_dispatch_cname)
if self.override:
arg_decls.append(dispatch_arg)
else:
arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
if type.optional_arg_count and with_opt_args:
arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
if type.has_varargs:
arg_decls.append("...")
if not arg_decls:
arg_decls = ["void"]
if cname is None:
cname = self.entry.func_cname
entity = type.function_header_code(cname, ', '.join(arg_decls))
if self.entry.visibility == 'private' and '::' not in cname:
storage_class = "static "
else:
storage_class = ""
dll_linkage = None
modifiers = code.build_function_modifiers(self.entry.func_modifiers)
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
if self.template_declaration:
code.putln(self.template_declaration)
code.putln("%s%s%s {" % (storage_class, modifiers, header))
def generate_argument_declarations(self, env, code):
scope = self.local_scope
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
result = arg.calculate_default_value_code(code)
code.putln('%s = %s;' % (
arg.type.declaration_code(arg.cname), result))
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
i = 0
used = 0
if self.type.optional_arg_count:
scope = self.local_scope
code.putln('if (%s) {' % Naming.optional_args_cname)
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
code.putln('if (%s->%sn > %s) {' %
(Naming.optional_args_cname,
Naming.pyrex_prefix, i))
declarator = arg.declarator
while not hasattr(declarator, 'name'):
declarator = declarator.base
code.putln('%s = %s->%s;' %
(arg.cname, Naming.optional_args_cname,
self.type.opt_arg_cname(declarator.name)))
used += 1
i += 1
for _ in range(used):
code.putln('}')
code.putln('}')
def generate_argument_conversion_code(self, code):
pass
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
for arg in self.type.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif arg.type.is_pyobject and not arg.accept_none:
self.generate_arg_none_check(arg, code)
def error_value(self):
if self.return_type.is_pyobject:
return "0"
else:
#return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
# wrappers to put in the slots here.
k = 0
entry = self.entry
func_type = entry.type
while entry.prev_entry is not None:
k += 1
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
self.generate_function_header(code,
0,
with_dispatch = entry.type.is_overridable,
with_opt_args = entry.type.optional_arg_count,
cname = entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
args = self.type.args
arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
if entry.type.is_overridable:
arglist.append(Naming.skip_dispatch_cname)
elif func_type.is_overridable:
arglist.append('0')
if entry.type.optional_arg_count:
arglist.append(Naming.optional_args_cname)
elif func_type.optional_arg_count:
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
class PyArgDeclNode(Node):
    # Argument which must be a Python object (used
    # for * and ** arguments).
    #
    # name        string
    # entry       Symtab.Entry
    # annotation  ExprNode or None    Py3 argument annotation
    child_attrs = []
    is_self_arg = False
    is_type_arg = False

    def generate_function_definitions(self, env, code):
        # Delegate code generation to the symbol-table entry.
        self.entry.generate_function_definitions(env, code)
class DecoratorNode(Node):
    # A decorator applied to a function or class definition.
    #
    # decorator    NameNode or CallNode or AttributeNode
    child_attrs = ['decorator']
class DefNode(FuncDefNode):
    # A Python function definition.
    #
    # name          string                 the Python name of the function
    # lambda_name   string                 the internal name of a lambda 'function'
    # decorators    [DecoratorNode]        list of decorators
    # args          [CArgDeclNode]         formal arguments
    # doc           EncodedString or None
    # body          StatListNode
    # return_type_annotation
    #               ExprNode or None       the Py3 return type annotation
    #
    # The following subnode is constructed internally
    # when the def statement is inside a Python class definition.
    #
    # fused_py_func        DefNode   The original fused cpdef DefNode
    #                                (in case this is a specialization)
    # specialized_cpdefs   [DefNode] list of specialized cpdef DefNodes
    # py_cfunc_node  PyCFunctionNode/InnerFunctionNode   The PyCFunction to create and assign
    #
    # decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions

    child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators"]

    lambda_name = None
    reqd_kw_flags_cname = "0"
    is_wrapper = 0
    no_assignment_synthesis = 0
    decorators = None
    return_type_annotation = None
    entry = None
    acquire_gil = 0
    self_in_stararg = 0
    py_cfunc_node = None
    requires_classobj = False
    defaults_struct = None # Dynamic kwrds structure name
    doc = None
    fused_py_func = False
    specialized_cpdefs = None
    py_wrapper = None
    py_wrapper_required = True
    func_cname = None
    defaults_getter = None

    def __init__(self, pos, **kwds):
        FuncDefNode.__init__(self, pos, **kwds)
        # Count keyword-only (k), required keyword-only (rk) and required
        # positional-or-keyword (r) arguments for signature analysis.
        k = rk = r = 0
        for arg in self.args:
            if arg.kw_only:
                k += 1
                if not arg.default:
                    rk += 1
            if not arg.default:
                r += 1
        self.num_kwonly_args = k
        self.num_required_kw_args = rk
        self.num_required_args = r

    def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None):
        """Convert this def function into an equivalent CFuncDefNode,
        either with an all-object signature (cfunc is None) or matching a
        previously declared cdef signature (*cfunc* given)."""
        if self.star_arg:
            error(self.star_arg.pos, "cdef function cannot have star argument")
        if self.starstar_arg:
            error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
        if cfunc is None:
            cfunc_args = []
            for formal_arg in self.args:
                name_declarator, type = formal_arg.analyse(scope, nonempty=1)
                cfunc_args.append(PyrexTypes.CFuncTypeArg(name = name_declarator.name,
                                                          cname = None,
                                                          type = py_object_type,
                                                          pos = formal_arg.pos))
            cfunc_type = PyrexTypes.CFuncType(return_type = py_object_type,
                                              args = cfunc_args,
                                              has_varargs = False,
                                              exception_value = None,
                                              exception_check = False,
                                              nogil = False,
                                              with_gil = False,
                                              is_overridable = overridable)
            cfunc = CVarDefNode(self.pos, type=cfunc_type)
        else:
            if scope is None:
                scope = cfunc.scope
            cfunc_type = cfunc.type
            if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
                error(self.pos, "wrong number of arguments")
                error(cfunc.pos, "previous declaration here")
            for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
                name_declarator, type = formal_arg.analyse(scope, nonempty=1,
                                                           is_self_arg = (i == 0 and scope.is_c_class_scope))
                if type is None or type is PyrexTypes.py_object_type:
                    formal_arg.type = type_arg.type
                formal_arg.name_declarator = name_declarator
        import ExprNodes
        if cfunc_type.exception_value is None:
            exception_value = None
        else:
            exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
        declarator = CFuncDeclaratorNode(self.pos,
                                         base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
                                         args = self.args,
                                         has_varargs = False,
                                         exception_check = cfunc_type.exception_check,
                                         exception_value = exception_value,
                                         with_gil = cfunc_type.with_gil,
                                         nogil = cfunc_type.nogil)
        return CFuncDefNode(self.pos,
                            modifiers = [],
                            base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
                            declarator = declarator,
                            body = self.body,
                            doc = self.doc,
                            overridable = cfunc_type.is_overridable,
                            type = cfunc_type,
                            with_gil = cfunc_type.with_gil,
                            nogil = cfunc_type.nogil,
                            visibility = 'private',
                            api = False,
                            directive_locals = getattr(cfunc, 'directive_locals', {}),
                            directive_returns = returns)

    def is_cdef_func_compatible(self):
        """Determines if the function's signature is compatible with a
        cdef function. This can be used before calling
        .as_cfunction() to see if that will be successful.
        """
        if self.needs_closure:
            return False
        if self.star_arg or self.starstar_arg:
            return False
        return True

    def analyse_declarations(self, env):
        """Detect class/staticmethod decorators, analyse argument types,
        declare the function, and build the DefNodeWrapper that exposes
        it to Python."""
        self.is_classmethod = self.is_staticmethod = False
        if self.decorators:
            for decorator in self.decorators:
                func = decorator.decorator
                if func.is_name:
                    self.is_classmethod |= func.name == 'classmethod'
                    self.is_staticmethod |= func.name == 'staticmethod'
        if self.is_classmethod and env.lookup_here('classmethod'):
            # classmethod() was overridden - not much we can do here ...
            self.is_classmethod = False
        if self.is_staticmethod and env.lookup_here('staticmethod'):
            # staticmethod() was overridden - not much we can do here ...
            self.is_staticmethod = False
        if self.name == '__new__' and env.is_py_class_scope:
            # __new__ is an implicit staticmethod in Python classes.
            self.is_staticmethod = 1
        self.analyse_argument_types(env)
        if self.name == '<lambda>':
            self.declare_lambda_function(env)
        else:
            self.declare_pyfunction(env)
        self.analyse_signature(env)
        self.return_type = self.entry.signature.return_type()
        self.create_local_scope(env)
        self.py_wrapper = DefNodeWrapper(
            self.pos,
            target=self,
            name=self.entry.name,
            args=self.args,
            star_arg=self.star_arg,
            starstar_arg=self.starstar_arg,
            return_type=self.return_type)
        self.py_wrapper.analyse_declarations(env)

    def analyse_argument_types(self, env):
        """Resolve each argument's declared type and decide whether it may
        accept None (or-none / not-none / directive-controlled)."""
        self.directive_locals = env.directives['locals']
        allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
        f2s = env.fused_to_specific
        env.fused_to_specific = None
        for arg in self.args:
            if hasattr(arg, 'name'):
                name_declarator = None
            else:
                base_type = arg.base_type.analyse(env)
                name_declarator, type = \
                    arg.declarator.analyse(base_type, env)
                arg.name = name_declarator.name
                arg.type = type
                if type.is_fused:
                    self.has_fused_arguments = True
            self.align_argument_type(env, arg)
            if name_declarator and name_declarator.cname:
                error(self.pos,
                    "Python function argument cannot have C name specification")
            arg.type = arg.type.as_argument_type()
            arg.hdr_type = None
            arg.needs_conversion = 0
            arg.needs_type_test = 0
            arg.is_generic = 1
            if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
                if arg.or_none:
                    arg.accept_none = True
                elif arg.not_none:
                    arg.accept_none = False
                elif (arg.type.is_extension_type or arg.type.is_builtin_type
                        or arg.type.is_buffer or arg.type.is_memoryviewslice):
                    if arg.default and arg.default.constant_result is None:
                        # special case: def func(MyType obj = None)
                        arg.accept_none = True
                    else:
                        # default depends on compiler directive
                        arg.accept_none = allow_none_for_extension_args
                else:
                    # probably just a plain 'object'
                    arg.accept_none = True
            else:
                arg.accept_none = True # won't be used, but must be there
                if arg.not_none:
                    error(arg.pos, "Only Python type arguments can have 'not None'")
                if arg.or_none:
                    error(arg.pos, "Only Python type arguments can have 'or None'")
        env.fused_to_specific = f2s

    def analyse_signature(self, env):
        """Pick the most specific C calling signature (noargs / onearg /
        generic) and map fixed arguments onto the slot signature."""
        if self.entry.is_special:
            if self.decorators:
                error(self.pos, "special functions of cdef classes cannot have decorators")
            self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
        elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
            # Use the simpler calling signature for zero- and one-argument functions.
            if self.entry.signature is TypeSlots.pyfunction_signature:
                if len(self.args) == 0:
                    self.entry.signature = TypeSlots.pyfunction_noargs
                elif len(self.args) == 1:
                    if self.args[0].default is None and not self.args[0].kw_only:
                        self.entry.signature = TypeSlots.pyfunction_onearg
            elif self.entry.signature is TypeSlots.pymethod_signature:
                if len(self.args) == 1:
                    self.entry.signature = TypeSlots.unaryfunc
                elif len(self.args) == 2:
                    if self.args[1].default is None and not self.args[1].kw_only:
                        self.entry.signature = TypeSlots.ibinaryfunc
        sig = self.entry.signature
        nfixed = sig.num_fixed_args()
        if sig is TypeSlots.pymethod_signature and nfixed == 1 \
                and len(self.args) == 0 and self.star_arg:
            # this is the only case where a diverging number of
            # arguments is not an error - when we have no explicit
            # 'self' parameter as in method(*args)
            sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
            self.self_in_stararg = 1
            nfixed = 0
        if self.is_staticmethod and env.is_c_class_scope:
            nfixed = 0
            self.self_in_stararg = True # FIXME: why for staticmethods?
            self.entry.signature = sig = copy.copy(sig)
            sig.fixed_arg_format = "*"
            sig.is_staticmethod = True
            sig.has_generic_args = True
        if ((self.is_classmethod or self.is_staticmethod) and
                self.has_fused_arguments and env.is_c_class_scope):
            del self.decorator_indirection.stats[:]
        for i in range(min(nfixed, len(self.args))):
            arg = self.args[i]
            arg.is_generic = 0
            if sig.is_self_arg(i) and not self.is_staticmethod:
                if self.is_classmethod:
                    arg.is_type_arg = 1
                    arg.hdr_type = arg.type = Builtin.type_type
                else:
                    arg.is_self_arg = 1
                    arg.hdr_type = arg.type = env.parent_type
                arg.needs_conversion = 0
            else:
                arg.hdr_type = sig.fixed_arg_type(i)
                if not arg.type.same_as(arg.hdr_type):
                    if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
                        arg.needs_type_test = 1
                    else:
                        arg.needs_conversion = 1
            if arg.needs_conversion:
                arg.hdr_cname = Naming.arg_prefix + arg.name
            else:
                arg.hdr_cname = Naming.var_prefix + arg.name
        if nfixed > len(self.args):
            self.bad_signature()
            return
        elif nfixed < len(self.args):
            if not sig.has_generic_args:
                self.bad_signature()
            for arg in self.args:
                if arg.is_generic and \
                        (arg.type.is_extension_type or arg.type.is_builtin_type):
                    arg.needs_type_test = 1

    def bad_signature(self):
        # Report an argument-count mismatch against the slot signature.
        sig = self.entry.signature
        expected_str = "%d" % sig.num_fixed_args()
        if sig.has_generic_args:
            expected_str += " or more"
        name = self.name
        if name.startswith("__") and name.endswith("__"):
            desc = "Special method"
        else:
            desc = "Method"
        error(self.pos,
              "%s %s has wrong number of arguments "
              "(%d declared, %s expected)" % (
                  desc, self.name, len(self.args), expected_str))

    def declare_pyfunction(self, env):
        """Declare this def function in *env*, assign mangled C names and
        attach the docstring (when enabled)."""
        #print "DefNode.declare_pyfunction:", self.name, "in", env ###
        name = self.name
        entry = env.lookup_here(name)
        if entry:
            if entry.is_final_cmethod and not env.parent_type.is_final_type:
                error(self.pos, "Only final types can have final Python (def/cpdef) methods")
            if (entry.type.is_cfunction and not entry.is_builtin_cmethod
                    and not self.is_wrapper):
                warning(self.pos, "Overriding cdef method with def method.", 5)
        entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
        self.entry = entry
        prefix = env.next_id(env.scope_prefix)
        self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
        if Options.docstrings:
            entry.doc = embed_position(self.pos, self.doc)
            entry.doc_cname = Naming.funcdoc_prefix + prefix + name
            if entry.is_special:
                # NOTE(review): 'entry.name in "__getattr__"' is a substring
                # test, not an equality test -- presumably intended to match
                # only '__getattr__'; confirm before changing.
                if entry.name in TypeSlots.invisible or not entry.doc or (entry.name in '__getattr__' and env.directives['fast_getattr']):
                    entry.wrapperbase_cname = None
                else:
                    entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
        else:
            entry.doc = None

    def declare_lambda_function(self, env):
        # Lambdas get an auto-generated name and never carry a docstring.
        entry = env.declare_lambda_function(self.lambda_name, self.pos)
        entry.doc = None
        self.entry = entry
        self.entry.pyfunc_cname = entry.cname

    def declare_arguments(self, env):
        """Declare all formal arguments (plus * and ** args) as variables
        in the local scope."""
        for arg in self.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            if arg.needs_conversion:
                arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
                if arg.type.is_pyobject:
                    arg.entry.init = "0"
            else:
                arg.entry = self.declare_argument(env, arg)
            arg.entry.is_arg = 1
            arg.entry.used = 1
            arg.entry.is_self_arg = arg.is_self_arg
        self.declare_python_arg(env, self.star_arg)
        self.declare_python_arg(env, self.starstar_arg)

    def declare_python_arg(self, env, arg):
        # Declare a * or ** argument (always a Python object); no-op when
        # the argument is absent.
        if arg:
            if env.directives['infer_types'] != False:
                type = PyrexTypes.unspecified_type
            else:
                type = py_object_type
            entry = env.declare_var(arg.name, type, arg.pos)
            entry.is_arg = 1
            entry.used = 1
            entry.init = "0"
            entry.xdecref_cleanup = 1
            arg.entry = entry

    def analyse_expressions(self, env):
        self.local_scope.directives = env.directives
        self.analyse_default_values(env)
        if not self.needs_assignment_synthesis(env) and self.decorators:
            # Analyse decorators innermost-first when they are applied
            # directly rather than through a synthesized assignment.
            for decorator in self.decorators[::-1]:
                decorator.decorator = decorator.decorator.analyse_expressions(env)
        self.py_wrapper.prepare_argument_coercion(env)
        return self

    def needs_assignment_synthesis(self, env, code=None):
        """Return True when a name-binding assignment must be generated for
        this function (closures, class bodies, binding mode, ...)."""
        if self.is_wrapper or self.specialized_cpdefs or self.entry.is_fused_specialized:
            return False
        if self.is_staticmethod:
            return True
        if self.no_assignment_synthesis:
            return False
        # Should enable for module level as well, that will require more testing...
        if self.entry.is_anonymous:
            return True
        if env.is_module_scope:
            if code is None:
                return env.directives['binding']
            else:
                return code.globalstate.directives['binding']
        return env.is_py_class_scope or env.is_closure_scope

    def error_value(self):
        return self.entry.signature.error_value

    def caller_will_check_exceptions(self):
        return self.entry.signature.exception_check

    def generate_function_definitions(self, env, code):
        if self.defaults_getter:
            self.defaults_getter.generate_function_definitions(env, code)
        # Before closure cnames are mangled
        if self.py_wrapper_required:
            # func_cname might be modified by @cname
            self.py_wrapper.func_cname = self.entry.func_cname
            self.py_wrapper.generate_function_definitions(env, code)
        FuncDefNode.generate_function_definitions(self, env, code)

    def generate_function_header(self, code, with_pymethdef, proto_only=0):
        """Emit the header (and forward declaration) of the implementation
        function; the Python-callable wrapper header is emitted by
        DefNodeWrapper."""
        if proto_only:
            if self.py_wrapper_required:
                self.py_wrapper.generate_function_header(
                    code, with_pymethdef, True)
            return
        arg_code_list = []
        if self.entry.signature.has_dummy_arg:
            self_arg = 'PyObject *%s' % Naming.self_cname
            if not self.needs_outer_scope:
                self_arg = 'CYTHON_UNUSED ' + self_arg
            arg_code_list.append(self_arg)

        def arg_decl_code(arg):
            # Declaration for one argument, using the closure cname when
            # the argument lives in a closure struct.
            entry = arg.entry
            if entry.in_closure:
                cname = entry.original_cname
            else:
                cname = entry.cname
            decl = entry.type.declaration_code(cname)
            if not entry.cf_used:
                decl = 'CYTHON_UNUSED ' + decl
            return decl

        for arg in self.args:
            arg_code_list.append(arg_decl_code(arg))
        if self.star_arg:
            arg_code_list.append(arg_decl_code(self.star_arg))
        if self.starstar_arg:
            arg_code_list.append(arg_decl_code(self.starstar_arg))
        arg_code = ', '.join(arg_code_list)
        dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
        decls_code = code.globalstate['decls']
        preprocessor_guard = self.get_preprocessor_guard()
        if preprocessor_guard:
            decls_code.putln(preprocessor_guard)
        decls_code.putln(
            "static %s(%s); /* proto */" % (dc, arg_code))
        if preprocessor_guard:
            decls_code.putln("#endif")
        code.putln("static %s(%s) {" % (dc, arg_code))

    def generate_argument_declarations(self, env, code):
        # Handled entirely by the wrapper function.
        pass

    def generate_keyword_list(self, code):
        # Handled entirely by the wrapper function.
        pass

    def generate_argument_parsing_code(self, env, code):
        # Move arguments into closure if required
        def put_into_closure(entry):
            if entry.in_closure:
                code.putln('%s = %s;' % (entry.cname, entry.original_cname))
                code.put_var_incref(entry)
                code.put_var_giveref(entry)
        for arg in self.args:
            put_into_closure(arg.entry)
        for arg in self.star_arg, self.starstar_arg:
            if arg:
                put_into_closure(arg.entry)

    def generate_argument_type_tests(self, code):
        # Type tests are emitted by the wrapper function.
        pass
class DefNodeWrapper(FuncDefNode):
# DefNode python wrapper code generator
defnode = None
target = None # Target DefNode
def __init__(self, *args, **kwargs):
    """Initialise the wrapper, mirroring the wrapped DefNode's
    argument-count bookkeeping."""
    FuncDefNode.__init__(self, *args, **kwargs)
    wrapped = self.target
    for attr in ('num_kwonly_args', 'num_required_kw_args',
                 'num_required_args', 'self_in_stararg'):
        setattr(self, attr, getattr(wrapped, attr))
    self.signature = None
def analyse_declarations(self, env):
    """Assign mangled C names for the wrapper function and its PyMethodDef,
    and adopt the calling signature of the wrapped function."""
    entry = self.target.entry
    mangled = env.next_id(env.scope_prefix) + self.name
    entry.func_cname = Naming.pywrap_prefix + mangled
    entry.pymethdef_cname = Naming.pymethdef_prefix + mangled
    self.signature = entry.signature
def prepare_argument_coercion(self, env):
    """Pre-expand the utility code needed to coerce arguments.

    This is only really required for Cython utility code at this time;
    everything else can be done during code generation. But we expand
    all utility code here, simply because we cannot easily distinguish
    different code types.
    """
    for formal in self.args:
        arg_type = formal.type
        if not arg_type.is_pyobject:
            # C-typed argument: ensure the from-Python converter exists.
            arg_type.create_from_py_utility_code(env)  # failure reported later
        elif formal.hdr_type and not formal.hdr_type.is_pyobject:
            formal.hdr_type.create_to_py_utility_code(env)  # failure reported later
def signature_has_nongeneric_args(self):
    """Return 1 if there is any argument beyond the implicit self/type
    argument, else 0."""
    args = self.args
    if not args:
        return 0
    if len(args) == 1 and (args[0].is_self_arg or args[0].is_type_arg):
        return 0
    return 1
def signature_has_generic_args(self):
    # True when the signature takes generic (tuple/dict-parsed) arguments.
    return self.signature.has_generic_args
def generate_function_body(self, code):
    """Emit the C call that forwards from the Python wrapper into the
    implementation function, casting arguments where a header type was
    used for unpacking."""
    call_args = []
    if self.signature.has_dummy_arg:
        call_args.append(Naming.self_cname)
    for arg in self.args:
        # memoryview slices, structs and complex values are passed as-is;
        # everything else that was unpacked via a header type gets a cast.
        needs_cast = arg.hdr_type and not (arg.type.is_memoryviewslice or
                                           arg.type.is_struct or
                                           arg.type.is_complex)
        if needs_cast:
            call_args.append(arg.type.cast_code(arg.entry.cname))
        else:
            call_args.append(arg.entry.cname)
    for extra in (self.star_arg, self.starstar_arg):
        if extra:
            call_args.append(extra.entry.cname)
    if not self.return_type.is_void:
        code.put('%s = ' % Naming.retval_cname)
    code.putln('%s(%s);' % (
        self.target.entry.pyfunc_cname, ', '.join(call_args)))
def generate_function_definitions(self, env, code):
    """Emit the complete C code of the Python wrapper function: header,
    argument parsing/type tests, the forwarding call, and the error and
    return cleanup paths."""
    lenv = self.target.local_scope
    # Generate C code for header and body of function
    code.mark_pos(self.pos)
    code.putln("")
    code.putln("/* Python wrapper */")
    preprocessor_guard = self.target.get_preprocessor_guard()
    if preprocessor_guard:
        code.putln(preprocessor_guard)
    code.enter_cfunc_scope()
    code.return_from_error_cleanup_label = code.new_label()
    with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
                      self.target.pymethdef_required)
    self.generate_function_header(code, with_pymethdef)
    self.generate_argument_declarations(lenv, code)
    # Insertion point so temp declarations can be backfilled after the
    # body has been generated and all temps are known.
    tempvardecl_code = code.insertion_point()
    if self.return_type.is_pyobject:
        retval_init = ' = 0'
    else:
        retval_init = ''
    if not self.return_type.is_void:
        code.putln('%s%s;' % (
            self.return_type.declaration_code(Naming.retval_cname),
            retval_init))
    code.put_declare_refcount_context()
    code.put_setup_refcount_context('%s (wrapper)' % self.name)
    self.generate_argument_parsing_code(lenv, code)
    self.generate_argument_type_tests(code)
    self.generate_function_body(code)
    # ----- Go back and insert temp variable declarations
    tempvardecl_code.put_temp_declarations(code.funcstate)
    code.mark_pos(self.pos)
    code.putln("")
    code.putln("/* function exit code */")
    # ----- Error cleanup
    if code.error_label in code.labels_used:
        code.put_goto(code.return_label)
        code.put_label(code.error_label)
        for cname, type in code.funcstate.all_managed_temps():
            code.put_xdecref(cname, type)
        err_val = self.error_value()
        if err_val is not None:
            code.putln("%s = %s;" % (Naming.retval_cname, err_val))
    # ----- Non-error return cleanup
    code.put_label(code.return_label)
    for entry in lenv.var_entries:
        if entry.is_arg and entry.type.is_pyobject:
            code.put_var_decref(entry)
    code.put_finish_refcount_context()
    if not self.return_type.is_void:
        code.putln("return %s;" % Naming.retval_cname)
    code.putln('}')
    code.exit_cfunc_scope()
    if preprocessor_guard:
        code.putln("#endif /*!(%s)*/" % preprocessor_guard)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
    """Emit the wrapper's prototype and header, plus its docstring char
    array and (when needed) its static PyMethodDef."""
    arg_code_list = []
    sig = self.signature
    if sig.has_dummy_arg or self.self_in_stararg:
        arg_code = "PyObject *%s" % Naming.self_cname
        if not sig.has_dummy_arg:
            arg_code = 'CYTHON_UNUSED ' + arg_code
        arg_code_list.append(arg_code)
    for arg in self.args:
        if not arg.is_generic:
            if arg.is_self_arg or arg.is_type_arg:
                arg_code_list.append("PyObject *%s" % arg.hdr_cname)
            else:
                arg_code_list.append(
                    arg.hdr_type.declaration_code(arg.hdr_cname))
    entry = self.target.entry
    if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
        # METH_NOARGS functions still receive a second (ignored) argument.
        arg_code_list.append("CYTHON_UNUSED PyObject *unused")
    if entry.scope.is_c_class_scope and entry.name == "__ipow__":
        # The ternary pow slot passes a third argument we never use.
        arg_code_list.append("CYTHON_UNUSED PyObject *unused")
    if sig.has_generic_args:
        arg_code_list.append(
            "PyObject *%s, PyObject *%s"
            % (Naming.args_cname, Naming.kwds_cname))
    arg_code = ", ".join(arg_code_list)
    # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
    mf = ""
    if (entry.name in ("__getbuffer__", "__releasebuffer__")
            and entry.scope.is_c_class_scope):
        mf = "CYTHON_UNUSED "
        with_pymethdef = False
    dc = self.return_type.declaration_code(entry.func_cname)
    header = "static %s%s(%s)" % (mf, dc, arg_code)
    code.putln("%s; /*proto*/" % header)
    if proto_only:
        if self.target.fused_py_func:
            # If we are the specialized version of the cpdef, we still
            # want the prototype for the "fused cpdef", in case we're
            # checking to see if our method was overridden in Python
            self.target.fused_py_func.generate_function_header(
                code, with_pymethdef, proto_only=True)
        return
    if (Options.docstrings and entry.doc and
            not self.target.fused_py_func and
            not entry.scope.is_property_scope and
            (not entry.is_special or entry.wrapperbase_cname)):
        # h_code = code.globalstate['h_code']
        docstr = entry.doc
        if docstr.is_unicode:
            docstr = docstr.utf8encode()
        code.putln(
            'static char %s[] = "%s";' % (
                entry.doc_cname,
                split_string_literal(escape_byte_string(docstr))))
        if entry.is_special:
            code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
            code.putln(
                "struct wrapperbase %s;" % entry.wrapperbase_cname)
            code.putln('#endif')
    if with_pymethdef or self.target.fused_py_func:
        code.put(
            "static PyMethodDef %s = " %
            entry.pymethdef_cname)
        code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
    code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
    # Emit C declarations for the wrapper function's incoming arguments.
    for arg in self.args:
        if not arg.is_generic:
            continue
        if arg.needs_conversion:
            # Declare a raw PyObject* holder; the real conversion to the
            # declared type happens later in the conversion pass.
            code.putln("PyObject *%s = 0;" % arg.hdr_cname)
        else:
            code.put_var_declaration(arg.entry)
    # Declare any further argument entries tracked by the scope itself.
    for entry in env.var_entries:
        if entry.is_arg:
            code.put_var_declaration(entry)
def generate_argument_parsing_code(self, env, code):
    # Generate fast equivalent of PyArg_ParseTuple call for
    # generic arguments, if any, including args/kwargs.
    # Dispatches to one of three unpacking strategies depending on the
    # signature shape, then emits the shared error-cleanup epilogue.
    old_error_label = code.new_error_label()
    our_error_label = code.error_label
    end_label = code.new_label("argument_unpacking_done")
    has_kwonly_args = self.num_kwonly_args > 0
    has_star_or_kw_args = self.star_arg is not None \
        or self.starstar_arg is not None or has_kwonly_args
    for arg in self.args:
        if not arg.type.is_pyobject:
            if not arg.type.create_from_py_utility_code(env):
                pass  # will fail later
    if not self.signature_has_generic_args():
        # Fixed C-level signature: no tuple/dict unpacking possible.
        if has_star_or_kw_args:
            error(self.pos, "This method cannot have * or keyword arguments")
        self.generate_argument_conversion_code(code)
    elif not self.signature_has_nongeneric_args():
        # func(*args) or func(**kw) or func(*args, **kw)
        self.generate_stararg_copy_code(code)
    else:
        # General case: positional and/or keyword arguments to unpack.
        self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)
    code.error_label = old_error_label
    if code.label_used(our_error_label):
        # Error path for the unpacking code above: release any
        # partially-created * / ** containers, record a traceback
        # entry and return the signature's error value.
        if not code.label_used(end_label):
            code.put_goto(end_label)
        code.put_label(our_error_label)
        if has_star_or_kw_args:
            self.generate_arg_decref(self.star_arg, code)
            if self.starstar_arg:
                if self.starstar_arg.entry.xdecref_cleanup:
                    code.put_var_xdecref_clear(self.starstar_arg.entry)
                else:
                    code.put_var_decref_clear(self.starstar_arg.entry)
        code.put_add_traceback(self.target.entry.qualified_name)
        code.put_finish_refcount_context()
        code.putln("return %s;" % self.error_value())
    if code.label_used(end_label):
        code.put_label(end_label)
def generate_arg_xdecref(self, arg, code):
    # Emit a NULL-safe DECREF-and-clear for the given argument, if any.
    if not arg:
        return
    code.put_var_xdecref_clear(arg.entry)
def generate_arg_decref(self, arg, code):
    # Emit a plain (non-NULL) DECREF-and-clear for the given argument, if any.
    if not arg:
        return
    code.put_var_decref_clear(arg.entry)
def generate_stararg_copy_code(self, code):
    # Unpacking code for signatures consisting only of *args and/or
    # **kwargs: the incoming tuple/dict are copied (or reused) directly
    # instead of being unpacked item by item.
    if not self.star_arg:
        # No *args: reject any positional arguments up front.
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
        code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
                   Naming.args_cname)
        code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
            self.name, Naming.args_cname, self.error_value()))
        code.putln("}")

    # Validate that all keyword names are strings; if there is no
    # **kwargs to absorb them, also reject a non-empty kwargs dict.
    if self.starstar_arg:
        if self.star_arg:
            kwarg_check = "unlikely(%s)" % Naming.kwds_cname
        else:
            kwarg_check = "%s" % Naming.kwds_cname
    else:
        kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
            Naming.kwds_cname, Naming.kwds_cname)
    code.globalstate.use_utility_code(
        UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
    code.putln(
        "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
            kwarg_check, Naming.kwds_cname, self.name,
            bool(self.starstar_arg), self.error_value()))

    if self.starstar_arg:
        # **kwargs gets its own dict: copy the incoming one, or create
        # an empty dict when no keywords were passed.
        code.putln("%s = (%s) ? PyDict_Copy(%s) : PyDict_New();" % (
            self.starstar_arg.entry.cname,
            Naming.kwds_cname,
            Naming.kwds_cname))
        code.putln("if (unlikely(!%s)) return %s;" % (
            self.starstar_arg.entry.cname, self.error_value()))
        self.starstar_arg.entry.xdecref_cleanup = 0
        code.put_gotref(self.starstar_arg.entry.cname)

    if self.self_in_stararg and not self.target.is_staticmethod:
        # need to create a new tuple with 'self' inserted as first item
        code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
            self.star_arg.entry.cname,
            Naming.args_cname,
            self.star_arg.entry.cname))
        if self.starstar_arg:
            # Allocation failed: release the already-created ** dict first.
            code.putln("{")
            code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
            code.putln("return %s;" % self.error_value())
            code.putln("}")
        else:
            code.putln("return %s;" % self.error_value())
        code.put_gotref(self.star_arg.entry.cname)
        code.put_incref(Naming.self_cname, py_object_type)
        code.put_giveref(Naming.self_cname)
        code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
            self.star_arg.entry.cname, Naming.self_cname))
        # Copy the original positional arguments, shifted by one slot.
        temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
        code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
            temp, temp, Naming.args_cname, temp))
        code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
            Naming.args_cname, temp))
        code.put_incref("item", py_object_type)
        code.put_giveref("item")
        code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
            self.star_arg.entry.cname, temp))
        code.putln("}")
        code.funcstate.release_temp(temp)
        self.star_arg.entry.xdecref_cleanup = 0
    elif self.star_arg:
        # *args can reuse the incoming tuple directly (plus one reference).
        code.put_incref(Naming.args_cname, py_object_type)
        code.putln("%s = %s;" % (
            self.star_arg.entry.cname,
            Naming.args_cname))
        self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
    # General argument unpacking: classify the declared arguments,
    # then emit one optimised code path for calls that pass keyword
    # arguments and one for purely positional calls.
    argtuple_error_label = code.new_label("argtuple_error")

    positional_args = []
    required_kw_only_args = []
    optional_kw_only_args = []
    for arg in args:
        if arg.is_generic:
            if arg.default:
                if not arg.is_self_arg and not arg.is_type_arg:
                    if arg.kw_only:
                        optional_kw_only_args.append(arg)
                    else:
                        positional_args.append(arg)
            elif arg.kw_only:
                required_kw_only_args.append(arg)
            elif not arg.is_self_arg and not arg.is_type_arg:
                positional_args.append(arg)

    # sort required kw-only args before optional ones to avoid special
    # cases in the unpacking code
    kw_only_args = required_kw_only_args + optional_kw_only_args

    min_positional_args = self.num_required_args - self.num_required_kw_args
    if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
        # self/type comes in through a separate C parameter, not the tuple.
        min_positional_args -= 1
    max_positional_args = len(positional_args)
    has_fixed_positional_count = not self.star_arg and \
        min_positional_args == max_positional_args
    has_kw_only_args = bool(kw_only_args)

    if self.num_required_kw_args:
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))

    if self.starstar_arg or self.star_arg:
        self.generate_stararg_init_code(max_positional_args, code)

    code.putln('{')
    all_args = tuple(positional_args) + tuple(kw_only_args)
    # NULL-terminated array of interned argument names, used by the
    # keyword-parsing helpers.
    code.putln("static PyObject **%s[] = {%s,0};" % (
        Naming.pykwdlist_cname,
        ','.join([ '&%s' % code.intern_identifier(arg.name)
                   for arg in all_args ])))

    # Before being converted and assigned to the target variables,
    # borrowed references to all unpacked argument values are
    # collected into a local PyObject* array called "values",
    # regardless if they were taken from default arguments,
    # positional arguments or keyword arguments. Note that
    # C-typed default arguments are handled at conversion time,
    # so their array value is NULL in the end if no argument
    # was passed for them.
    self.generate_argument_values_setup_code(all_args, code)

    # --- optimised code when we receive keyword arguments
    code.putln("if (%s(%s)) {" % (
        (self.num_required_kw_args > 0) and "likely" or "unlikely",
        Naming.kwds_cname))
    self.generate_keyword_unpacking_code(
        min_positional_args, max_positional_args,
        has_fixed_positional_count, has_kw_only_args,
        all_args, argtuple_error_label, code)

    # --- optimised code when we do not receive any keyword arguments
    if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
        # Python raises arg tuple related errors first, so we must
        # check the length here
        if min_positional_args == max_positional_args and not self.star_arg:
            compare = '!='
        else:
            compare = '<'
        code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
            Naming.args_cname, compare, min_positional_args))
        code.put_goto(argtuple_error_label)

    if self.num_required_kw_args:
        # pure error case: keywords required but not passed
        if max_positional_args > min_positional_args and not self.star_arg:
            code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
                Naming.args_cname, max_positional_args))
            code.put_goto(argtuple_error_label)
        code.putln('} else {')
        for i, arg in enumerate(kw_only_args):
            if not arg.default:
                pystring_cname = code.intern_identifier(arg.name)
                # required keyword-only argument missing
                code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
                    self.name,
                    pystring_cname))
                code.putln(code.error_goto(self.pos))
                break
    else:
        # optimised tuple unpacking code
        code.putln('} else {')
        if min_positional_args == max_positional_args:
            # parse the exact number of positional arguments from
            # the args tuple
            for i, arg in enumerate(positional_args):
                code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
        else:
            # parse the positional arguments from the variable length
            # args tuple and reject illegal argument tuple sizes
            code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
            if self.star_arg:
                code.putln('default:')
            reversed_args = list(enumerate(positional_args))[::-1]
            for i, arg in reversed_args:
                if i >= min_positional_args-1:
                    # Fall-through cases: copy all arguments up to the
                    # received tuple size.
                    code.put('case %2d: ' % (i+1))
                    code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
            if min_positional_args == 0:
                code.put('case 0: ')
            code.putln('break;')
            if self.star_arg:
                if min_positional_args:
                    # Too few positional arguments.
                    for i in range(min_positional_args-1, -1, -1):
                        code.putln('case %2d:' % i)
                    code.put_goto(argtuple_error_label)
            else:
                code.put('default: ')
                code.put_goto(argtuple_error_label)
            code.putln('}')

    code.putln('}')  # end of the conditional unpacking blocks

    # Convert arg values to their final type and assign them.
    # Also inject non-Python default arguments, which cannot
    # live in the values[] array.
    for i, arg in enumerate(all_args):
        self.generate_arg_assignment(arg, "values[%d]" % i, code)

    code.putln('}')  # end of the whole argument unpacking block

    if code.label_used(argtuple_error_label):
        # Shared error path for an argument tuple of illegal size.
        code.put_goto(success_label)
        code.put_label(argtuple_error_label)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
        code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
            self.name, has_fixed_positional_count,
            min_positional_args, max_positional_args,
            Naming.args_cname))
        code.putln(code.error_goto(self.pos))
def generate_arg_assignment(self, arg, item, code):
    # Assign one unpacked argument value (the C expression 'item',
    # typically "values[i]") to its target variable, converting
    # Python objects to C types where required.
    if arg.type.is_pyobject:
        # Python default arguments were already stored in 'item' at the very beginning
        if arg.is_generic:
            item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
        entry = arg.entry
        code.putln("%s = %s;" % (entry.cname, item))
    else:
        func = arg.type.from_py_function
        if func:
            if arg.default:
                # C-typed default arguments must be handled here
                code.putln('if (%s) {' % item)
            rhs = "%s(%s)" % (func, item)
            if arg.type.is_enum:
                rhs = arg.type.cast_code(rhs)
            code.putln("%s = %s; %s" % (
                arg.entry.cname,
                rhs,
                code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
            if arg.default:
                # No value received: fall back to the computed default.
                code.putln('} else {')
                code.putln(
                    "%s = %s;" % (
                        arg.entry.cname,
                        arg.calculate_default_value_code(code)))
                if arg.type.is_memoryviewslice:
                    # Memoryview defaults need their own reference.
                    code.put_incref_memoryviewslice(arg.entry.cname,
                                                    have_gil=True)
                code.putln('}')
        else:
            error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
def generate_stararg_init_code(self, max_positional_args, code):
    # Initialise the * and ** argument containers before general
    # unpacking: ** starts as a fresh empty dict, * receives the
    # surplus positional arguments (or the shared empty tuple).
    if self.starstar_arg:
        self.starstar_arg.entry.xdecref_cleanup = 0
        code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
            self.starstar_arg.entry.cname,
            self.starstar_arg.entry.cname,
            self.error_value()))
        code.put_gotref(self.starstar_arg.entry.cname)
    if self.star_arg:
        self.star_arg.entry.xdecref_cleanup = 0
        code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
            Naming.args_cname,
            max_positional_args))
        code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
            self.star_arg.entry.cname, Naming.args_cname,
            max_positional_args, Naming.args_cname))
        code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
        if self.starstar_arg:
            # Slice creation failed: release the ** dict created above.
            code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
        code.put_finish_refcount_context()
        code.putln('return %s;' % self.error_value())
        code.putln('}')
        code.put_gotref(self.star_arg.entry.cname)
        code.putln('} else {')
        # No surplus arguments: reuse the interned empty tuple.
        code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
        code.put_incref(Naming.empty_tuple, py_object_type)
        code.putln('}')
def generate_argument_values_setup_code(self, args, code):
    # Declare the local 'values' array that collects borrowed
    # references to the unpacked arguments before any type coercion.
    arg_count = len(args)
    initialisers = ','.join(['0'] * arg_count)
    code.putln("PyObject* values[%d] = {%s};" % (arg_count, initialisers))
    if self.target.defaults_struct:
        # Fetch the struct holding the function's dynamic default values.
        code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
            self.target.defaults_struct, Naming.dynamic_args_cname,
            self.target.defaults_struct, Naming.self_cname))
    # Pre-fill the array with borrowed Python default values so that
    # received arguments can simply overwrite them below.
    for index, arg in enumerate(args):
        if not (arg.default and arg.type.is_pyobject):
            continue
        default_code = arg.calculate_default_value_code(code)
        code.putln('values[%d] = %s;' % (index, arg.type.as_pyobject(default_code)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
                                    has_fixed_positional_count, has_kw_only_args,
                                    all_args, argtuple_error_label, code):
    # Unpacking path taken when a kwargs dict was passed: first copy
    # the positional arguments, then look up the remaining required /
    # positional arguments by name, then handle optional kw-only args
    # and finally delegate leftovers to __Pyx_ParseOptionalKeywords().
    code.putln('Py_ssize_t kw_args;')
    code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
    # copy the values from the args tuple and check that it's not too long
    code.putln('switch (pos_args) {')
    if self.star_arg:
        code.putln('default:')
    for i in range(max_positional_args-1, -1, -1):
        code.put('case %2d: ' % (i+1))
        code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
            i, Naming.args_cname, i))
    code.putln('case 0: break;')
    if not self.star_arg:
        code.put('default: ')  # more arguments than allowed
        code.put_goto(argtuple_error_label)
    code.putln('}')

    # The code above is very often (but not always) the same as
    # the optimised non-kwargs tuple unpacking code, so we keep
    # the code block above at the very top, before the following
    # 'external' PyDict_Size() call, to make it easy for the C
    # compiler to merge the two separate tuple unpacking
    # implementations into one when they turn out to be identical.

    # If we received kwargs, fill up the positional/required
    # arguments with values from the kw dict
    code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
    if self.num_required_args or max_positional_args > 0:
        last_required_arg = -1
        for i, arg in enumerate(all_args):
            if not arg.default:
                last_required_arg = i
        if last_required_arg < max_positional_args:
            last_required_arg = max_positional_args-1
        if max_positional_args > 0:
            code.putln('switch (pos_args) {')
        for i, arg in enumerate(all_args[:last_required_arg+1]):
            if max_positional_args > 0 and i <= max_positional_args:
                # Switch cases fall through: only look up by keyword the
                # arguments that were not already filled positionally.
                if self.star_arg and i == max_positional_args:
                    code.putln('default:')
                else:
                    code.putln('case %2d:' % i)
            pystring_cname = code.intern_identifier(arg.name)
            if arg.default:
                if arg.kw_only:
                    # optional kw-only args are handled separately below
                    continue
                code.putln('if (kw_args > 0) {')
                # don't overwrite default argument
                code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
                    Naming.kwds_cname, pystring_cname))
                code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
                code.putln('}')
            else:
                code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
                    i, Naming.kwds_cname, pystring_cname))
                if i < min_positional_args:
                    if i == 0:
                        # special case: we know arg 0 is missing
                        code.put('else ')
                        code.put_goto(argtuple_error_label)
                    else:
                        # print the correct number of values (args or
                        # kwargs) that were passed into positional
                        # arguments up to this point
                        code.putln('else {')
                        code.globalstate.use_utility_code(
                            UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
                        code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
                            self.name, has_fixed_positional_count,
                            min_positional_args, max_positional_args, i))
                        code.putln(code.error_goto(self.pos))
                        code.putln('}')
                elif arg.kw_only:
                    # Required keyword-only argument was not passed.
                    code.putln('else {')
                    code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' %(
                        self.name, pystring_cname))
                    code.putln(code.error_goto(self.pos))
                    code.putln('}')
        if max_positional_args > 0:
            code.putln('}')

    if has_kw_only_args:
        # unpack optional keyword-only arguments separately because
        # checking for interned strings in a dict is faster than iterating
        self.generate_optional_kwonly_args_unpacking_code(all_args, code)

    code.putln('if (unlikely(kw_args > 0)) {')
    # non-positional/-required kw args left in dict: default args,
    # kw-only args, **kwargs or error
    #
    # This is sort of a catch-all: except for checking required
    # arguments, this will always do the right thing for unpacking
    # keyword arguments, so that we can concentrate on optimising
    # common cases above.
    if max_positional_args == 0:
        pos_arg_count = "0"
    elif self.star_arg:
        # Only the first max_positional_args entries came from the tuple.
        code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
            max_positional_args, max_positional_args))
        pos_arg_count = "used_pos_args"
    else:
        pos_arg_count = "pos_args"
    code.globalstate.use_utility_code(
        UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
    code.putln(
        'if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
            Naming.kwds_cname,
            Naming.pykwdlist_cname,
            self.starstar_arg and self.starstar_arg.entry.cname or '0',
            pos_arg_count,
            self.name,
            code.error_goto(self.pos)))
    code.putln('}')
def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
    # Look up optional keyword-only arguments in the kwargs dict.
    # Relies on kw_only args with defaults occupying a contiguous run
    # at the end of 'all_args' (required kw-only args are sorted first).
    optional_args = []
    first_optional_arg = -1
    for i, arg in enumerate(all_args):
        if not arg.kw_only or not arg.default:
            continue
        if not optional_args:
            first_optional_arg = i
        optional_args.append(arg.name)
    if optional_args:
        if len(optional_args) > 1:
            # if we receive more than the named kwargs, we either have **kwargs
            # (in which case we must iterate anyway) or it's an error (which we
            # also handle during iteration) => skip this part if there are more
            code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
                not self.starstar_arg and 'likely' or '',
                len(optional_args)))
            code.putln('Py_ssize_t index;')
            # not unrolling the loop here reduces the C code overhead
            code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
                first_optional_arg, first_optional_arg + len(optional_args)))
        else:
            # Single optional kw-only argument: no loop needed.
            code.putln('if (kw_args == 1) {')
            code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
        code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
            Naming.kwds_cname, Naming.pykwdlist_cname))
        code.putln('if (value) { values[index] = value; kw_args--; }')
        if len(optional_args) > 1:
            code.putln('}')
        code.putln('}')
def generate_argument_conversion_code(self, code):
    # Convert arguments from their signature type to their declared
    # type where the two differ; also copies signature arguments into
    # closure fields where needed.
    for arg in self.args:
        if not arg.needs_conversion:
            continue
        self.generate_arg_conversion(arg, code)
def generate_arg_conversion(self, arg, code):
    # Generate conversion code for one argument, dispatching on the
    # direction of the conversion (Python -> C, C -> Python, C -> C).
    old_type = arg.hdr_type
    new_type = arg.type
    if old_type.is_pyobject:
        # Guard against a NULL pointer: optional arguments may be
        # missing, required ones are asserted to be present.
        if arg.default:
            code.putln("if (%s) {" % arg.hdr_cname)
        else:
            code.putln("assert(%s); {" % arg.hdr_cname)
        self.generate_arg_conversion_from_pyobject(arg, code)
        code.putln("}")
    elif new_type.is_pyobject:
        self.generate_arg_conversion_to_pyobject(arg, code)
    else:
        # C -> C: only a plain assignment when the types are compatible.
        if new_type.assignable_from(old_type):
            code.putln(
                "%s = %s;" % (arg.entry.cname, arg.hdr_cname))
        else:
            error(arg.pos,
                  "Cannot convert 1 argument from '%s' to '%s'" %
                  (old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
    # Convert a PyObject* signature argument to its declared C type,
    # using the type's registered from-Python conversion function.
    new_type = arg.type
    func = new_type.from_py_function
    # copied from CoerceFromPyTypeNode
    if func:
        lhs = arg.entry.cname
        rhs = "%s(%s)" % (func, arg.hdr_cname)
        if new_type.is_enum:
            # Conversion functions return a long; cast it to the enum type.
            rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
        code.putln("%s = %s; %s" % (
            lhs,
            rhs,
            code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
    else:
        error(arg.pos,
              "Cannot convert Python object argument to type '%s'"
              % new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
    # Convert a C-typed signature argument to a Python object, using
    # the type's registered to-Python conversion function.
    old_type = arg.hdr_type
    func = old_type.to_py_function
    if func:
        code.putln("%s = %s(%s); %s" % (
            arg.entry.cname,
            func,
            arg.hdr_cname,
            code.error_goto_if_null(arg.entry.cname, arg.pos)))
        # The conversion produced a new reference owned by this function.
        code.put_var_gotref(arg.entry)
    else:
        error(arg.pos,
              "Cannot convert argument of type '%s' to Python object"
              % old_type)
def generate_argument_type_tests(self, code):
    # Emit runtime type tests for args whose signature type is a plain
    # PyObject* but whose declared type is a subtype of it, plus
    # None-checks for object-like arguments declared "not None".
    for arg in self.args:
        if arg.needs_type_test:
            self.generate_arg_type_test(arg, code)
            continue
        is_object_like = (arg.type.is_pyobject
                          or arg.type.is_buffer
                          or arg.type.is_memoryviewslice)
        if is_object_like and not arg.accept_none:
            self.generate_arg_none_check(arg, code)
def error_value(self):
    # The C-level error return value (e.g. NULL) prescribed by this
    # wrapper's DSL signature object.
    return self.signature.error_value
class GeneratorDefNode(DefNode):
    # Generator function node that creates a new generator instance when called.
    #
    # gbody   GeneratorBodyDefNode   the function implementing the generator
    #

    is_generator = True
    needs_closure = True

    child_attrs = DefNode.child_attrs + ["gbody"]

    def __init__(self, **kwargs):
        # XXX: this node doesn't actually need a body of its own; the
        # real code lives in 'gbody', so an empty statement list is used.
        kwargs['body'] = StatListNode(kwargs['pos'], stats=[])
        super(GeneratorDefNode, self).__init__(**kwargs)

    def analyse_declarations(self, env):
        super(GeneratorDefNode, self).analyse_declarations(env)
        # The generator body shares the local scope of this def node.
        self.gbody.local_scope = self.local_scope
        self.gbody.analyse_declarations(env)

    def generate_function_body(self, env, code):
        # Calling the generator function only instantiates a generator
        # object wrapping the body function and the closure; the body
        # itself runs on iteration.
        body_cname = self.gbody.entry.func_cname

        code.putln('{')
        code.putln('__pyx_GeneratorObject *gen = __Pyx_Generator_New('
                   '(__pyx_generator_body_t) %s, (PyObject *) %s); %s' % (
                       body_cname, Naming.cur_scope_cname,
                       code.error_goto_if_null('gen', self.pos)))
        # Ownership of the closure scope passes to the generator object.
        code.put_decref(Naming.cur_scope_cname, py_object_type)
        if self.requires_classobj:
            classobj_cname = 'gen->classobj'
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                classobj_cname, Naming.self_cname))
            code.put_incref(classobj_cname, py_object_type)
            code.put_giveref(classobj_cname)
        code.put_finish_refcount_context()
        code.putln('return (PyObject *) gen;')
        code.putln('}')

    def generate_function_definitions(self, env, code):
        env.use_utility_code(UtilityCode.load_cached("Generator", "Generator.c"))
        # Forward-declare the body function, then generate the creator
        # function (this node) and the body function itself.
        self.gbody.generate_function_header(code, proto=True)
        super(GeneratorDefNode, self).generate_function_definitions(env, code)
        self.gbody.generate_function_definitions(env, code)
class GeneratorBodyDefNode(DefNode):
    # Main code body of a generator implemented as a DefNode.
    #

    is_generator_body = True

    def __init__(self, pos=None, name=None, body=None):
        super(GeneratorBodyDefNode, self).__init__(
            pos=pos, body=body, name=name, doc=None,
            args=[], star_arg=None, starstar_arg=None)

    def declare_generator_body(self, env):
        # Declare the C function that implements the generator body,
        # with a unique, scope-derived cname.
        prefix = env.next_id(env.scope_prefix)
        name = env.next_id('generator')
        cname = Naming.genbody_prefix + prefix + name
        entry = env.declare_var(None, py_object_type, self.pos,
                                cname=cname, visibility='private')
        entry.func_cname = cname
        entry.qualified_name = EncodedString(self.name)
        self.entry = entry

    def analyse_declarations(self, env):
        self.analyse_argument_types(env)
        self.declare_generator_body(env)

    def generate_function_header(self, code, proto=False):
        # The body function receives the generator object (which holds
        # the closure) and the value sent in via send()/next().
        header = "static PyObject *%s(__pyx_GeneratorObject *%s, PyObject *%s)" % (
            self.entry.func_cname,
            Naming.generator_cname,
            Naming.sent_value_cname)
        if proto:
            code.putln('%s; /* proto */' % header)
        else:
            code.putln('%s /* generator body */\n{' % header)

    def generate_function_definitions(self, env, code):
        lenv = self.local_scope

        # Generate closure function definitions
        self.body.generate_function_definitions(lenv, code)

        # Generate C code for header and body of function
        code.enter_cfunc_scope()
        code.return_from_error_cleanup_label = code.new_label()

        # ----- Top-level constants used by this function
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        # ----- Function header
        code.putln("")
        self.generate_function_header(code)
        # Insertion point for closure initialisation, filled in below
        # once we know whether the closure scope has any entries.
        closure_init_code = code.insertion_point()
        # ----- Local variables
        code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
        tempvardecl_code = code.insertion_point()
        code.put_declare_refcount_context()
        code.put_setup_refcount_context(self.entry.name)

        # ----- Resume switch point.
        code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
        # Insertion point for the resume switch, filled in at the end
        # once all yield labels have been collected.
        resume_code = code.insertion_point()
        first_run_label = code.new_label('first_run')
        code.use_label(first_run_label)
        code.put_label(first_run_label)
        code.putln('%s' %
                   (code.error_goto_if_null(Naming.sent_value_cname, self.pos)))

        # ----- Function body
        self.generate_function_body(env, code)
        # ----- Closure initialization
        if lenv.scope_class.type.scope.entries:
            closure_init_code.putln('%s = %s;' % (
                lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
                lenv.scope_class.type.cast_code('%s->closure' %
                                                Naming.generator_cname)))

        code.mark_pos(self.pos)
        code.putln("")
        code.putln("/* function exit code */")

        # on normal generator termination, we do not take the exception propagation
        # path: no traceback info is required and not creating it is much faster
        if not self.body.is_terminator:
            code.putln('PyErr_SetNone(PyExc_StopIteration);')
        # ----- Error cleanup
        if code.error_label in code.labels_used:
            if not self.body.is_terminator:
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type)
            code.put_add_traceback(self.entry.qualified_name)

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        code.put_xdecref(Naming.retval_cname, py_object_type)
        # Mark the generator as exhausted so resuming raises StopIteration.
        code.putln('%s->resume_label = -1;' % Naming.generator_cname)
        # clean up as early as possible to help breaking any reference cycles
        code.putln('__Pyx_Generator_clear((PyObject*)%s);' % Naming.generator_cname)
        code.put_finish_refcount_context()
        code.putln('return NULL;')
        code.putln("}")

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)
        # ----- Generator resume code
        resume_code.putln("switch (%s->resume_label) {" % (
            Naming.generator_cname))
        resume_code.putln("case 0: goto %s;" % first_run_label)

        for i, label in code.yield_labels:
            resume_code.putln("case %d: goto %s;" % (i, label))
        resume_code.putln("default: /* CPython raises the right error here */")
        resume_code.put_finish_refcount_context()
        resume_code.putln("return NULL;")
        resume_code.putln("}")

        code.exit_cfunc_scope()
class OverrideCheckNode(StatNode):
    # A Node for dispatching to the def method if it
    # is overridden in Python.
    #
    # py_func
    #
    # args
    # func_temp
    # body

    child_attrs = ['body']

    body = None

    def analyse_expressions(self, env):
        self.args = env.arg_entries
        # Skip the self/type argument for methods; module-level
        # functions pass all arguments through.
        if self.py_func.is_module_scope:
            first_arg = 0
        else:
            first_arg = 1
        import ExprNodes
        self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
        # Build a 'return <overriding func>(args...)' statement that is
        # executed when the Python-level override is detected.
        call_node = ExprNodes.SimpleCallNode(
            self.pos, function=self.func_node,
            args=[ ExprNodes.NameNode(self.pos, name=arg.name)
                   for arg in self.args[first_arg:] ])
        self.body = ReturnStatNode(self.pos, value=call_node)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
        # Check to see if we are an extension type
        if self.py_func.is_module_scope:
            self_arg = "((PyObject *)%s)" % Naming.module_cname
        else:
            self_arg = "((PyObject *)%s)" % self.args[0].cname
        code.putln("/* Check if called by wrapper */")
        code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
        code.putln("/* Check if overridden in Python */")
        if self.py_func.is_module_scope:
            code.putln("else {")
        else:
            # Only instances with a dict can carry an override.
            code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
        func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        self.func_node.set_cname(func_node_temp)
        # need to get attribute manually--scope would return cdef method
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
        err = code.error_goto_if_null(func_node_temp, self.pos)
        code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
            func_node_temp, self_arg, interned_attr_cname, err))
        code.put_gotref(func_node_temp)
        # If the looked-up attribute is not our own wrapper function,
        # a Python-level override exists: dispatch to it.
        is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
        is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % (
            func_node_temp, self.py_func.entry.func_cname)
        code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
        self.body.generate_execution_code(code)
        code.putln("}")
        code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
        code.funcstate.release_temp(func_node_temp)
        code.putln("}")
class ClassDefNode(StatNode, BlockNode):
    # Abstract base for Python and C (extension type) class definitions.
    pass
class PyClassDefNode(ClassDefNode):
    # A Python class definition.
    #
    # name        EncodedString    Name of the class
    # doc         string or None
    # body        StatNode         Attribute definition code
    # entry       Symtab.Entry
    # scope       PyClassScope
    # decorators  [DecoratorNode]  list of decorators or None
    #
    # The following subnodes are constructed internally:
    #
    # dict        DictNode         Class dictionary or Py3 namespace
    # classobj    ClassNode        Class object
    # target      NameNode         Variable to assign class object to

    child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
                   "target", "class_cell", "decorators"]
    # Default values for the optionally-present internal subnodes.
    decorators = None
    class_result = None
    is_py3_style_class = False  # Python3 style class (kwargs)
    metaclass = None
    mkw = None
    def __init__(self, pos, name, bases, doc, body, decorators=None,
                 keyword_args=None, starstar_arg=None, force_py3_semantics=False):
        # Build the internal subnodes (dict/classobj/target/...) that
        # implement the class creation, choosing between Py2-style and
        # Py3-style (metaclass/kwargs) creation semantics.
        StatNode.__init__(self, pos)
        self.name = name
        self.doc = doc
        self.body = body
        self.decorators = decorators
        self.bases = bases
        import ExprNodes
        if self.doc and Options.docstrings:
            doc = embed_position(self.pos, self.doc)
            doc_node = ExprNodes.StringNode(pos, value=doc)
        else:
            doc_node = None

        allow_py2_metaclass = not force_py3_semantics
        if keyword_args or starstar_arg:
            # Class creation keywords force Python3 class semantics.
            allow_py2_metaclass = False
            self.is_py3_style_class = True
            if keyword_args and not starstar_arg:
                for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
                    if item.key.value == 'metaclass':
                        if self.metaclass is not None:
                            error(item.pos, "keyword argument 'metaclass' passed multiple times")
                        # special case: we already know the metaclass,
                        # so we don't need to do the "build kwargs,
                        # find metaclass" dance at runtime
                        self.metaclass = item.value
                        del keyword_args.key_value_pairs[i]
            if starstar_arg:
                self.mkw = ExprNodes.KeywordArgsNode(
                    pos, keyword_args = keyword_args and keyword_args.key_value_pairs or [],
                    starstar_arg = starstar_arg)
            elif keyword_args.key_value_pairs:
                self.mkw = keyword_args
            else:
                # All keywords were consumed above, so 'metaclass' must
                # have been among them.
                assert self.metaclass is not None

        if force_py3_semantics or self.bases or self.mkw or self.metaclass:
            if self.metaclass is None:
                if starstar_arg:
                    # **kwargs may contain 'metaclass' arg
                    mkdict = self.mkw
                else:
                    mkdict = None
                if (not mkdict and
                        self.bases.is_sequence_constructor and
                        not self.bases.args):
                    pass  # no base classes => no inherited metaclass
                else:
                    # The metaclass must be computed at runtime from the
                    # keyword dict and/or the base classes.
                    self.metaclass = ExprNodes.PyClassMetaclassNode(
                        pos, mkw = mkdict, bases = self.bases)
                needs_metaclass_calculation = False
            else:
                needs_metaclass_calculation = True

            self.dict = ExprNodes.PyClassNamespaceNode(
                pos, name = name, doc = doc_node,
                metaclass = self.metaclass, bases = self.bases, mkw = self.mkw)
            self.classobj = ExprNodes.Py3ClassNode(
                pos, name = name,
                bases = self.bases, dict = self.dict, doc = doc_node,
                metaclass = self.metaclass, mkw = self.mkw,
                calculate_metaclass = needs_metaclass_calculation,
                allow_py2_metaclass = allow_py2_metaclass)
        else:
            # no bases, no metaclass => old style class creation
            self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
            self.classobj = ExprNodes.ClassNode(
                pos, name = name,
                bases = bases, dict = self.dict, doc = doc_node)

        self.target = ExprNodes.NameNode(pos, name = name)
        self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
bases = self.classobj.bases.args
if len(bases) == 0:
base_class_name = None
base_class_module = None
elif len(bases) == 1:
base = bases[0]
path = []
from ExprNodes import AttributeNode, NameNode
while isinstance(base, AttributeNode):
path.insert(0, base.attribute)
base = base.obj
if isinstance(base, NameNode):
path.insert(0, base.name)
base_class_name = path[-1]
if len(path) > 1:
base_class_module = u'.'.join(path[:-1])
else:
base_class_module = None
else:
error(self.classobj.bases.args.pos, "Invalid base class")
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
return CClassDefNode(self.pos,
visibility = 'private',
module_name = None,
class_name = self.name,
base_class_module = base_class_module,
base_class_name = base_class_name,
decorators = self.decorators,
body = self.body,
in_pxd = False,
doc = self.doc)
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name = self.name, outer_scope = genv)
return cenv
def analyse_declarations(self, env):
class_result = self.classobj
if self.decorators:
from ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [class_result])
self.decorators = None
self.class_result = class_result
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
def analyse_expressions(self, env):
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.metaclass:
self.metaclass = self.metaclass.analyse_expressions(env)
if self.mkw:
self.mkw = self.mkw.analyse_expressions(env)
self.dict = self.dict.analyse_expressions(env)
self.class_result = self.class_result.analyse_expressions(env)
genv = env.global_scope()
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
    def generate_execution_code(self, code):
        # Evaluate bases / keyword args / metaclass, build the class
        # namespace dict, run the class body inside it, create the class
        # object, bind it to the target name, and finally dispose of all
        # temporaries in strict reverse order of evaluation.
        code.pyclass_stack.append(self)
        cenv = self.scope
        if self.bases:
            self.bases.generate_evaluation_code(code)
        if self.mkw:
            self.mkw.generate_evaluation_code(code)
        if self.metaclass:
            self.metaclass.generate_evaluation_code(code)
        self.dict.generate_evaluation_code(code)
        # While the class body executes, attribute assignments/lookups go
        # through the namespace dict, not the (not yet created) class.
        cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
        self.class_cell.generate_evaluation_code(code)
        self.body.generate_execution_code(code)
        self.class_result.generate_evaluation_code(code)
        # Inject the freshly created class into the class cell
        # (presumably the __class__ cell used by methods — confirm against
        # ClassCellInjectorNode).
        self.class_cell.generate_injection_code(
            code, self.class_result.result())
        self.class_cell.generate_disposal_code(code)
        # After creation, scope lookups target the class object itself.
        cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
        self.target.generate_assignment_code(self.class_result, code)
        self.dict.generate_disposal_code(code)
        self.dict.free_temps(code)
        if self.metaclass:
            self.metaclass.generate_disposal_code(code)
            self.metaclass.free_temps(code)
        if self.mkw:
            self.mkw.generate_disposal_code(code)
            self.mkw.free_temps(code)
        if self.bases:
            self.bases.generate_disposal_code(code)
            self.bases.free_temps(code)
        code.pyclass_stack.pop()
class CClassDefNode(ClassDefNode):
    #  An extension type ("cdef class") definition.
    #
    #  visibility         'private' or 'public' or 'extern'
    #  typedef_flag       boolean
    #  api                boolean
    #  module_name        string or None    For import of extern type objects
    #  class_name         string            Unqualified name of class
    #  as_name            string or None    Name to declare as in this scope
    #  base_class_module  string or None    Module containing the base class
    #  base_class_name    string or None    Name of the base class
    #  objstruct_name     string or None    Specified C name of object struct
    #  typeobj_name       string or None    Specified C name of type object
    #  in_pxd             boolean           Is in a .pxd file
    #  decorators         [DecoratorNode]   list of decorators or None
    #  doc                string or None
    #  body               StatNode or None
    #  entry              Symtab.Entry
    #  base_type          PyExtensionType or None
    #  buffer_defaults_node DictNode or None Declares defaults for a buffer
    #  buffer_defaults_pos

    child_attrs = ["body"]
    buffer_defaults_node = None
    buffer_defaults_pos = None
    typedef_flag = False
    api = False
    objstruct_name = None
    typeobj_name = None
    decorators = None
    shadow = False

    def buffer_defaults(self, env):
        # Lazily analyse and cache the buffer option defaults declared on
        # this type (if any); subsequent calls return the cached result.
        if not hasattr(self, '_buffer_defaults'):
            import Buffer
            if self.buffer_defaults_node:
                self._buffer_defaults = Buffer.analyse_buffer_options(
                    self.buffer_defaults_pos,
                    env, [], self.buffer_defaults_node,
                    need_complete=False)
            else:
                self._buffer_defaults = None
        return self._buffer_defaults

    def declare(self, env):
        # Forward-declare the extension type (defining=0, implementing=0)
        # in its home scope so that later references can resolve before
        # full declaration analysis runs.
        if self.module_name and self.visibility != 'extern':
            module_path = self.module_name.split(".")
            home_scope = env.find_imported_module(module_path, self.pos)
            if not home_scope:
                return None
        else:
            home_scope = env

        self.entry = home_scope.declare_c_class(
            name = self.class_name,
            pos = self.pos,
            defining = 0,
            implementing = 0,
            module_name = self.module_name,
            base_type = None,
            objstruct_cname = self.objstruct_name,
            typeobj_cname = self.typeobj_name,
            visibility = self.visibility,
            typedef_flag = self.typedef_flag,
            api = self.api,
            buffer_defaults = self.buffer_defaults(env),
            shadow = self.shadow)

    def analyse_declarations(self, env):
        # Resolve the base class, declare the C class in its home scope,
        # then analyse the class body declarations in the type's scope.
        #print "CClassDefNode.analyse_declarations:", self.class_name
        #print "...visibility =", self.visibility
        #print "...module_name =", self.module_name

        if env.in_cinclude and not self.objstruct_name:
            error(self.pos, "Object struct name specification required for "
                "C class defined in 'extern from' block")
        if self.decorators:
            error(self.pos,
                  "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
        self.base_type = None
        # Now that module imports are cached, we need to
        # import the modules for extern classes.
        if self.module_name:
            self.module = None
            for module in env.cimported_modules:
                if module.name == self.module_name:
                    self.module = module
            if self.module is None:
                self.module = ModuleScope(self.module_name, None, env.context)
                self.module.has_extern_class = 1
                env.add_imported_module(self.module)

        if self.base_class_name:
            if self.base_class_module:
                base_class_scope = env.find_module(self.base_class_module, self.pos)
            else:
                base_class_scope = env
            if self.base_class_name == 'object':
                # extension classes are special and don't need to inherit from object
                if base_class_scope is None or base_class_scope.lookup('object') is None:
                    self.base_class_name = None
                    self.base_class_module = None
                    base_class_scope = None
            if base_class_scope:
                base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
                if base_class_entry:
                    # Validate that the base is a complete, non-final
                    # extension type (or a builtin with a known struct).
                    if not base_class_entry.is_type:
                        error(self.pos, "'%s' is not a type name" % self.base_class_name)
                    elif not base_class_entry.type.is_extension_type and \
                             not (base_class_entry.type.is_builtin_type and
                                  base_class_entry.type.objstruct_cname):
                        error(self.pos, "'%s' is not an extension type" % self.base_class_name)
                    elif not base_class_entry.type.is_complete():
                        error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
                            self.base_class_name, self.class_name))
                    elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
                             base_class_entry.type.is_final_type:
                        error(self.pos, "Base class '%s' of type '%s' is final" % (
                            self.base_class_name, self.class_name))
                    elif base_class_entry.type.is_builtin_type and \
                             base_class_entry.type.name in ('tuple', 'str', 'bytes'):
                        error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
                              % base_class_entry.type.name)
                    else:
                        self.base_type = base_class_entry.type
                if env.directives.get('freelist', 0) > 0:
                    warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)

        has_body = self.body is not None
        if has_body and self.base_type and not self.base_type.scope:
            # To properly initialize inherited attributes, the base type must
            # be analysed before this type.
            self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
            return

        if self.module_name and self.visibility != 'extern':
            module_path = self.module_name.split(".")
            home_scope = env.find_imported_module(module_path, self.pos)
            if not home_scope:
                return
        else:
            home_scope = env

        if self.visibility == 'extern':
            if (self.module_name == '__builtin__' and
                self.class_name in Builtin.builtin_types and
                env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
                warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)

        self.entry = home_scope.declare_c_class(
            name = self.class_name,
            pos = self.pos,
            defining = has_body and self.in_pxd,
            implementing = has_body and not self.in_pxd,
            module_name = self.module_name,
            base_type = self.base_type,
            objstruct_cname = self.objstruct_name,
            typeobj_cname = self.typeobj_name,
            visibility = self.visibility,
            typedef_flag = self.typedef_flag,
            api = self.api,
            buffer_defaults = self.buffer_defaults(env),
            shadow = self.shadow)

        if self.shadow:
            home_scope.lookup(self.class_name).as_variable = self.entry
        if home_scope is not env and self.visibility == 'extern':
            env.add_imported_entry(self.class_name, self.entry, self.pos)
        self.scope = scope = self.entry.type.scope
        if scope is not None:
            scope.directives = env.directives

        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)

        if has_body:
            self.body.analyse_declarations(scope)
            if self.in_pxd:
                scope.defined = 1
            else:
                scope.implemented = 1
        env.allocate_vtable_names(self.entry)

        # Run any declarations that were deferred until this type became
        # available (see the defered_declarations append above).
        for thunk in self.entry.type.defered_declarations:
            thunk()

    def analyse_expressions(self, env):
        if self.body:
            scope = self.entry.type.scope
            self.body = self.body.analyse_expressions(scope)
        return self

    def generate_function_definitions(self, env, code):
        if self.body:
            self.generate_lambda_definitions(self.scope, code)
            self.body.generate_function_definitions(self.scope, code)

    def generate_execution_code(self, code):
        # This is needed to generate evaluation code for
        # default values of method arguments.
        if self.body:
            self.body.generate_execution_code(code)

    def annotate(self, code):
        if self.body:
            self.body.annotate(code)
class PropertyNode(StatNode):
    # Definition of a property in an extension type.
    #
    # name     string
    # doc      EncodedString or None    Doc string
    # entry    Symtab.Entry
    # body     StatListNode

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        """Declare the property on the extension type and analyse its body."""
        entry = env.declare_property(self.name, self.doc, self.pos)
        entry.scope.directives = env.directives
        self.entry = entry
        self.body.analyse_declarations(entry.scope)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # The property definition itself produces no inline execution code.
        pass

    def annotate(self, code):
        self.body.annotate(code)
class GlobalNode(StatNode):
    # Global variable declaration ('global' statement).
    #
    # names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        """Register each listed name as a global in the current scope."""
        for global_name in self.names:
            env.declare_global(global_name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Purely declarative; nothing to execute at runtime.
        pass
class NonlocalNode(StatNode):
    # Nonlocal variable declaration via the 'nonlocal' keyword.
    #
    # names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        """Register each listed name as nonlocal in the current scope."""
        for nonlocal_name in self.names:
            env.declare_nonlocal(nonlocal_name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # Purely declarative; nothing to execute at runtime.
        pass
class ExprStatNode(StatNode):
    # Expression used as a statement.
    #
    # expr   ExprNode

    child_attrs = ["expr"]

    def analyse_declarations(self, env):
        # Intercept the compile-time "cython.declare(x=type, ...)" form:
        # declare the keyword arguments as cdef variables, then turn this
        # statement into a no-op by swapping in PassStatNode behaviour.
        import ExprNodes
        if isinstance(self.expr, ExprNodes.GeneralCallNode):
            func = self.expr.function.as_cython_attribute()
            if func == u'declare':
                args, kwds = self.expr.explicit_args_kwds()
                if len(args):
                    error(self.expr.pos, "Variable names must be specified.")
                for var, type_node in kwds.key_value_pairs:
                    type = type_node.analyse_as_type(env)
                    if type is None:
                        error(type_node.pos, "Unknown type")
                    else:
                        env.declare_var(var.value, type, var.pos, is_cdef = True)
                self.__class__ = PassStatNode

    def analyse_expressions(self, env):
        self.expr.result_is_used = False # hint that .result() may safely be left empty
        self.expr = self.expr.analyse_expressions(env)
        return self

    def nogil_check(self, env):
        # Disposing of an owned Python object requires the GIL.
        if self.expr.type.is_pyobject and self.expr.is_temp:
            self.gil_error()

    gil_message = "Discarding owned Python object"

    def generate_execution_code(self, code):
        self.expr.generate_evaluation_code(code)
        if not self.expr.is_temp and self.expr.result():
            # Non-temp results are emitted as a bare statement so any side
            # effects of the expression still occur.
            code.putln("%s;" % self.expr.result())
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class AssignmentNode(StatNode):
    """Abstract base class for assignment nodes.

    Both expression analysis and code generation are split into two
    sub-phases so that, in a parallel assignment, every right hand side
    can be evaluated before any left hand side is assigned to.
    """

    def analyse_expressions(self, env):
        return self.analyse_types(env)

    def generate_execution_code(self, code):
        self.generate_rhs_evaluation_code(code)
        self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
    #  The simplest case:
    #
    #    a = b
    #
    #  lhs      ExprNode      Left hand side
    #  rhs      ExprNode      Right hand side
    #  first    bool          Is this guaranteed the first assignment to lhs?

    child_attrs = ["lhs", "rhs"]
    first = False
    declaration_only = False

    def analyse_declarations(self, env):
        # Recognise the compile-time declaration forms
        # "x = cython.declare(...)", "x = cython.typedef(...)",
        # "x = cython.struct(...)/union(...)" and
        # "x = cython.fused_type(...)".  If one matches, perform the
        # declaration here; otherwise this is an ordinary assignment.
        import ExprNodes

        # handle declarations of the form x = cython.foo()
        if isinstance(self.rhs, ExprNodes.CallNode):
            func_name = self.rhs.function.as_cython_attribute()
            if func_name:
                args, kwds = self.rhs.explicit_args_kwds()

                if func_name in ['declare', 'typedef']:
                    if len(args) > 2 or kwds is not None:
                        error(self.rhs.pos, "Can only declare one type at a time.")
                        return

                    type = args[0].analyse_as_type(env)
                    if type is None:
                        error(args[0].pos, "Unknown type")
                        return
                    lhs = self.lhs
                    if func_name == 'declare':
                        # Target may be a single name or a tuple of names.
                        if isinstance(lhs, ExprNodes.NameNode):
                            vars = [(lhs.name, lhs.pos)]
                        elif isinstance(lhs, ExprNodes.TupleNode):
                            vars = [(var.name, var.pos) for var in lhs.args]
                        else:
                            error(lhs.pos, "Invalid declaration")
                            return
                        for var, pos in vars:
                            env.declare_var(var, type, pos, is_cdef = True)
                        if len(args) == 2:
                            # we have a value
                            self.rhs = args[1]
                        else:
                            self.declaration_only = True
                    else:
                        self.declaration_only = True
                        if not isinstance(lhs, ExprNodes.NameNode):
                            error(lhs.pos, "Invalid declaration.")
                        env.declare_typedef(lhs.name, type, self.pos, visibility='private')

                elif func_name in ['struct', 'union']:
                    self.declaration_only = True
                    if len(args) > 0 or kwds is None:
                        error(self.rhs.pos, "Struct or union members must be given by name.")
                        return
                    members = []
                    for member, type_node in kwds.key_value_pairs:
                        type = type_node.analyse_as_type(env)
                        if type is None:
                            error(type_node.pos, "Unknown type")
                        else:
                            members.append((member.value, type, member.pos))
                    # Abort if any member's type failed to resolve.
                    if len(members) < len(kwds.key_value_pairs):
                        return
                    if not isinstance(self.lhs, ExprNodes.NameNode):
                        error(self.lhs.pos, "Invalid declaration.")
                    name = self.lhs.name
                    scope = StructOrUnionScope(name)
                    env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
                    for member, type, pos in members:
                        scope.declare_var(member, type, pos)

                elif func_name == 'fused_type':
                    # dtype = cython.fused_type(...)
                    self.declaration_only = True
                    if kwds:
                        error(self.rhs.function.pos,
                              "fused_type does not take keyword arguments")

                    fusednode = FusedTypeNode(self.rhs.pos,
                                              name = self.lhs.name, types=args)
                    fusednode.analyse_declarations(env)

        if self.declaration_only:
            return
        else:
            self.lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp = 0):
        import ExprNodes

        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)

        # Broadcasting on either side of a memoryview slice assignment
        # forces broadcasting on both.
        if self.lhs.memslice_broadcast or self.rhs.memslice_broadcast:
            self.lhs.memslice_broadcast = True
            self.rhs.memslice_broadcast = True

        is_index_node = isinstance(self.lhs, ExprNodes.IndexNode)
        if (is_index_node and not self.rhs.type.is_memoryviewslice and
            (self.lhs.memslice_slice or self.lhs.is_memslice_copy) and
            (self.lhs.type.dtype.assignable_from(self.rhs.type) or
             self.rhs.type.is_pyobject)):
            # scalar slice assignment
            self.lhs.is_memslice_scalar_assignment = True
            dtype = self.lhs.type.dtype
        else:
            dtype = self.lhs.type

        rhs = self.rhs.coerce_to(dtype, env)
        if use_temp or rhs.is_attribute or (
            not rhs.is_name and not rhs.is_literal and
            rhs.type.is_pyobject):
            # things like (cdef) attribute access are not safe (traverses pointers)
            rhs = rhs.coerce_to_temp(env)
        elif rhs.type.is_pyobject:
            rhs = rhs.coerce_to_simple(env)
        self.rhs = rhs
        return self

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        self.lhs.generate_assignment_code(self.rhs, code)

    def generate_function_definitions(self, env, code):
        self.rhs.generate_function_definitions(env, code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)
class CascadedAssignmentNode(AssignmentNode):
    #  An assignment with multiple left hand sides:
    #
    #    a = b = c
    #
    #  lhs_list   [ExprNode]   Left hand sides
    #  rhs        ExprNode     Right hand sides
    #
    #  Used internally:
    #
    #  coerced_rhs_list   [ExprNode]   RHS coerced to type of each LHS

    child_attrs = ["lhs_list", "rhs", "coerced_rhs_list"]
    coerced_rhs_list = None

    def analyse_declarations(self, env):
        for lhs in self.lhs_list:
            lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp = 0):
        """Analyse the shared RHS once, then coerce a CloneNode of it to
        the type of each LHS.

        The analysed RHS is wrapped in a ProxyNode so every clone shares
        a single evaluation of the right hand side.
        """
        from ExprNodes import CloneNode, ProxyNode

        rhs = self.rhs.analyse_types(env)
        # Hold the shared value in a temp unless it is safe to reference
        # repeatedly (simple names/literals).
        if use_temp or rhs.is_attribute or (
                not rhs.is_name and not rhs.is_literal and
                rhs.type.is_pyobject):
            rhs = rhs.coerce_to_temp(env)
        else:
            rhs = rhs.coerce_to_simple(env)
        self.rhs = ProxyNode(rhs)

        self.coerced_rhs_list = []
        for i, lhs in enumerate(self.lhs_list):
            # BUG FIX: analyse_target_types() may return a replacement
            # node; previously its result was discarded, leaving a stale
            # node in lhs_list (cf. SingleAssignmentNode.analyse_types,
            # which assigns the result back).
            lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
            lhs.gil_assignment_check(env)
            rhs = CloneNode(self.rhs)
            rhs = rhs.coerce_to(lhs.type, env)
            self.coerced_rhs_list.append(rhs)
        return self

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        for i in range(len(self.lhs_list)):
            lhs = self.lhs_list[i]
            rhs = self.coerced_rhs_list[i]
            rhs.generate_evaluation_code(code)
            lhs.generate_assignment_code(rhs, code)
            # Assignment has disposed of the cloned RHS
        self.rhs.generate_disposal_code(code)
        self.rhs.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.rhs.generate_function_definitions(env, code)

    def annotate(self, code):
        for i in range(len(self.lhs_list)):
            self.lhs_list[i].annotate(code)
            self.coerced_rhs_list[i].annotate(code)
        self.rhs.annotate(code)
class ParallelAssignmentNode(AssignmentNode):
    """A combined packing/unpacking assignment:

        a, b, c = d, e, f

    The parser has already rearranged this into the constituent
    assignments (a = d; b = e; c = f), but all of the right hand sides
    must be evaluated before assigning to any of the left hand sides.

    stats    [AssignmentNode]    The constituent assignments
    """

    child_attrs = ["stats"]

    def analyse_declarations(self, env):
        for assignment in self.stats:
            assignment.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.stats = [assignment.analyse_types(env, use_temp = 1)
                      for assignment in self.stats]
        return self

    def generate_execution_code(self, code):
        # Two passes: evaluate every RHS first, then perform the
        # assignments, preserving parallel-assignment semantics.
        for assignment in self.stats:
            assignment.generate_rhs_evaluation_code(code)
        for assignment in self.stats:
            assignment.generate_assignment_code(code)

    def generate_function_definitions(self, env, code):
        for assignment in self.stats:
            assignment.generate_function_definitions(env, code)

    def annotate(self, code):
        for assignment in self.stats:
            assignment.annotate(code)
class InPlaceAssignmentNode(AssignmentNode):
    #  An in place arithmetic operand:
    #
    #    a += b
    #    a -= b
    #    ...
    #
    #  lhs      ExprNode      Left hand side
    #  rhs      ExprNode      Right hand side
    #  operator char          one of "+-*/%^&|"
    #
    #  This code is a bit tricky because in order to obey Python
    #  semantics the sub-expressions (e.g. indices) of the lhs must
    #  not be evaluated twice. So we must re-use the values calculated
    #  in evaluation phase for the assignment phase as well.
    #  Fortunately, the type of the lhs node is fairly constrained
    #  (it must be a NameNode, AttributeNode, or IndexNode).

    child_attrs = ["lhs", "rhs"]

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_types(self, env):
        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)

        # When assigning to a fully indexed buffer or memoryview, coerce the rhs
        if (self.lhs.is_subscript and
            (self.lhs.memslice_index or self.lhs.is_buffer_access)):
            self.rhs = self.rhs.coerce_to(self.lhs.type, env)
        elif self.lhs.type.is_string and self.operator in '+-':
            # use pointer arithmetic for char* LHS instead of string concat
            self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
        return self

    def generate_execution_code(self, code):
        # Evaluate RHS and the LHS sub-expressions once each, emit the
        # in-place C operation, then dispose in reverse order.
        self.rhs.generate_evaluation_code(code)
        self.lhs.generate_subexpr_evaluation_code(code)
        c_op = self.operator
        # Map Python-only operators to their C equivalents (or reject).
        if c_op == "//":
            c_op = "/"
        elif c_op == "**":
            error(self.pos, "No C inplace power operator")
        if self.lhs.is_subscript and self.lhs.is_buffer_access:
            if self.lhs.type.is_pyobject:
                error(self.pos, "In-place operators not allowed on object buffers in this release.")
            if (c_op in ('/', '%') and self.lhs.type.is_int
                and not code.globalstate.directives['cdivision']):
                error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
            self.lhs.generate_buffer_setitem_code(self.rhs, code, c_op)
        else:
            # C++
            # TODO: make sure overload is declared
            code.putln("%s %s= %s;" % (self.lhs.result(), c_op, self.rhs.result()))
        self.lhs.generate_subexpr_disposal_code(code)
        self.lhs.free_subexpr_temps(code)
        self.rhs.generate_disposal_code(code)
        self.rhs.free_temps(code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)

    def create_binop_node(self):
        # Build the equivalent plain binary operation node
        # (presumably used by transforms that rewrite "a op= b" as
        # "a = a op b" — confirm against callers).
        import ExprNodes
        return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
class PrintStatNode(StatNode):
    #  print statement
    #
    #  arg_tuple         TupleNode
    #  stream            ExprNode or None (stdout)
    #  append_newline    boolean

    child_attrs = ["arg_tuple", "stream"]

    def analyse_expressions(self, env):
        if self.stream:
            stream = self.stream.analyse_expressions(env)
            self.stream = stream.coerce_to_pyobject(env)
        arg_tuple = self.arg_tuple.analyse_expressions(env)
        self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
        env.use_utility_code(printing_utility_code)
        # Single-argument print with a trailing newline uses the faster
        # __Pyx_PrintOne helper.
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            env.use_utility_code(printing_one_utility_code)
        return self

    nogil_check = Node.gil_error
    gil_message = "Python print statement"

    def generate_execution_code(self, code):
        if self.stream:
            self.stream.generate_evaluation_code(code)
            stream_result = self.stream.py_result()
        else:
            # NULL stream selects the default (stdout) in the C helpers.
            stream_result = '0'
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            # Fast path: print exactly one object plus newline.
            arg = self.arg_tuple.args[0]
            arg.generate_evaluation_code(code)

            code.putln(
                "if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
                    stream_result,
                    arg.py_result(),
                    code.error_goto(self.pos)))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        else:
            # General path: print the whole argument tuple.
            self.arg_tuple.generate_evaluation_code(code)
            code.putln(
                "if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
                    stream_result,
                    self.arg_tuple.py_result(),
                    self.append_newline,
                    code.error_goto(self.pos)))
            self.arg_tuple.generate_disposal_code(code)
            self.arg_tuple.free_temps(code)

        if self.stream:
            self.stream.generate_disposal_code(code)
            self.stream.free_temps(code)

    def generate_function_definitions(self, env, code):
        if self.stream:
            self.stream.generate_function_definitions(env, code)
        self.arg_tuple.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.stream:
            self.stream.annotate(code)
        self.arg_tuple.annotate(code)
class ExecStatNode(StatNode):
    #  exec statement
    #
    #  args     [ExprNode]

    child_attrs = ["args"]

    def analyse_expressions(self, env):
        for i, arg in enumerate(self.args):
            arg = arg.analyse_expressions(env)
            arg = arg.coerce_to_pyobject(env)
            self.args[i] = arg
        env.use_utility_code(Builtin.pyexec_utility_code)
        return self

    nogil_check = Node.gil_error
    gil_message = "Python exec statement"

    def generate_execution_code(self, code):
        args = []
        for arg in self.args:
            arg.generate_evaluation_code(code)
            args.append( arg.py_result() )
        # __Pyx_PyExec3 always takes three arguments (code, globals,
        # locals); pad missing globals/locals with NULL.
        args = tuple(args + ['0', '0'][:3-len(args)])
        temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
        code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % (
                (temp_result,) + args))
        for arg in self.args:
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        code.putln(
            code.error_goto_if_null(temp_result, self.pos))
        code.put_gotref(temp_result)
        # The result of exec is not used; release it immediately.
        code.put_decref_clear(temp_result, py_object_type)
        code.funcstate.release_temp(temp_result)

    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)
class DelStatNode(StatNode):
    #  del statement
    #
    #  args     [ExprNode]

    child_attrs = ["args"]
    ignore_nonexisting = False

    def analyse_declarations(self, env):
        for arg in self.args:
            arg.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        # Validate that each target is deletable: Python objects,
        # memoryview slice names, heap C++ objects via pointer, or
        # bytearray items.  Everything else is an error.
        for i, arg in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_expression(env, None)
            if arg.type.is_pyobject or (arg.is_name and
                                        arg.type.is_memoryviewslice):
                if arg.is_name and arg.entry.is_cglobal:
                    error(arg.pos, "Deletion of global C variable")
            elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
                self.cpp_check(env)
            elif arg.type.is_cpp_class:
                error(arg.pos, "Deletion of non-heap C++ object")
            elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
                pass # del ba[i]
            else:
                error(arg.pos, "Deletion of non-Python, non-C++ object")
            #arg.release_target_temp(env)
        return self

    def nogil_check(self, env):
        for arg in self.args:
            if arg.type.is_pyobject:
                self.gil_error()

    gil_message = "Deleting Python object"

    def generate_execution_code(self, code):
        for arg in self.args:
            if (arg.type.is_pyobject or
                    arg.type.is_memoryviewslice or
                    arg.is_subscript and arg.base.type is Builtin.bytearray_type):
                arg.generate_deletion_code(
                    code, ignore_nonexisting=self.ignore_nonexisting)
            elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
                arg.generate_result_code(code)
                code.putln("delete %s;" % arg.result())
            # else error reported earlier

    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)
class PassStatNode(StatNode):
    #  pass statement

    child_attrs = []

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # A pass statement generates no code.
        pass
class IndirectionNode(StatListNode):
    """
    This adds an indirection so that the node can be shared and a subtree can
    be removed at any time by clearing self.stats.
    """

    def __init__(self, stats):
        # Position is borrowed from the first wrapped statement.
        super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
class BreakStatNode(StatNode):
    """'break' statement: jump to the innermost loop's break label."""

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if code.break_label:
            code.put_goto(code.break_label)
        else:
            error(self.pos, "break statement not inside loop")
class ContinueStatNode(StatNode):
    """'continue' statement: jump to the innermost loop's continue label."""

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if code.funcstate.in_try_finally:
            # A goto out of the try block of try...finally would skip the
            # finally clause.
            error(self.pos, "continue statement inside try of try...finally")
            return
        if not code.continue_label:
            error(self.pos, "continue statement not inside loop")
            return
        code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
    #  return statement
    #
    #  value         ExprNode or None
    #  return_type   PyrexType
    #  in_generator  return inside of generator => raise StopIteration

    child_attrs = ["value"]
    is_terminator = True
    in_generator = False

    # Whether we are in a parallel section
    in_parallel = False

    def analyse_expressions(self, env):
        # Type-check the returned value (if any) against the enclosing
        # function's declared return type and coerce it accordingly.
        return_type = env.return_type
        self.return_type = return_type
        if not return_type:
            error(self.pos, "Return not inside a function body")
            return self
        if self.value:
            self.value = self.value.analyse_types(env)
            if return_type.is_void or return_type.is_returncode:
                error(self.value.pos,
                      "Return with value in void function")
            else:
                self.value = self.value.coerce_to(env.return_type, env)
        else:
            if (not return_type.is_void
                and not return_type.is_pyobject
                and not return_type.is_returncode):
                error(self.pos, "Return value required")
        return self

    def nogil_check(self, env):
        if self.return_type.is_pyobject:
            self.gil_error()

    gil_message = "Returning Python object"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        if not self.return_type:
            # error reported earlier
            return
        if self.return_type.is_pyobject:
            # Drop any previous reference held in the return value slot
            # before storing the new one.
            code.put_xdecref(Naming.retval_cname,
                             self.return_type)

        if self.value:
            self.value.generate_evaluation_code(code)
            if self.return_type.is_memoryviewslice:
                import MemoryView
                MemoryView.put_acquire_memoryviewslice(
                    lhs_cname=Naming.retval_cname,
                    lhs_type=self.return_type,
                    lhs_pos=self.value.pos,
                    rhs=self.value,
                    code=code,
                    have_gil=self.in_nogil_context)
            elif self.in_generator:
                # return value == raise StopIteration(value), but uncatchable
                code.putln(
                    "%s = NULL; PyErr_SetObject(PyExc_StopIteration, %s);" % (
                        Naming.retval_cname,
                        self.value.result_as(self.return_type)))
                self.value.generate_disposal_code(code)
            else:
                self.value.make_owned_reference(code)
                code.putln(
                    "%s = %s;" % (
                        Naming.retval_cname,
                        self.value.result_as(self.return_type)))
                self.value.generate_post_assignment_code(code)
            self.value.free_temps(code)
        else:
            if self.return_type.is_pyobject:
                code.put_init_to_py_none(Naming.retval_cname, self.return_type)
            elif self.return_type.is_returncode:
                self.put_return(code, self.return_type.default_value)

        # Release temps still holding references before jumping to the
        # function's common return label.
        for cname, type in code.funcstate.temps_holding_reference():
            code.put_decref_clear(cname, type)

        code.put_goto(code.return_label)

    def put_return(self, code, value):
        if self.in_parallel:
            code.putln_openmp("#pragma omp critical(__pyx_returning)")
        code.putln("%s = %s;" % (Naming.retval_cname, value))

    def generate_function_definitions(self, env, code):
        if self.value is not None:
            self.value.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.value:
            self.value.annotate(code)
class RaiseStatNode(StatNode):
    #  raise statement
    #
    #  exc_type    ExprNode or None
    #  exc_value   ExprNode or None
    #  exc_tb      ExprNode or None
    #  cause       ExprNode or None

    child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
    is_terminator = True

    def analyse_expressions(self, env):
        if self.exc_type:
            exc_type = self.exc_type.analyse_types(env)
            self.exc_type = exc_type.coerce_to_pyobject(env)
        if self.exc_value:
            exc_value = self.exc_value.analyse_types(env)
            self.exc_value = exc_value.coerce_to_pyobject(env)
        if self.exc_tb:
            exc_tb = self.exc_tb.analyse_types(env)
            self.exc_tb = exc_tb.coerce_to_pyobject(env)
        if self.cause:
            cause = self.cause.analyse_types(env)
            self.cause = cause.coerce_to_pyobject(env)
        # special cases for builtin exceptions
        self.builtin_exc_name = None
        if self.exc_type and not self.exc_value and not self.exc_tb:
            exc = self.exc_type
            import ExprNodes
            if (isinstance(exc, ExprNodes.SimpleCallNode) and
                not (exc.args or (exc.arg_tuple is not None and
                                  exc.arg_tuple.args))):
                exc = exc.function # extract the exception type
            if exc.is_name and exc.entry.is_builtin:
                self.builtin_exc_name = exc.name
                if self.builtin_exc_name == 'MemoryError':
                    self.exc_type = None # has a separate implementation
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        # Fast path: a bare "raise MemoryError" maps directly onto
        # PyErr_NoMemory().
        if self.builtin_exc_name == 'MemoryError':
            code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
            return

        # Evaluate each provided component; absent ones are passed to
        # __Pyx_Raise as NULL ("0").
        if self.exc_type:
            self.exc_type.generate_evaluation_code(code)
            type_code = self.exc_type.py_result()
        else:
            type_code = "0"
        if self.exc_value:
            self.exc_value.generate_evaluation_code(code)
            value_code = self.exc_value.py_result()
        else:
            value_code = "0"
        if self.exc_tb:
            self.exc_tb.generate_evaluation_code(code)
            tb_code = self.exc_tb.py_result()
        else:
            tb_code = "0"
        if self.cause:
            self.cause.generate_evaluation_code(code)
            cause_code = self.cause.py_result()
        else:
            cause_code = "0"
        code.globalstate.use_utility_code(raise_utility_code)
        code.putln(
            "__Pyx_Raise(%s, %s, %s, %s);" % (
                type_code,
                value_code,
                tb_code,
                cause_code))
        for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
            if obj:
                obj.generate_disposal_code(code)
                obj.free_temps(code)
        code.putln(
            code.error_goto(self.pos))

    def generate_function_definitions(self, env, code):
        if self.exc_type is not None:
            self.exc_type.generate_function_definitions(env, code)
        if self.exc_value is not None:
            self.exc_value.generate_function_definitions(env, code)
        if self.exc_tb is not None:
            self.exc_tb.generate_function_definitions(env, code)
        if self.cause is not None:
            self.cause.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.exc_type:
            self.exc_type.annotate(code)
        if self.exc_value:
            self.exc_value.annotate(code)
        if self.exc_tb:
            self.exc_tb.annotate(code)
        if self.cause:
            self.cause.annotate(code)
class ReraiseStatNode(StatNode):
    # Bare "raise" statement: re-raise the exception currently being
    # handled.

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        vars = code.funcstate.exc_vars
        if vars:
            # An enclosing except clause holds the exception in local
            # temps: give the three references back to the thread state
            # and clear the temps so they are not decref'ed again.
            code.globalstate.use_utility_code(restore_exception_utility_code)
            code.put_giveref(vars[0])
            code.put_giveref(vars[1])
            # fresh exceptions may not have a traceback yet (-> finally!)
            code.put_xgiveref(vars[2])
            code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(vars))
            for varname in vars:
                code.put("%s = 0; " % varname)
            code.putln()
            code.putln(code.error_goto(self.pos))
        else:
            # No locally saved exception state: defer to the runtime's
            # notion of the currently active exception.
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
            code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
class AssertStatNode(StatNode):
    #  assert statement
    #
    #  cond    ExprNode
    #  value   ExprNode or None

    child_attrs = ["cond", "value"]

    def analyse_expressions(self, env):
        self.cond = self.cond.analyse_boolean_expression(env)
        if self.value:
            value = self.value.analyse_types(env)
            if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
                # prevent tuple values from being interpreted as argument value tuples
                from ExprNodes import TupleNode
                value = TupleNode(value.pos, args=[value], slow=True)
                self.value = value.analyse_types(env, skip_children=True)
            else:
                self.value = value.coerce_to_pyobject(env)
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        # Assertions can be compiled out entirely via the
        # CYTHON_WITHOUT_ASSERTIONS macro, and are skipped at runtime when
        # the interpreter runs in optimised mode (Py_OptimizeFlag), matching
        # the semantics of Python's own assert statement.
        code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
        code.putln("if (unlikely(!Py_OptimizeFlag)) {")
        self.cond.generate_evaluation_code(code)
        code.putln(
            "if (unlikely(!%s)) {" %
                self.cond.result())
        if self.value:
            self.value.generate_evaluation_code(code)
            code.putln(
                "PyErr_SetObject(PyExc_AssertionError, %s);" %
                    self.value.py_result())
            self.value.generate_disposal_code(code)
            self.value.free_temps(code)
        else:
            code.putln(
                "PyErr_SetNone(PyExc_AssertionError);")
        code.putln(
                code.error_goto(self.pos))
        code.putln(
            "}")
        self.cond.generate_disposal_code(code)
        self.cond.free_temps(code)
        code.putln(
            "}")
        code.putln("#endif")

    def generate_function_definitions(self, env, code):
        self.cond.generate_function_definitions(env, code)
        if self.value is not None:
            self.value.generate_function_definitions(env, code)

    def annotate(self, code):
        self.cond.annotate(code)
        if self.value:
            self.value.annotate(code)
class IfStatNode(StatNode):
    #  if statement
    #
    #  if_clauses   [IfClauseNode]
    #  else_clause  StatNode or None

    child_attrs = ["if_clauses", "else_clause"]

    def analyse_declarations(self, env):
        # declare names bound in every branch, then in the else part
        for clause in self.if_clauses:
            clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.if_clauses = [clause.analyse_expressions(env)
                           for clause in self.if_clauses]
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        # every clause that runs to completion jumps to this shared label
        end_label = code.new_label()
        for clause in self.if_clauses:
            clause.generate_execution_code(code, end_label)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(end_label)

    def generate_function_definitions(self, env, code):
        for clause in self.if_clauses:
            clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        for clause in self.if_clauses:
            clause.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
class IfClauseNode(Node):
    #  A single "if" or "elif" clause of an if statement.
    #
    #  condition   ExprNode
    #  body        StatNode

    child_attrs = ["condition", "body"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.condition = self.condition.analyse_temp_boolean_expression(env)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_execution_code(self, code, end_label):
        cond = self.condition
        cond.generate_evaluation_code(code)
        code.putln("if (%s) {" % cond.result())
        # the boolean temp is no longer needed once the branch is taken
        cond.generate_disposal_code(code)
        cond.free_temps(code)
        self.body.generate_execution_code(code)
        if not self.body.is_terminator:
            # skip the remaining clauses of the enclosing if statement
            code.put_goto(end_label)
        code.putln("}")

    def generate_function_definitions(self, env, code):
        self.condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        self.condition.annotate(code)
        self.body.annotate(code)
class SwitchCaseNode(StatNode):
    #  Generated in the optimization of an if-elif-else node
    #
    #  conditions    [ExprNode]
    #  body          StatNode

    child_attrs = ['conditions', 'body']

    def generate_execution_code(self, code):
        # emit one C "case" label per condition, then the shared body
        for condition in self.conditions:
            code.mark_pos(condition.pos)
            condition.generate_evaluation_code(code)
            code.putln("case %s:" % condition.result())
        self.body.generate_execution_code(code)
        code.putln("break;")

    def generate_function_definitions(self, env, code):
        for condition in self.conditions:
            condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        for condition in self.conditions:
            condition.annotate(code)
        self.body.annotate(code)
class SwitchStatNode(StatNode):
    #  Generated in the optimization of an if-elif-else node
    #
    #  test          ExprNode
    #  cases         [SwitchCaseNode]
    #  else_clause   StatNode or None

    child_attrs = ['test', 'cases', 'else_clause']

    def generate_execution_code(self, code):
        # Emit a native C switch over the test expression.
        self.test.generate_evaluation_code(code)
        code.putln("switch (%s) {" % self.test.result())
        for case in self.cases:
            case.generate_execution_code(code)
        if self.else_clause is not None:
            code.putln("default:")
            self.else_clause.generate_execution_code(code)
            code.putln("break;")
        else:
            # Always generate a default clause to prevent C compiler warnings
            # about unmatched enum values (it was not the user who decided to
            # generate the switch statement, so shouldn't be bothered).
            code.putln("default: break;")
        code.putln("}")

    def generate_function_definitions(self, env, code):
        self.test.generate_function_definitions(env, code)
        for case in self.cases:
            case.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.test.annotate(code)
        for case in self.cases:
            case.annotate(code)
        if self.else_clause is not None:
            self.else_clause.annotate(code)
class LoopNode(object):
    # Marker mixin shared by the loop statement nodes (WhileStatNode,
    # ForInStatNode, ForFromStatNode); presumably used elsewhere for
    # isinstance() checks that treat all loops uniformly — not visible
    # from this part of the file.
    pass
class WhileStatNode(LoopNode, StatNode):
    #  while statement
    #
    #  condition    ExprNode
    #  body         StatNode
    #  else_clause  StatNode

    child_attrs = ["condition", "body", "else_clause"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        if self.condition:
            self.condition = self.condition.analyse_temp_boolean_expression(env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        # push fresh break/continue labels for the duration of the loop
        old_loop_labels = code.new_loop_labels()
        # compile as "while (1)" with an explicit conditional break so the
        # condition temp can be evaluated and disposed of inside the loop
        code.putln(
            "while (1) {")
        if self.condition:
            self.condition.generate_evaluation_code(code)
            self.condition.generate_disposal_code(code)
            code.putln(
                "if (!%s) break;" %
                    self.condition.result())
            self.condition.free_temps(code)
        self.body.generate_execution_code(code)
        # "continue" jumps here, at the bottom of the loop body
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            # the else clause runs only when the loop exits normally;
            # "break" jumps past it to break_label below
            code.mark_pos(self.else_clause.pos)
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)

    def generate_function_definitions(self, env, code):
        if self.condition:
            self.condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.condition:
            self.condition.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
class DictIterationNextNode(Node):
    # Helper node for calling PyDict_Next() inside of a WhileStatNode
    # and checking the dictionary size for changes.  Created in
    # Optimize.py.

    child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
                   'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
                   'key_target', 'value_target', 'tuple_target', 'is_dict_flag']

    # each *_ref holds the raw PyObject* temp filled in by the iteration
    # helper; the corresponding coerced_*_var coerces it to the target type
    coerced_key_var = key_ref = None
    coerced_value_var = value_ref = None
    coerced_tuple_var = tuple_ref = None

    def __init__(self, dict_obj, expected_size, pos_index_var,
                 key_target, value_target, tuple_target, is_dict_flag):
        Node.__init__(
            self, dict_obj.pos,
            dict_obj = dict_obj,
            expected_size = expected_size,
            pos_index_var = pos_index_var,
            key_target = key_target,
            value_target = value_target,
            tuple_target = tuple_target,
            is_dict_flag = is_dict_flag,
            is_temp = True,
            type = PyrexTypes.c_bint_type)

    def analyse_expressions(self, env):
        import ExprNodes
        self.dict_obj = self.dict_obj.analyse_types(env)
        self.expected_size = self.expected_size.analyse_types(env)
        if self.pos_index_var:
            self.pos_index_var = self.pos_index_var.analyse_types(env)
        # For each requested target (key / value / (key, value) tuple),
        # allocate a py_object temp to receive the raw result and a
        # coercion node that converts it to the target's type.
        if self.key_target:
            self.key_target = self.key_target.analyse_target_types(env)
            self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
            self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
        if self.value_target:
            self.value_target = self.value_target.analyse_target_types(env)
            self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
            self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
        if self.tuple_target:
            self.tuple_target = self.tuple_target.analyse_target_types(env)
            self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
            self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
        self.is_dict_flag = self.is_dict_flag.analyse_types(env)
        return self

    def generate_function_definitions(self, env, code):
        self.dict_obj.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
        self.dict_obj.generate_evaluation_code(code)

        # Pass the address of a temp for each wanted component, or NULL
        # for components the loop does not bind.
        assignments = []
        temp_addresses = []
        for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
                                    (self.value_ref, self.coerced_value_var, self.value_target),
                                    (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
            if target is None:
                addr = 'NULL'
            else:
                assignments.append((var, result, target))
                var.allocate(code)
                addr = '&%s' % var.result()
            temp_addresses.append(addr)

        # __Pyx_dict_iter_next() returns 0 at end of iteration, -1 on
        # error (e.g. dict size changed), and >0 when an item was fetched.
        result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
        code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
            result_temp,
            self.dict_obj.py_result(),
            self.expected_size.result(),
            self.pos_index_var.result(),
            temp_addresses[0],
            temp_addresses[1],
            temp_addresses[2],
            self.is_dict_flag.result()
        ))
        code.putln("if (unlikely(%s == 0)) break;" % result_temp)
        code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
        code.funcstate.release_temp(result_temp)

        # evaluate all coercions before the assignments
        for var, result, target in assignments:
            code.put_gotref(var.result())
        for var, result, target in assignments:
            result.generate_evaluation_code(code)
        for var, result, target in assignments:
            target.generate_assignment_code(result, code)
            var.release(code)
def ForStatNode(pos, **kw):
    # Factory: pick the node class matching the kind of for loop.
    # Python-style loops ("for x in seq") carry an 'iterator' keyword,
    # while legacy "for i from a <= i < b" loops do not.
    return ForInStatNode(pos, **kw) if 'iterator' in kw else ForFromStatNode(pos, **kw)
class ForInStatNode(LoopNode, StatNode):
    #  for statement
    #
    #  target        ExprNode
    #  iterator      IteratorNode
    #  body          StatNode
    #  else_clause   StatNode
    #  item          NextNode       used internally

    child_attrs = ["target", "iterator", "body", "else_clause"]
    item = None

    def analyse_declarations(self, env):
        import ExprNodes
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
        self.item = ExprNodes.NextNode(self.iterator)

    def analyse_expressions(self, env):
        self.target = self.target.analyse_target_types(env)
        self.iterator = self.iterator.analyse_expressions(env)
        import ExprNodes
        self.item = ExprNodes.NextNode(self.iterator) # must rewrap after analysis
        self.item = self.item.analyse_expressions(env)
        if (self.iterator.type.is_ptr or self.iterator.type.is_array) and \
            self.target.type.assignable_from(self.iterator.type):
            # C array slice optimization.
            pass
        else:
            self.item = self.item.coerce_to(self.target.type, env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        self.iterator.generate_evaluation_code(code)
        # infinite C loop; the NextNode's generated code breaks out of it
        # when the iterator is exhausted
        code.putln("for (;;) {")
        self.item.generate_evaluation_code(code)
        self.target.generate_assignment_code(self.item, code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)

        if self.else_clause:
            # in nested loops, the 'else' block can contain a
            # 'continue' statement for the outer loop, but we may need
            # to generate cleanup code before taking that path, so we
            # intercept it here
            orig_continue_label = code.continue_label
            code.continue_label = code.new_label('outer_continue')

            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")

            if code.label_used(code.continue_label):
                # intercepted 'continue': dispose of the iterator first,
                # then resume the outer loop
                code.put_goto(break_label)
                code.put_label(code.continue_label)
                self.iterator.generate_disposal_code(code)
                code.put_goto(orig_continue_label)
            code.set_loop_labels(old_loop_labels)

        if code.label_used(break_label):
            code.put_label(break_label)
        self.iterator.generate_disposal_code(code)
        self.iterator.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.iterator.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.iterator.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
        self.item.annotate(code)
class ForFromStatNode(LoopNode, StatNode):
    #  for name from expr rel name rel expr
    #
    #  target        NameNode
    #  bound1        ExprNode
    #  relation1     string
    #  relation2     string
    #  bound2        ExprNode
    #  step          ExprNode or None
    #  body          StatNode
    #  else_clause   StatNode or None
    #
    #  Used internally:
    #
    #  from_range         bool
    #  is_py_target       bool
    #  loopvar_node       ExprNode (usually a NameNode or temp node)
    #  py_loopvar_node    PyTempNode or None

    child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]

    is_py_target = False
    loopvar_node = None
    py_loopvar_node = None
    from_range = False

    gil_message = "For-loop using object bounds or target"

    def nogil_check(self, env):
        # object bounds/targets require refcounting, hence the GIL
        for x in (self.target, self.bound1, self.bound2):
            if x.type.is_pyobject:
                self.gil_error()

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        import ExprNodes
        self.target = self.target.analyse_target_types(env)
        self.bound1 = self.bound1.analyse_types(env)
        self.bound2 = self.bound2.analyse_types(env)
        if self.step is not None:
            if isinstance(self.step, ExprNodes.UnaryMinusNode):
                warning(self.step.pos, "Probable infinite loop in for-from-by statement. Consider switching the directions of the relations.", 2)
            self.step = self.step.analyse_types(env)

        # Determine a common C loop variable type that can hold the
        # target, both bounds and the step without losing precision.
        if self.target.type.is_numeric:
            loop_type = self.target.type
        else:
            loop_type = PyrexTypes.c_int_type
        if not self.bound1.type.is_pyobject:
            loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
        if not self.bound2.type.is_pyobject:
            loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
        if self.step is not None and not self.step.type.is_pyobject:
            loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
        self.bound1 = self.bound1.coerce_to(loop_type, env)
        self.bound2 = self.bound2.coerce_to(loop_type, env)
        # keep the upper bound and step in temps unless they are literals,
        # so they are only evaluated once
        if not self.bound2.is_literal:
            self.bound2 = self.bound2.coerce_to_temp(env)
        if self.step is not None:
            self.step = self.step.coerce_to(loop_type, env)
            if not self.step.is_literal:
                self.step = self.step.coerce_to_temp(env)

        target_type = self.target.type
        if not (target_type.is_pyobject or target_type.is_numeric):
            error(self.target.pos,
                "for-from loop variable must be c numeric type or Python object")
        if target_type.is_numeric:
            # C target: loop directly on the target variable
            self.is_py_target = False
            if isinstance(self.target, ExprNodes.IndexNode) and self.target.is_buffer_access:
                raise error(self.pos, "Buffer indexing not allowed as for loop target.")
            self.loopvar_node = self.target
            self.py_loopvar_node = None
        else:
            # Python target: loop on a hidden C temp and assign its
            # (coerced) value to the target on each iteration
            self.is_py_target = True
            c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
            self.loopvar_node = c_loopvar_node
            self.py_loopvar_node = \
                ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        from_range = self.from_range
        self.bound1.generate_evaluation_code(code)
        self.bound2.generate_evaluation_code(code)
        # relation1 determines whether the start bound is inclusive and
        # which direction the loop counts in (see relation_table)
        offset, incop = self.relation_table[self.relation1]
        if self.step is not None:
            self.step.generate_evaluation_code(code)
            step = self.step.result()
            # turn "++"/"--" into "+=step"/"-=step"
            incop = "%s=%s" % (incop[0], step)
        import ExprNodes
        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.allocate(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.allocate(code)
        if from_range:
            # optimised range() loop: iterate on a raw C temp
            loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
        else:
            loopvar_name = self.loopvar_node.result()
        code.putln(
            "for (%s = %s%s; %s %s %s; %s%s) {" % (
                loopvar_name,
                self.bound1.result(), offset,
                loopvar_name, self.relation2, self.bound2.result(),
                loopvar_name, incop))
        if self.py_loopvar_node:
            # propagate the C counter into the Python-level target
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        elif from_range:
            code.putln("%s = %s;" % (
                self.target.result(), loopvar_name))
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        if self.py_loopvar_node:
            # This mess is to make for..from loops with python targets behave
            # exactly like those with C targets with regards to re-assignment
            # of the loop variable.
            import ExprNodes
            if self.target.entry.is_pyglobal:
                # We know target is a NameNode, this is the only ugly case.
                target_node = ExprNodes.PyTempNode(self.target.pos, None)
                target_node.allocate(code)
                interned_cname = code.intern_identifier(self.target.entry.name)
                if self.target.entry.scope.is_module_scope:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                    lookup_func = '__Pyx_GetModuleGlobalName(%s)'
                else:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                    lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % (
                        self.target.entry.scope.namespace_cname)
                code.putln("%s = %s; %s" % (
                    target_node.result(),
                    lookup_func % interned_cname,
                    code.error_goto_if_null(target_node.result(), self.target.pos)))
                code.put_gotref(target_node.result())
            else:
                target_node = self.target
            # read the (possibly user-modified) target back into the C
            # counter before the next increment
            from_py_node = ExprNodes.CoerceFromPyTypeNode(
                self.loopvar_node.type, target_node, self.target.entry.scope)
            from_py_node.temp_code = loopvar_name
            from_py_node.generate_result_code(code)
            if self.target.entry.is_pyglobal:
                code.put_decref(target_node.result(), target_node.type)
                target_node.release(code)
        code.putln("}")
        if self.py_loopvar_node:
            # This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop is a python type.
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        if from_range:
            code.funcstate.release_temp(loopvar_name)
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.bound1.generate_disposal_code(code)
        self.bound1.free_temps(code)
        self.bound2.generate_disposal_code(code)
        self.bound2.free_temps(code)
        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.release(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.release(code)
        if self.step is not None:
            self.step.generate_disposal_code(code)
            self.step.free_temps(code)

    relation_table = {
        # {relop : (initial offset, increment op)}
        '<=': ("",   "++"),
        '<' : ("+1", "++"),
        '>=': ("",   "--"),
        '>' : ("-1", "--")
    }

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.bound1.generate_function_definitions(env, code)
        self.bound2.generate_function_definitions(env, code)
        if self.step is not None:
            self.step.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.bound1.annotate(code)
        self.bound2.annotate(code)
        if self.step:
            self.step.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
class WithStatNode(StatNode):
    """
    Represents a Python with statement.

    Implemented by the WithTransform as follows:

        MGR = EXPR
        EXIT = MGR.__exit__
        VALUE = MGR.__enter__()
        EXC = True
        try:
            try:
                TARGET = VALUE  # optional
                BODY
            except:
                EXC = False
                if not EXIT(*EXCINFO):
                    raise
        finally:
            if EXC:
                EXIT(None, None, None)
            MGR = EXIT = VALUE = None
    """
    #  manager          The with statement manager object
    #  target           ExprNode  the target lhs of the __enter__() call
    #  body             StatNode
    #  enter_call       ExprNode  the call to the __enter__() method
    #  exit_var         String    the cname of the __exit__() method reference

    child_attrs = ["manager", "enter_call", "target", "body"]

    enter_call = None

    def analyse_declarations(self, env):
        self.manager.analyse_declarations(env)
        self.enter_call.analyse_declarations(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.manager = self.manager.analyse_types(env)
        self.enter_call = self.enter_call.analyse_types(env)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.manager.generate_function_definitions(env, code)
        self.enter_call.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        code.putln("/*with:*/ {")
        self.manager.generate_evaluation_code(code)
        # look up __exit__ up front (before __enter__ is called), per the
        # with-statement protocol
        self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
            self.exit_var,
            self.manager.py_result(),
            code.intern_identifier(EncodedString('__exit__')),
            code.error_goto_if_null(self.exit_var, self.pos),
            ))
        code.put_gotref(self.exit_var)

        # need to free exit_var in the face of exceptions during setup
        old_error_label = code.new_error_label()
        intermediate_error_label = code.error_label

        self.enter_call.generate_evaluation_code(code)
        if not self.target:
            self.enter_call.generate_disposal_code(code)
            self.enter_call.free_temps(code)
        else:
            # Otherwise, the node will be cleaned up by the
            # WithTargetAssignmentStatNode after assigning its result
            # to the target of the 'with' statement.
            pass
        self.manager.generate_disposal_code(code)
        self.manager.free_temps(code)

        code.error_label = old_error_label
        self.body.generate_execution_code(code)

        if code.label_used(intermediate_error_label):
            # errors during setup: release the __exit__ reference before
            # falling through to the regular error path
            step_over_label = code.new_label()
            code.put_goto(step_over_label)
            code.put_label(intermediate_error_label)
            code.put_decref_clear(self.exit_var, py_object_type)
            code.put_goto(old_error_label)
            code.put_label(step_over_label)

        code.funcstate.release_temp(self.exit_var)
        code.putln('}')
class WithTargetAssignmentStatNode(AssignmentNode):
    # The target assignment of the 'with' statement value (return
    # value of the __enter__() call).
    #
    # This is a special cased assignment that steals the RHS reference
    # and frees its temp.
    #
    # lhs       ExprNode   the assignment target
    # rhs       CloneNode  a (coerced) CloneNode for the orig_rhs (not owned by this node)
    # orig_rhs  ExprNode   the original ExprNode of the rhs. this node will clean up the
    #                      temps of the orig_rhs. basically, it takes ownership of the node
    #                      when the WithStatNode is done with it.

    child_attrs = ["lhs"]

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)
        self.rhs = self.rhs.coerce_to(self.lhs.type, env)
        return self

    def generate_execution_code(self, code):
        if self.orig_rhs.type.is_pyobject:
            # make sure rhs gets freed on errors, see below
            old_error_label = code.new_error_label()
            intermediate_error_label = code.error_label

        self.rhs.generate_evaluation_code(code)
        self.lhs.generate_assignment_code(self.rhs, code)

        if self.orig_rhs.type.is_pyobject:
            self.orig_rhs.generate_disposal_code(code)
            code.error_label = old_error_label
            if code.label_used(intermediate_error_label):
                # error path: dispose of the original rhs before jumping
                # to the enclosing error handler
                step_over_label = code.new_label()
                code.put_goto(step_over_label)
                code.put_label(intermediate_error_label)
                self.orig_rhs.generate_disposal_code(code)
                code.put_goto(old_error_label)
                code.put_label(step_over_label)
        self.orig_rhs.free_temps(code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)
class TryExceptStatNode(StatNode):
    #  try .. except statement
    #
    #  body             StatNode
    #  except_clauses   [ExceptClauseNode]
    #  else_clause      StatNode or None

    child_attrs = ["body", "except_clauses", "else_clause"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        for except_clause in self.except_clauses:
            except_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        default_clause_seen = 0
        for i, except_clause in enumerate(self.except_clauses):
            except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
            # a bare "except:" must come after all typed clauses
            if default_clause_seen:
                error(except_clause.pos, "default 'except:' must be last")
            if not except_clause.pattern:
                default_clause_seen = 1
        self.has_default_clause = default_clause_seen
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    nogil_check = Node.gil_error
    gil_message = "Try-except statement"

    def generate_execution_code(self, code):
        # Redirect all non-local exits (return/break/continue/error) out
        # of the try block through labels of our own, so that the saved
        # exception state can be restored before leaving.
        old_return_label = code.return_label
        old_break_label = code.break_label
        old_continue_label = code.continue_label
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        except_end_label = code.new_label('exception_handled')
        except_error_label = code.new_label('except_error')
        except_return_label = code.new_label('except_return')
        try_return_label = code.new_label('try_return')
        try_break_label = code.new_label('try_break')
        try_continue_label = code.new_label('try_continue')
        try_end_label = code.new_label('try_end')

        # three temps for the saved (type, value, traceback) triple
        exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
                         for _ in xrange(3)]
        code.putln("{")
        # remember this spot: the save code is injected here later, but
        # only if the try block can actually raise
        save_exc = code.insertion_point()
        code.putln(
            "/*try:*/ {")
        code.return_label = try_return_label
        code.break_label = try_break_label
        code.continue_label = try_continue_label
        self.body.generate_execution_code(code)
        code.putln(
            "}")
        temps_to_clean_up = code.funcstate.all_free_managed_temps()
        can_raise = code.label_used(our_error_label)

        if can_raise:
            # inject code before the try block to save away the exception state
            code.globalstate.use_utility_code(reset_exception_utility_code)
            save_exc.putln("__Pyx_ExceptionSave(%s);" %
                           ', '.join(['&%s' % var for var in exc_save_vars]))
            for var in exc_save_vars:
                save_exc.put_xgotref(var)

            def restore_saved_exception():
                for name in exc_save_vars:
                    code.put_xgiveref(name)
                code.putln("__Pyx_ExceptionReset(%s);" %
                           ', '.join(exc_save_vars))
        else:
            # try block cannot raise exceptions, but we had to allocate the temps above,
            # so just keep the C compiler from complaining about them being unused
            save_exc.putln("if (%s); else {/*mark used*/};" % '||'.join(exc_save_vars))

            def restore_saved_exception():
                pass

        code.error_label = except_error_label
        code.return_label = except_return_label

        if self.else_clause:
            # runs only when the try body completed without exception
            code.putln(
                "/*else:*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln(
                "}")

        if can_raise:
            # normal completion: drop the saved exception state
            for var in exc_save_vars:
                code.put_xdecref_clear(var, py_object_type)
            code.put_goto(try_end_label)
            code.put_label(our_error_label)
            # clean up any live temps from the interrupted try body
            for temp_name, temp_type in temps_to_clean_up:
                code.put_xdecref_clear(temp_name, temp_type)
            for except_clause in self.except_clauses:
                except_clause.generate_handling_code(code, except_end_label)
            if not self.has_default_clause:
                # no clause matched: propagate the exception outwards
                code.put_goto(except_error_label)

        # For every intercepted exit path, restore the saved exception
        # state before resuming the corresponding outer label.
        for exit_label, old_label in [(except_error_label, old_error_label),
                                      (try_break_label, old_break_label),
                                      (try_continue_label, old_continue_label),
                                      (try_return_label, old_return_label),
                                      (except_return_label, old_return_label)]:
            if code.label_used(exit_label):
                if not code.label_used(try_end_label):
                    code.put_goto(try_end_label)
                code.put_label(exit_label)
                restore_saved_exception()
                code.put_goto(old_label)

        if code.label_used(except_end_label):
            if not code.label_used(try_end_label):
                code.put_goto(try_end_label)
            code.put_label(except_end_label)
            restore_saved_exception()
        if code.label_used(try_end_label):
            code.put_label(try_end_label)
        code.putln("}")

        for cname in exc_save_vars:
            code.funcstate.release_temp(cname)

        # restore the outer labels
        code.return_label = old_return_label
        code.break_label = old_break_label
        code.continue_label = old_continue_label
        code.error_label = old_error_label

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
        for except_clause in self.except_clauses:
            except_clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.body.annotate(code)
        for except_node in self.except_clauses:
            except_node.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
class ExceptClauseNode(Node):
    #  Part of try ... except statement.
    #
    #  pattern        [ExprNode]                  exception type expression(s), or None for bare "except:"
    #  target         ExprNode or None            assignment target for the caught exception value
    #  body           StatNode                    handler body
    #  excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!)
    #  match_flag     string                      result of exception match
    #  exc_value      ExcValueNode                used internally
    #  function_name  string                      qualified name of enclosing function
    #  exc_vars       (string * 3)                local exception variables
    #  is_except_as   bool                        Py3-style "except ... as xyz"

    # excinfo_target is never set by the parser, but can be set by a transform
    # in order to extract more extensive information about the exception as a
    # sys.exc_info()-style tuple into a target variable

    child_attrs = ["pattern", "target", "body", "exc_value"]

    exc_value = None
    excinfo_target = None
    is_except_as = False

    def analyse_declarations(self, env):
        # Declare the "except ... as target" name (if any), then the body.
        if self.target:
            self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        # Remember the enclosing function's qualified name for traceback
        # generation in generate_handling_code().
        self.function_name = env.qualified_name
        if self.pattern:
            # normalise/unpack self.pattern into a list; each pattern must be
            # a Python object to be matched with PyErr_ExceptionMatches()
            for i, pattern in enumerate(self.pattern):
                pattern = pattern.analyse_expressions(env)
                self.pattern[i] = pattern.coerce_to_pyobject(env)

        if self.target:
            import ExprNodes
            # placeholder node holding the C name of the caught value
            self.exc_value = ExprNodes.ExcValueNode(self.pos)
            self.target = self.target.analyse_target_expression(env, self.exc_value)

        self.body = self.body.analyse_expressions(env)
        return self

    def generate_handling_code(self, code, end_label):
        """Emit the C code for this handler; jumps to end_label when done."""
        code.mark_pos(self.pos)
        if self.pattern:
            # Evaluate all patterns and OR the match tests into one flag.
            exc_tests = []
            for pattern in self.pattern:
                pattern.generate_evaluation_code(code)
                exc_tests.append("PyErr_ExceptionMatches(%s)" % pattern.py_result())

            match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
            code.putln(
                "%s = %s;" % (match_flag, ' || '.join(exc_tests)))
            for pattern in self.pattern:
                pattern.generate_disposal_code(code)
                pattern.free_temps(code)
            code.putln(
                "if (%s) {" %
                    match_flag)
            code.funcstate.release_temp(match_flag)
        else:
            code.putln("/*except:*/ {")

        if (not getattr(self.body, 'stats', True)
                and self.excinfo_target is None
                and self.target is None):
            # most simple case: no exception variable, empty body (pass)
            # => reset the exception state, done
            code.putln("PyErr_Restore(0,0,0);")
            code.put_goto(end_label)
            code.putln("}")
            return

        exc_vars = [code.funcstate.allocate_temp(py_object_type,
                                                 manage_ref=True)
                    for _ in xrange(3)]
        code.put_add_traceback(self.function_name)
        # We always have to fetch the exception value even if
        # there is no target, because this also normalises the
        # exception and stores it in the thread state.
        code.globalstate.use_utility_code(get_exception_utility_code)
        exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
        code.putln("if (__Pyx_GetException(%s) < 0) %s" % (exc_args,
            code.error_goto(self.pos)))
        for x in exc_vars:
            code.put_gotref(x)
        if self.target:
            # bind the fetched exception value to the "as" target
            self.exc_value.set_var(exc_vars[1])
            self.exc_value.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.exc_value, code)
        if self.excinfo_target is not None:
            # expose the full (type, value, tb) triple via the result refs
            for tempvar, node in zip(exc_vars, self.excinfo_target.args):
                node.set_var(tempvar)

        # break/continue inside the handler must first decref the exception
        # temps, so redirect them to local labels.
        old_break_label, old_continue_label = code.break_label, code.continue_label
        code.break_label = code.new_label('except_break')
        code.continue_label = code.new_label('except_continue')

        old_exc_vars = code.funcstate.exc_vars
        code.funcstate.exc_vars = exc_vars
        self.body.generate_execution_code(code)
        code.funcstate.exc_vars = old_exc_vars
        for var in exc_vars:
            code.put_decref_clear(var, py_object_type)
        code.put_goto(end_label)

        for new_label, old_label in [(code.break_label, old_break_label),
                                     (code.continue_label, old_continue_label)]:
            if code.label_used(new_label):
                # clean up the exception temps before leaving the handler
                code.put_label(new_label)
                for var in exc_vars:
                    code.put_decref_clear(var, py_object_type)
                code.put_goto(old_label)
        code.break_label = old_break_label
        code.continue_label = old_continue_label

        for temp in exc_vars:
            code.funcstate.release_temp(temp)

        code.putln(
            "}")

    def generate_function_definitions(self, env, code):
        if self.target is not None:
            self.target.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.pattern:
            for pattern in self.pattern:
                pattern.annotate(code)
        if self.target:
            self.target.annotate(code)
        self.body.annotate(code)
class TryFinallyStatNode(StatNode):
    #  try ... finally statement
    #
    #  body             StatNode
    #  finally_clause   StatNode
    #
    #  The plan is that we funnel all continue, break
    #  return and error gotos into the beginning of the
    #  finally block, setting a variable to remember which
    #  one we're doing. At the end of the finally block, we
    #  switch on the variable to figure out where to go.
    #  In addition, if we're doing an error, we save the
    #  exception on entry to the finally block and restore
    #  it on exit.

    child_attrs = ["body", "finally_clause"]

    # whether to save/restore a pending exception around the finally clause
    preserve_exception = 1

    # handle exception case, in addition to return/break/continue
    handle_error_case = True
    func_return_type = None

    disallow_continue_in_try_finally = 0
    # There doesn't seem to be any point in disallowing
    # continue in the try block, since we have no problem
    # handling it.

    is_try_finally_in_nogil = False

    def create_analysed(pos, env, body, finally_clause):
        # Factory used by transforms that build already-analysed trees.
        node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
        return node
    create_analysed = staticmethod(create_analysed)

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        self.finally_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        self.finally_clause = self.finally_clause.analyse_expressions(env)
        # remember a non-void return type so the return value can be saved
        # across the finally clause in generate_execution_code()
        if env.return_type and not env.return_type.is_void:
            self.func_return_type = env.return_type
        return self

    nogil_check = Node.gil_error
    gil_message = "Try-finally statement"

    def generate_execution_code(self, code):
        # Redirect all exit labels (error/break/continue/return) into the
        # finally block, then re-dispatch to the original labels afterwards.
        old_error_label = code.error_label
        old_labels = code.all_new_labels()
        new_labels = code.get_all_labels()
        new_error_label = code.error_label
        if not self.handle_error_case:
            code.error_label = old_error_label
        catch_label = code.new_label()

        code.putln("/*try:*/ {")

        if self.disallow_continue_in_try_finally:
            was_in_try_finally = code.funcstate.in_try_finally
            code.funcstate.in_try_finally = 1

        self.body.generate_execution_code(code)

        if self.disallow_continue_in_try_finally:
            code.funcstate.in_try_finally = was_in_try_finally

        code.putln("}")
        code.set_all_labels(old_labels)

        temps_to_clean_up = code.funcstate.all_free_managed_temps()
        code.mark_pos(self.finally_clause.pos)
        code.putln("/*finally:*/ {")

        def fresh_finally_clause(_next=[self.finally_clause]):
            # generate the original subtree once and always keep a fresh copy
            node = _next[0]
            node_copy = copy.deepcopy(node)
            if node is self.finally_clause:
                _next[0] = node_copy
            else:
                node = node_copy
            return node

        preserve_error = self.preserve_exception and code.label_used(new_error_label)
        needs_success_cleanup = not self.finally_clause.is_terminator

        if not self.body.is_terminator:
            # normal fall-through: run the finally clause, then continue
            code.putln('/*normal exit:*/{')
            fresh_finally_clause().generate_execution_code(code)
            if not self.finally_clause.is_terminator:
                code.put_goto(catch_label)
            code.putln('}')

        if preserve_error:
            # error path: stash the pending exception (and position info),
            # run the finally clause, then restore and re-raise
            code.putln('/*exception exit:*/{')
            if self.is_try_finally_in_nogil:
                code.declare_gilstate()
            if needs_success_cleanup:
                exc_lineno_cnames = tuple([
                    code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
                    for _ in range(2)])
                exc_filename_cname = code.funcstate.allocate_temp(
                    PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
                    manage_ref=False)
            else:
                exc_lineno_cnames = exc_filename_cname = None
            # 6 object temps: current (type, value, tb) + saved exc_info triple
            exc_vars = tuple([
                code.funcstate.allocate_temp(py_object_type, manage_ref=False)
                for _ in range(6)])
            code.put_label(new_error_label)
            self.put_error_catcher(
                code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
            finally_old_labels = code.all_new_labels()

            code.putln('{')
            old_exc_vars = code.funcstate.exc_vars
            code.funcstate.exc_vars = exc_vars[:3]
            fresh_finally_clause().generate_execution_code(code)
            code.funcstate.exc_vars = old_exc_vars
            code.putln('}')

            if needs_success_cleanup:
                self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
                if exc_lineno_cnames:
                    for cname in exc_lineno_cnames:
                        code.funcstate.release_temp(cname)
                if exc_filename_cname:
                    code.funcstate.release_temp(exc_filename_cname)
                code.put_goto(old_error_label)

            # exits out of the finally clause itself discard the saved exception
            for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
                if not code.label_used(new_label):
                    continue
                code.put_label(new_label)
                self.put_error_cleaner(code, exc_vars)
                code.put_goto(old_label)

            for cname in exc_vars:
                code.funcstate.release_temp(cname)
            code.putln('}')

        code.set_all_labels(old_labels)
        return_label = code.return_label
        # dispatch each remaining redirected label: run the finally clause,
        # then jump on to the original label
        for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
            if not code.label_used(new_label):
                continue
            if new_label == new_error_label and preserve_error:
                continue  # handled above

            code.put('%s: ' % new_label)
            code.putln('{')
            ret_temp = None
            if old_label == return_label and not self.finally_clause.is_terminator:
                # store away return value for later reuse
                if (self.func_return_type and
                        not self.is_try_finally_in_nogil and
                        not isinstance(self.finally_clause, GILExitNode)):
                    ret_temp = code.funcstate.allocate_temp(
                        self.func_return_type, manage_ref=False)
                    code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
                    if self.func_return_type.is_pyobject:
                        code.putln("%s = 0;" % Naming.retval_cname)
            fresh_finally_clause().generate_execution_code(code)
            if ret_temp:
                code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
                if self.func_return_type.is_pyobject:
                    code.putln("%s = 0;" % ret_temp)
                code.funcstate.release_temp(ret_temp)
                ret_temp = None

            if not self.finally_clause.is_terminator:
                code.put_goto(old_label)
            code.putln('}')

        # End finally
        code.put_label(catch_label)
        code.putln(
            "}")

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
        self.finally_clause.generate_function_definitions(env, code)

    def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
                          exc_lineno_cnames, exc_filename_cname):
        """Emit C code that fetches the pending exception into exc_vars."""
        code.globalstate.use_utility_code(restore_exception_utility_code)
        code.globalstate.use_utility_code(get_exception_utility_code)
        code.globalstate.use_utility_code(swap_exception_utility_code)

        code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)

        for temp_name, type in temps_to_clean_up:
            code.put_xdecref_clear(temp_name, type)

        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3)"
                   " __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
        code.putln("if ((PY_MAJOR_VERSION < 3) ||"
                   # if __Pyx_GetException() fails in Py3,
                   # store the newly raised exception instead
                   " unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
                   "__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
        for var in exc_vars:
            code.put_xgotref(var)
        if exc_lineno_cnames:
            # save the source position of the error for later restoration
            code.putln("%s = %s; %s = %s; %s = %s;" % (
                exc_lineno_cnames[0], Naming.lineno_cname,
                exc_lineno_cnames[1], Naming.clineno_cname,
                exc_filename_cname, Naming.filename_cname))

        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()

    def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname):
        """Emit C code that re-raises the exception saved in exc_vars."""
        code.globalstate.use_utility_code(restore_exception_utility_code)
        code.globalstate.use_utility_code(reset_exception_utility_code)

        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)

        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3) {")
        for var in exc_vars[3:]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
        code.putln("}")
        for var in exc_vars[:3]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])

        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()

        code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
        if exc_lineno_cnames:
            code.putln("%s = %s; %s = %s; %s = %s;" % (
                Naming.lineno_cname, exc_lineno_cnames[0],
                Naming.clineno_cname, exc_lineno_cnames[1],
                Naming.filename_cname, exc_filename_cname))

    def put_error_cleaner(self, code, exc_vars):
        """Emit C code that discards the exception saved in exc_vars."""
        code.globalstate.use_utility_code(reset_exception_utility_code)
        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)

        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3) {")
        for var in exc_vars[3:]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
        code.putln("}")
        for var in exc_vars[:3]:
            code.put_xdecref_clear(var, py_object_type)
        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()
        code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])

    def annotate(self, code):
        self.body.annotate(code)
        self.finally_clause.annotate(code)
class NogilTryFinallyStatNode(TryFinallyStatNode):
    """
    A try/finally statement that may be used in nogil code sections.
    """

    # no exception save/restore around the finally clause
    preserve_exception = False
    # disable the base class's gil_error nogil check
    nogil_check = None
class GILStatNode(NogilTryFinallyStatNode):
    #  'with gil' or 'with nogil' statement
    #
    #   state   string   'gil' or 'nogil'

    # TempNode holding the GIL/thread state across a yield, or None
    state_temp = None

    def __init__(self, pos, state, body):
        self.state = state
        self.create_state_temp_if_needed(pos, state, body)
        # the body runs as the "try", re-acquiring/releasing in the "finally"
        TryFinallyStatNode.__init__(self, pos,
             body=body,
             finally_clause=GILExitNode(
                 pos, state=state, state_temp=self.state_temp))

    def create_state_temp_if_needed(self, pos, state, body):
        # A temp is only needed when the body contains a yield, so the
        # GIL/thread state survives suspension of the generator.
        from ParseTreeTransforms import YieldNodeCollector
        collector = YieldNodeCollector()
        collector.visitchildren(body)
        if not collector.yields:
            return

        if state == 'gil':
            temp_type = PyrexTypes.c_gilstate_type
        else:
            temp_type = PyrexTypes.c_threadstate_ptr_type

        import ExprNodes
        self.state_temp = ExprNodes.TempNode(pos, temp_type)

    def analyse_declarations(self, env):
        env._in_with_gil_block = (self.state == 'gil')
        if self.state == 'gil':
            env.has_with_gil_block = True

        return super(GILStatNode, self).analyse_declarations(env)

    def analyse_expressions(self, env):
        env.use_utility_code(
            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
        # analyse the body under the new GIL state, then restore it
        was_nogil = env.nogil
        env.nogil = self.state == 'nogil'
        node = TryFinallyStatNode.analyse_expressions(self, env)
        env.nogil = was_nogil
        return node

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        code.begin_block()
        if self.state_temp:
            self.state_temp.allocate(code)
            variable = self.state_temp.result()
        else:
            variable = None

        old_trace_config = code.funcstate.can_trace
        if self.state == 'gil':
            code.put_ensure_gil(variable=variable)
            # FIXME: not that easy, tracing may not be possible at all here
            #code.funcstate.can_trace = True
        else:
            code.put_release_gil(variable=variable)
            code.funcstate.can_trace = False

        TryFinallyStatNode.generate_execution_code(self, code)

        if self.state_temp:
            self.state_temp.release(code)

        code.funcstate.can_trace = old_trace_config
        code.end_block()
class GILExitNode(StatNode):
    """
    Used as the 'finally' block in a GILStatNode

    state   string   'gil' or 'nogil'
    """
    child_attrs = []
    state_temp = None

    def analyse_expressions(self, env):
        # Nothing to analyse; this node only exists for code generation.
        return self

    def generate_execution_code(self, code):
        # Thread the GIL-state temp (used across yields) through to the
        # runtime call when one was allocated; otherwise pass None.
        variable = self.state_temp.result() if self.state_temp else None
        if self.state == 'gil':
            # leaving a 'with gil' block: give the GIL back
            code.put_release_ensured_gil(variable)
        else:
            # leaving a 'with nogil' block: reacquire the GIL
            code.put_acquire_gil(variable)
class EnsureGILNode(GILExitNode):
    """
    Ensure the GIL in nogil functions for cleanup before returning.
    """

    def generate_execution_code(self, code):
        # gilstate variable is expected to be declared already by the caller
        code.put_ensure_gil(declare_gilstate=False)
# Maps a cimported module name to the (utility code name, file) pair that must
# be injected into the module when that name is cimported (see
# CImportStatNode / FromCImportStatNode.analyse_declarations).
utility_code_for_cimports = {
    # utility code (or inlining c) in a pxd (or pyx) file.
    # TODO: Consider a generic user-level mechanism for importing
    'cpython.array' : ("ArrayAPI", "arrayarray.h"),
    'cpython.array.array' : ("ArrayAPI", "arrayarray.h"),
}
class CImportStatNode(StatNode):
    #  cimport statement
    #
    #  module_name   string           Qualified name of module being imported
    #  as_name       string or None   Name specified in "as" clause, if any

    child_attrs = []

    def analyse_declarations(self, env):
        # cimport is a compile-time operation: it only introduces module
        # scopes into the environment, no runtime code is generated.
        if not env.is_module_scope:
            error(self.pos, "cimport only allowed at module level")
            return
        module_scope = env.find_module(self.module_name, self.pos)
        if "." in self.module_name:
            # dotted name: declare each submodule inside its parent scope
            names = [EncodedString(name) for name in self.module_name.split(".")]
            top_name = names[0]
            top_module_scope = env.context.find_submodule(top_name)
            module_scope = top_module_scope
            for name in names[1:]:
                submodule_scope = module_scope.find_submodule(name)
                module_scope.declare_module(name, submodule_scope, self.pos)
                module_scope = submodule_scope
            if self.as_name:
                # "cimport a.b.c as x" binds x to the innermost module
                env.declare_module(self.as_name, module_scope, self.pos)
            else:
                # plain "cimport a.b.c" binds the top-level name only
                env.add_imported_module(module_scope)
                env.declare_module(top_name, top_module_scope, self.pos)
        else:
            name = self.as_name or self.module_name
            env.declare_module(name, module_scope, self.pos)
        if self.module_name in utility_code_for_cimports:
            # some modules (e.g. cpython.array) carry utility code with them
            env.use_utility_code(UtilityCode.load_cached(
                *utility_code_for_cimports[self.module_name]))

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # nothing to do at runtime
        pass
class FromCImportStatNode(StatNode):
    #  from ... cimport statement
    #
    #  module_name     string                        Qualified name of module
    #  imported_names  [(pos, name, as_name, kind)]  Names to be imported

    child_attrs = []

    def analyse_declarations(self, env):
        # Like CImportStatNode, this is purely a compile-time operation.
        if not env.is_module_scope:
            error(self.pos, "cimport only allowed at module level")
            return
        module_scope = env.find_module(self.module_name, self.pos)
        env.add_imported_module(module_scope)
        for pos, name, as_name, kind in self.imported_names:
            if name == "*":
                # wildcard: pull every entry of the module into this scope
                for local_name, entry in module_scope.entries.items():
                    env.add_imported_entry(local_name, entry, pos)
            else:
                entry = module_scope.lookup(name)
                if entry:
                    # name exists: check the declared kind matches, if given
                    if kind and not self.declaration_matches(entry, kind):
                        entry.redeclared(pos)
                    entry.used = 1
                else:
                    # name unknown: forward-declare it with the given kind,
                    # or treat it as a submodule cimport
                    if kind == 'struct' or kind == 'union':
                        entry = module_scope.declare_struct_or_union(name,
                            kind = kind, scope = None, typedef_flag = 0, pos = pos)
                    elif kind == 'class':
                        entry = module_scope.declare_c_class(name, pos = pos,
                            module_name = self.module_name)
                    else:
                        submodule_scope = env.context.find_module(name, relative_to = module_scope, pos = self.pos)
                        if submodule_scope.parent_module is module_scope:
                            env.declare_module(as_name or name, submodule_scope, self.pos)
                        else:
                            error(pos, "Name '%s' not declared in module '%s'"
                                % (name, self.module_name))

                if entry:
                    local_name = as_name or name
                    env.add_imported_entry(local_name, entry, pos)

        if self.module_name.startswith('cpython'): # enough for now
            # inject any utility code attached to the module or to the
            # individual imported names (see utility_code_for_cimports)
            if self.module_name in utility_code_for_cimports:
                env.use_utility_code(UtilityCode.load_cached(
                    *utility_code_for_cimports[self.module_name]))
            for _, name, _, _ in self.imported_names:
                fqname = '%s.%s' % (self.module_name, name)
                if fqname in utility_code_for_cimports:
                    env.use_utility_code(UtilityCode.load_cached(
                        *utility_code_for_cimports[fqname]))

    def declaration_matches(self, entry, kind):
        """Return 1 if entry's declared type matches the cimported kind."""
        if not entry.is_type:
            return 0
        type = entry.type
        if kind == 'class':
            if not type.is_extension_type:
                return 0
        else:
            if not type.is_struct_or_union:
                return 0
            if kind != type.kind:
                return 0
        return 1

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        # nothing to do at runtime
        pass
class FromImportStatNode(StatNode):
    #  from ... import statement
    #
    #  module           ImportNode
    #  items            [(string, NameNode)]
    #  interned_items   [(string, NameNode, ExprNode)]
    #  item             PyTempNode            used internally
    #  import_star      boolean               used internally

    child_attrs = ["module"]
    import_star = 0

    def analyse_declarations(self, env):
        for name, target in self.items:
            if name == "*":
                if not env.is_module_scope:
                    error(self.pos, "import * only allowed at module level")
                    return
                env.has_import_star = 1
                self.import_star = 1
            else:
                target.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        import ExprNodes
        self.module = self.module.analyse_expressions(env)
        # temp node standing in for each imported value during assignment
        self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
        self.interned_items = []
        for name, target in self.items:
            if name == '*':
                # "import *" may assign to extension-type attributes, which
                # needs the ExtTypeTest utility code
                for _, entry in env.entries.items():
                    if not entry.is_type and entry.type.is_extension_type:
                        env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
                        break
            else:
                entry = env.lookup(target.name)
                # check whether or not entry is already cimported
                if (entry.is_type and entry.type.name == name
                        and hasattr(entry.type, 'module_name')):
                    if entry.type.module_name == self.module.module_name.value:
                        # cimported with absolute name
                        continue
                    try:
                        # cimported with relative name
                        module = env.find_module(self.module.module_name.value,
                                                 pos=None)
                        if entry.type.module_name == module.qualified_name:
                            continue
                    except AttributeError:
                        pass
                target = target.analyse_target_expression(env, None)  # FIXME?
                if target.type is py_object_type:
                    coerced_item = None
                else:
                    # non-object target: coerce the imported value first
                    coerced_item = self.item.coerce_to(target.type, env)
                self.interned_items.append((name, target, coerced_item))
        return self

    def generate_execution_code(self, code):
        self.module.generate_evaluation_code(code)
        if self.import_star:
            code.putln(
                'if (%s(%s) < 0) %s;' % (
                    Naming.import_star,
                    self.module.py_result(),
                    code.error_goto(self.pos)))
        item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        self.item.set_cname(item_temp)
        if self.interned_items:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
        # one __Pyx_ImportFrom() call per imported name, reusing item_temp
        for name, target, coerced_item in self.interned_items:
            code.putln(
                '%s = __Pyx_ImportFrom(%s, %s); %s' % (
                    item_temp,
                    self.module.py_result(),
                    code.intern_identifier(name),
                    code.error_goto_if_null(item_temp, self.pos)))
            code.put_gotref(item_temp)
            if coerced_item is None:
                target.generate_assignment_code(self.item, code)
            else:
                coerced_item.allocate_temp_result(code)
                coerced_item.generate_result_code(code)
                target.generate_assignment_code(coerced_item, code)
            code.put_decref_clear(item_temp, py_object_type)
        code.funcstate.release_temp(item_temp)
        self.module.generate_disposal_code(code)
        self.module.free_temps(code)
class ParallelNode(Node):
    """
    Base class for cython.parallel constructs.
    """

    # parallel constructs are nogil by design; skip the nogil check
    nogil_check = None
class ParallelStatNode(StatNode, ParallelNode):
    """
    Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.

    assignments     { Entry(var) : (var.pos, inplace_operator_or_None) }
                    assignments to variables in this parallel section

    parent          parent ParallelStatNode or None
    is_parallel     indicates whether this node is OpenMP parallel
                    (true for #pragma omp parallel for and
                          #pragma omp parallel)

    is_parallel is true for:

        #pragma omp parallel
        #pragma omp parallel for

    sections, but NOT for

        #pragma omp for

    We need this to determine the sharing attributes.

    privatization_insertion_point   a code insertion point used to make temps
                                    private (esp. the "nsteps" temp)

    args         tuple          the arguments passed to the parallel construct
    kwargs       DictNode       the keyword arguments passed to the parallel
                                construct (replaced by its compile time value)
    """

    child_attrs = ['body', 'num_threads']

    body = None
    is_prange = False
    is_nested_prange = False

    error_label_used = False

    num_threads = None
    chunksize = None

    # cnames used to communicate an exception out of the parallel section
    parallel_exc = (
        Naming.parallel_exc_type,
        Naming.parallel_exc_value,
        Naming.parallel_exc_tb,
    )

    # cnames of the saved source position of a parallel-section error
    parallel_pos_info = (
        Naming.parallel_filename,
        Naming.parallel_lineno,
        Naming.parallel_clineno,
    )

    # cnames of the regular per-function source position variables
    pos_info = (
        Naming.filename_cname,
        Naming.lineno_cname,
        Naming.clineno_cname,
    )

    critical_section_counter = 0

    def __init__(self, pos, **kwargs):
        super(ParallelStatNode, self).__init__(pos, **kwargs)

        # All assignments in this scope
        self.assignments = kwargs.get('assignments') or {}

        # All seen closure cnames and their temporary cnames
        self.seen_closure_vars = set()

        # Dict of variables that should be declared (first|last|)private or
        # reduction { Entry: (op, lastprivate) }.
        # If op is not None, it's a reduction.
        self.privates = {}

        # [NameNode]
        self.assigned_nodes = []
    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

        self.num_threads = None

        if self.kwargs:
            # Try to find num_threads and chunksize keyword arguments
            pairs = []
            for dictitem in self.kwargs.key_value_pairs:
                if dictitem.key.value == 'num_threads':
                    self.num_threads = dictitem.value
                elif self.is_prange and dictitem.key.value == 'chunksize':
                    self.chunksize = dictitem.value
                else:
                    pairs.append(dictitem)

            self.kwargs.key_value_pairs = pairs

            # remaining keyword arguments must be compile-time constants
            try:
                self.kwargs = self.kwargs.compile_time_value(env)
            except Exception, e:
                error(self.kwargs.pos, "Only compile-time values may be "
                                       "supplied as keyword arguments")
        else:
            self.kwargs = {}

        # apply recognised keyword arguments as attributes on this node
        for kw, val in self.kwargs.iteritems():
            if kw not in self.valid_keyword_arguments:
                error(self.pos, "Invalid keyword argument: %s" % kw)
            else:
                setattr(self, kw, val)
    def analyse_expressions(self, env):
        if self.num_threads:
            self.num_threads = self.num_threads.analyse_expressions(env)

        if self.chunksize:
            self.chunksize = self.chunksize.analyse_expressions(env)

        self.body = self.body.analyse_expressions(env)
        self.analyse_sharing_attributes(env)

        # num_threads may only be given on the outermost parallel construct,
        # and a literal value must be positive
        if self.num_threads is not None:
            if (self.parent and self.parent.num_threads is not None and not
                    self.parent.is_prange):
                error(self.pos,
                      "num_threads already declared in outer section")
            elif self.parent and not self.parent.is_prange:
                error(self.pos,
                      "num_threads must be declared in the parent parallel section")
            elif (self.num_threads.type.is_int and
                  self.num_threads.is_literal and
                  self.num_threads.compile_time_value(env) <= 0):
                error(self.pos,
                      "argument to num_threads must be greater than 0")

            if not self.num_threads.is_simple():
                self.num_threads = self.num_threads.coerce_to(
                    PyrexTypes.c_int_type, env).coerce_to_temp(env)
        return self
    def analyse_sharing_attributes(self, env):
        """
        Analyse the privates for this block and set them in self.privates.
        This should be called in a post-order fashion during the
        analyse_expressions phase
        """
        for entry, (pos, op) in self.assignments.iteritems():

            if self.is_prange and not self.is_parallel:
                # closely nested prange in a with parallel block, disallow
                # assigning to privates in the with parallel block (we
                # consider it too implicit and magicky for users)
                if entry in self.parent.assignments:
                    error(pos,
                          "Cannot assign to private of outer parallel block")
                    continue

            if not self.is_prange and op:
                # Again possible, but considered too magicky
                error(pos, "Reductions not allowed for parallel blocks")
                continue

            # By default all variables should have the same values as if
            # executed sequentially
            lastprivate = True
            self.propagate_var_privatization(entry, pos, op, lastprivate)
    def propagate_var_privatization(self, entry, pos, op, lastprivate):
        """
        Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.

        If we are a prange, we propagate our sharing attributes outwards to
        other pranges. If we are a prange in parallel block and the parallel
        block does not determine the variable private, we propagate to the
        parent of the parent. Recursion stops at parallel blocks, as they have
        no concept of lastprivate or reduction.

        So the following cases propagate:

            sum is a reduction for all loops:

                for i in prange(n):
                    for j in prange(n):
                        for k in prange(n):
                            sum += i * j * k

            sum is a reduction for both loops, local_var is private to the
            parallel with block:

                for i in prange(n):
                    with parallel:
                        local_var = ... # private to the parallel
                        for j in prange(n):
                            sum += i * j

        Nested with parallel blocks are disallowed, because they wouldn't
        allow you to propagate lastprivates or reductions:

            #pragma omp parallel for lastprivate(i)
            for i in prange(n):

                sum = 0

                #pragma omp parallel private(j, sum)
                with parallel:

                    #pragma omp parallel
                    with parallel:

                        #pragma omp for lastprivate(j) reduction(+:sum)
                        for j in prange(n):
                            sum += i

                    # sum and j are well-defined here

                # sum and j are undefined here

            # sum and j are undefined here
        """
        self.privates[entry] = (op, lastprivate)

        if entry.type.is_memoryviewslice:
            error(pos, "Memoryview slices can only be shared in parallel sections")
            return

        if self.is_prange:
            if not self.is_parallel and entry not in self.parent.assignments:
                # Parent is a parallel with block
                parent = self.parent.parent
            else:
                parent = self.parent

            # We don't need to propagate privates, only reductions and
            # lastprivates
            if parent and (op or lastprivate):
                parent.propagate_var_privatization(entry, pos, op, lastprivate)
    def _allocate_closure_temp(self, code, entry):
        """
        Helper function that allocates a temporary for a closure variable that
        is assigned to.  Delegates to the outermost parallel block, which owns
        all closure temps.
        """
        if self.parent:
            return self.parent._allocate_closure_temp(code, entry)

        if entry.cname in self.seen_closure_vars:
            # already swapped out for a temp earlier; nothing to do
            return entry.cname

        cname = code.funcstate.allocate_temp(entry.type, True)

        # Add both the actual cname and the temp cname, as the actual cname
        # will be replaced with the temp cname on the entry
        self.seen_closure_vars.add(entry.cname)
        self.seen_closure_vars.add(cname)

        # remember the original cname so it can be restored afterwards
        self.modified_entries.append((entry, entry.cname))
        code.putln("%s = %s;" % (cname, entry.cname))
        entry.cname = cname
def initialize_privates_to_nan(self, code, exclude=None):
first = True
for entry, (op, lastprivate) in self.privates.iteritems():
if not op and (not exclude or entry != exclude):
invalid_value = entry.type.invalid_value()
if invalid_value:
if first:
code.putln("/* Initialize private variables to "
"invalid values */")
first = False
code.putln("%s = %s;" % (entry.cname,
entry.type.cast_code(invalid_value)))
    def evaluate_before_block(self, code, expr):
        """Evaluate expr at the start of the control-flow block and return
        its C result name, so the value is available before the pragma."""
        c = self.begin_of_parallel_control_block_point_after_decls
        # we need to set the owner to ourselves temporarily, as
        # allocate_temp may generate a comment in the middle of our pragma
        # otherwise when DebugFlags.debug_temp_code_comments is in effect
        owner = c.funcstate.owner
        c.funcstate.owner = c
        expr.generate_evaluation_code(c)
        c.funcstate.owner = owner

        return expr.result()
def put_num_threads(self, code):
"""
Write self.num_threads if set as the num_threads OpenMP directive
"""
if self.num_threads is not None:
code.put(" num_threads(%s)" % self.evaluate_before_block(code,
self.num_threads))
def declare_closure_privates(self, code):
"""
If a variable is in a scope object, we need to allocate a temp and
assign the value from the temp to the variable in the scope object
after the parallel section. This kind of copying should be done only
in the outermost parallel section.
"""
self.modified_entries = []
for entry in self.assignments:
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
    def privatize_temps(self, code, exclude_temps=()):
        """
        Make any used temporaries private. Before the relevant code block
        code.start_collecting_temps() should have been called.
        """
        if self.is_parallel:
            c = self.privatization_insertion_point
            self.temps = temps = code.funcstate.stop_collecting_temps()
            privates, firstprivates = [], []
            for temp, type in temps:
                # object and memoryview temps must start out NULL/zeroed in
                # each thread, hence firstprivate; plain C temps are private
                if type.is_pyobject or type.is_memoryviewslice:
                    firstprivates.append(temp)
                else:
                    privates.append(temp)
            if privates:
                c.put(" private(%s)" % ", ".join(privates))
            if firstprivates:
                c.put(" firstprivate(%s)" % ", ".join(firstprivates))

            if self.breaking_label_used:
                # the "why did we leave the block" flag must be shared
                shared_vars = [Naming.parallel_why]
                if self.error_label_used:
                    shared_vars.extend(self.parallel_exc)
                    c.put(" private(%s, %s, %s)" % self.pos_info)

                c.put(" shared(%s)" % ', '.join(shared_vars))
def cleanup_temps(self, code):
# Now clean up any memoryview slice and object temporaries
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in self.temps:
if type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(temp, have_gil=False)
elif type.is_pyobject:
code.put_xdecref(temp, type)
code.putln("%s = NULL;" % temp)
    def setup_parallel_control_flow_block(self, code):
        """
        Sets up a block that surrounds the parallel block to determine
        how the parallel section was exited. Any kind of return is
        trapped (break, continue, return, exceptions). This is the idea:

        {
            int why = 0;

            #pragma omp parallel
            {
                return # -> goto new_return_label;
                goto end_parallel;

            new_return_label:
                why = 3;
                goto end_parallel;

            end_parallel:;
                #pragma omp flush(why) # we need to flush for every iteration
            }

            if (why == 3)
                goto old_return_label;
        }
        """
        # Save the surrounding labels and install fresh ones so that any
        # break/continue/return/error inside the parallel section jumps to a
        # label we control.
        self.old_loop_labels = code.new_loop_labels()
        self.old_error_label = code.new_error_label()
        self.old_return_label = code.return_label
        code.return_label = code.new_label(name="return")

        code.begin_block() # parallel control flow block
        # Insertion points used later to declare the 'why' flag, exception
        # slots and privatized temps at the top of this block.
        self.begin_of_parallel_control_block_point = code.insertion_point()
        self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()

        self.undef_builtin_expect_apple_gcc_bug(code)
    def begin_parallel_block(self, code):
        """
        Each OpenMP thread in a parallel section that contains a with gil block
        must have the thread-state initialized. The call to
        PyGILState_Release() then deallocates our threadstate. If we wouldn't
        do this, each with gil block would allocate and deallocate one, thereby
        losing exception information before it can be saved before leaving the
        parallel section.
        """
        # Only remember the insertion point here; the actual GIL/threadstate
        # setup is emitted retroactively by end_parallel_block(), once we know
        # whether the error label was used.
        self.begin_of_parallel_block = code.insertion_point()
    def end_parallel_block(self, code):
        """
        To ensure all OpenMP threads have thread states, we ensure the GIL
        in each thread (which creates a thread state if it doesn't exist),
        after which we release the GIL.
        On exit, reacquire the GIL and release the thread state.

        If compiled without OpenMP support (at the C level), then we still have
        to acquire the GIL to decref any object temporaries.
        """
        if self.error_label_used:
            begin_code = self.begin_of_parallel_block
            end_code = code

            # Retroactively emit per-thread state setup at the top of the
            # parallel block: ensure a threadstate exists, then drop the GIL.
            begin_code.putln("#ifdef _OPENMP")
            begin_code.put_ensure_gil(declare_gilstate=True)
            begin_code.putln("Py_BEGIN_ALLOW_THREADS")
            begin_code.putln("#endif /* _OPENMP */")

            # On exit: with OpenMP, re-acquire the GIL that the thread
            # released above; without OpenMP, open a fresh ensure_gil scope
            # so object temporaries can still be decref'ed safely.
            end_code.putln("#ifdef _OPENMP")
            end_code.putln("Py_END_ALLOW_THREADS")
            end_code.putln("#else")
            end_code.put_safe("{\n")
            end_code.put_ensure_gil()
            end_code.putln("#endif /* _OPENMP */")
            self.cleanup_temps(end_code)
            end_code.put_release_ensured_gil()
            end_code.putln("#ifndef _OPENMP")
            end_code.put_safe("}\n")
            end_code.putln("#endif /* _OPENMP */")
    def trap_parallel_exit(self, code, should_flush=False):
        """
        Trap any kind of return inside a parallel construct. 'should_flush'
        indicates whether the variable should be flushed, which is needed by
        prange to skip the loop. It also indicates whether we need to register
        a continue (we need this for parallel blocks, but not for prange
        loops, as it is a direct jump there).

        It uses the same mechanism as try/finally:
            1 continue
            2 break
            3 return
            4 error
        """
        save_lastprivates_label = code.new_label()
        dont_return_label = code.new_label()

        self.any_label_used = False
        self.breaking_label_used = False
        self.error_label_used = False

        self.parallel_private_temps = []

        all_labels = code.get_all_labels()

        # Figure this out before starting to generate any code
        for label in all_labels:
            if code.label_used(label):
                # Every exit except 'continue' breaks out of the section.
                self.breaking_label_used = (self.breaking_label_used or
                                            label != code.continue_label)
                self.any_label_used = True

        if self.any_label_used:
            code.put_goto(dont_return_label)

            # Emit one landing pad per used label; each records its exit kind
            # (i + 1, matching the 1..4 scheme above) in the 'why' flag.
            for i, label in enumerate(all_labels):
                if not code.label_used(label):
                    continue

                is_continue_label = label == code.continue_label

                code.put_label(label)

                if not (should_flush and is_continue_label):
                    if label == code.error_label:
                        self.error_label_used = True
                        self.fetch_parallel_exception(code)

                    code.putln("%s = %d;" % (Naming.parallel_why, i + 1))

                if (self.breaking_label_used and self.is_prange and not
                        is_continue_label):
                    code.put_goto(save_lastprivates_label)
                else:
                    code.put_goto(dont_return_label)

        if self.any_label_used:
            if self.is_prange and self.breaking_label_used:
                # Don't rely on lastprivate, save our lastprivates
                code.put_label(save_lastprivates_label)
                self.save_parallel_vars(code)

            code.put_label(dont_return_label)

            if should_flush and self.breaking_label_used:
                # Make the 'why' flag visible to all threads each iteration.
                code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
    def save_parallel_vars(self, code):
        """
        The following shenanigans are instated when we break, return or
        propagate errors from a prange. In this case we cannot rely on
        lastprivate() to do its job, as no iterations may have executed yet
        in the last thread, leaving the values undefined. It is most likely
        that the breaking thread has well-defined values of the lastprivate
        variables, so we keep those values.
        """
        # Use a uniquely-named critical section so the copies from different
        # parallel sections don't serialize against each other.
        section_name = ("__pyx_parallel_lastprivates%d" %
                        self.critical_section_counter)
        code.putln_openmp("#pragma omp critical(%s)" % section_name)
        ParallelStatNode.critical_section_counter += 1

        code.begin_block() # begin critical section

        c = self.begin_of_parallel_control_block_point

        temp_count = 0
        for entry, (op, lastprivate) in self.privates.iteritems():
            # Python objects are not saved this way; only C-level
            # lastprivate variables need the temp dance.
            if not lastprivate or entry.type.is_pyobject:
                continue

            type_decl = entry.type.declaration_code("")
            temp_cname = "__pyx_parallel_temp%d" % temp_count
            private_cname = entry.cname

            temp_count += 1

            invalid_value = entry.type.invalid_value()
            if invalid_value:
                init = ' = ' + invalid_value
            else:
                init = ''

            # Declare the parallel private in the outer block
            c.putln("%s %s%s;" % (type_decl, temp_cname, init))

            # Initialize before escaping
            code.putln("%s = %s;" % (temp_cname, private_cname))

            self.parallel_private_temps.append((temp_cname, private_cname))

        code.end_block() # end critical section
    def fetch_parallel_exception(self, code):
        """
        As each OpenMP thread may raise an exception, we need to fetch that
        exception from the threadstate and save it for after the parallel
        section where it can be re-raised in the master thread.

        Although it would seem that __pyx_filename, __pyx_lineno and
        __pyx_clineno are only assigned to under exception conditions (i.e.,
        when we have the GIL), and thus should be allowed to be shared without
        any race condition, they are in fact subject to the same race
        conditions that they were previously when they were global variables
        and functions were allowed to release the GIL:

            thread A                thread B
                acquire
                set lineno
                release
                                        acquire
                                        set lineno
                                        release
                acquire
                fetch exception
                release
                                        skip the fetch

                deallocate threadstate  deallocate threadstate
        """
        code.begin_block()
        code.put_ensure_gil(declare_gilstate=True)

        code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
        # Only the first thread to raise stores its exception; later errors
        # in other threads are dropped in favour of the saved one.
        code.putln(
            "if (!%s) {" % Naming.parallel_exc_type)

        code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % self.parallel_exc)
        pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
        code.funcstate.uses_error_indicator = True
        code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
        # We now own a reference to the fetched exception type.
        code.put_gotref(Naming.parallel_exc_type)

        code.putln(
            "}")

        code.put_release_ensured_gil()
        code.end_block()
    def restore_parallel_exception(self, code):
        "Re-raise a parallel exception"
        code.begin_block()
        code.put_ensure_gil(declare_gilstate=True)

        # Hand our saved reference back to the thread state and restore the
        # position info captured by fetch_parallel_exception().
        code.put_giveref(Naming.parallel_exc_type)
        code.putln("__Pyx_ErrRestore(%s, %s, %s);" % self.parallel_exc)
        pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
        code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))

        code.put_release_ensured_gil()
        code.end_block()
def restore_labels(self, code):
"""
Restore all old labels. Call this before the 'else' clause to for
loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
    def end_parallel_control_flow_block(self, code,
                                        break_=False, continue_=False):
        """
        This ends the parallel control flow block and based on how the parallel
        section was exited, takes the corresponding action. The break_ and
        continue_ parameters indicate whether these should be propagated
        outwards:

            for i in prange(...):
                with cython.parallel.parallel():
                    continue

        Here break should be trapped in the parallel block, and propagated to
        the for loop.
        """
        c = self.begin_of_parallel_control_block_point

        # Firstly, always prefer errors over returning, continue or break
        if self.error_label_used:
            # Declare the shared exception slots and thread-local position
            # info at the top of the control flow block.
            c.putln("const char *%s = NULL; int %s = 0, %s = 0;" %
                    self.parallel_pos_info)

            c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" %
                    self.parallel_exc)

            code.putln(
                "if (%s) {" % Naming.parallel_exc_type)
            code.putln("/* This may have been overridden by a continue, "
                       "break or return in another thread. Prefer the error. */")
            code.putln("%s = 4;" % Naming.parallel_why)
            code.putln(
                "}")

        if continue_:
            any_label_used = self.any_label_used
        else:
            any_label_used = self.breaking_label_used

        if any_label_used:
            # __pyx_parallel_why is used, declare and initialize
            c.putln("int %s;" % Naming.parallel_why)
            c.putln("%s = 0;" % Naming.parallel_why)

            code.putln(
                "if (%s) {" % Naming.parallel_why)

            # Restore saved lastprivate values before dispatching.
            for temp_cname, private_cname in self.parallel_private_temps:
                code.putln("%s = %s;" % (private_cname, temp_cname))

            # Dispatch on the recorded exit kind: 1 continue, 2 break,
            # 3 return, 4 error (see trap_parallel_exit).
            code.putln("switch (%s) {" % Naming.parallel_why)
            if continue_:
                code.put("    case 1: ")
                code.put_goto(code.continue_label)

            if break_:
                code.put("    case 2: ")
                code.put_goto(code.break_label)

            code.put("    case 3: ")
            code.put_goto(code.return_label)

            if self.error_label_used:
                code.globalstate.use_utility_code(restore_exception_utility_code)
                code.putln("    case 4:")
                self.restore_parallel_exception(code)
                code.put_goto(code.error_label)

            code.putln("}") # end switch
            code.putln(
                "}") # end if

        code.end_block() # end parallel control flow block
        self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
have_expect_condition = "(defined(__GNUC__) && " \
"(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
def undef_builtin_expect_apple_gcc_bug(self, code):
"""
A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
"""
if not self.parent:
code.undef_builtin_expect(self.redef_condition)
def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
class ParallelWithBlockNode(ParallelStatNode):
    """
    This node represents a 'with cython.parallel.parallel():' block
    """

    valid_keyword_arguments = ['num_threads']

    # Optional num_threads argument node (None when not given).
    num_threads = None

    def analyse_declarations(self, env):
        super(ParallelWithBlockNode, self).analyse_declarations(env)
        if self.args:
            error(self.pos, "cython.parallel.parallel() does not take "
                            "positional arguments")

    def generate_execution_code(self, code):
        # 1) copy closure variables into thread-private temps
        self.declare_closure_privates(code)
        # 2) open the surrounding block that traps break/continue/return/error
        self.setup_parallel_control_flow_block(code)

        code.putln("#ifdef _OPENMP")
        code.put("#pragma omp parallel ")

        if self.privates:
            privates = [e.cname for e in self.privates
                                    if not e.type.is_pyobject]
            code.put('private(%s)' % ', '.join(privates))

        # Remember where additional clauses (private/firstprivate for temps)
        # can be inserted later by privatize_temps().
        self.privatization_insertion_point = code.insertion_point()
        self.put_num_threads(code)
        code.putln("")

        code.putln("#endif /* _OPENMP */")

        code.begin_block()  # parallel block
        self.begin_parallel_block(code)
        self.initialize_privates_to_nan(code)
        code.funcstate.start_collecting_temps()
        self.body.generate_execution_code(code)
        self.trap_parallel_exit(code)
        self.privatize_temps(code)
        self.end_parallel_block(code)
        code.end_block()  # end parallel block

        continue_ = code.label_used(code.continue_label)
        break_ = code.label_used(code.break_label)

        self.restore_labels(code)
        self.end_parallel_control_flow_block(code, break_=break_,
                                             continue_=continue_)
        self.release_closure_privates(code)
class ParallelRangeNode(ParallelStatNode):
    """
    This node represents a 'for i in cython.parallel.prange():' construct.

    target       NameNode       the target iteration variable
    else_clause  Node or None   the else clause of this loop
    """

    child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
                   'chunksize']

    body = target = else_clause = args = None

    # start/stop/step argument nodes of prange(); filled in during
    # analyse_declarations from self.args.
    start = stop = step = None

    is_prange = True

    nogil = None
    schedule = None

    valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']

    def __init__(self, pos, **kwds):
        super(ParallelRangeNode, self).__init__(pos, **kwds)
        # Pretend to be a ForInStatNode for control flow analysis
        self.iterator = PassStatNode(pos)

    def analyse_declarations(self, env):
        super(ParallelRangeNode, self).analyse_declarations(env)
        self.target.analyse_target_declaration(env)
        if self.else_clause is not None:
            self.else_clause.analyse_declarations(env)

        if not self.args or len(self.args) > 3:
            error(self.pos, "Invalid number of positional arguments to prange")
            return

        # Same argument conventions as builtin range().
        if len(self.args) == 1:
            self.stop, = self.args
        elif len(self.args) == 2:
            self.start, self.stop = self.args
        else:
            self.start, self.stop, self.step = self.args

        if hasattr(self.schedule, 'decode'):
            self.schedule = self.schedule.decode('ascii')

        if self.schedule not in (None, 'static', 'dynamic', 'guided',
                                 'runtime'):
            error(self.pos, "Invalid schedule argument to prange: %s" %
                  (self.schedule,))

    def analyse_expressions(self, env):
        # A prange body implicitly runs without the GIL when nogil=True.
        was_nogil = env.nogil
        if self.nogil:
            env.nogil = True

        if self.target is None:
            error(self.pos, "prange() can only be used as part of a for loop")
            return self

        self.target = self.target.analyse_target_types(env)

        if not self.target.type.is_numeric:
            # Not a valid type, assume one for now anyway

            if not self.target.type.is_pyobject:
                # nogil_check will catch the is_pyobject case
                error(self.target.pos,
                      "Must be of numeric type, not %s" % self.target.type)

            self.index_type = PyrexTypes.c_py_ssize_t_type
        else:
            self.index_type = self.target.type
            if not self.index_type.signed:
                warning(self.target.pos,
                        "Unsigned index type not allowed before OpenMP 3.0",
                        level=2)

        # Setup start, stop and step, allocating temps if needed
        self.names = 'start', 'stop', 'step'
        start_stop_step = self.start, self.stop, self.step

        for node, name in zip(start_stop_step, self.names):
            if node is not None:
                node.analyse_types(env)
                if not node.type.is_numeric:
                    error(node.pos, "%s argument must be numeric" % name)
                    continue

                if not node.is_literal:
                    node = node.coerce_to_temp(env)
                    setattr(self, name, node)

                # As we range from 0 to nsteps, computing the index along the
                # way, we need a fitting type for 'i' and 'nsteps'
                self.index_type = PyrexTypes.widest_numeric_type(
                                        self.index_type, node.type)

        if self.else_clause is not None:
            self.else_clause = self.else_clause.analyse_expressions(env)

        # Although not actually an assignment in this scope, it should be
        # treated as such to ensure it is unpacked if a closure temp, and to
        # ensure lastprivate behaviour and propagation. If the target index is
        # not a NameNode, it won't have an entry, and an error was issued by
        # ParallelRangeTransform
        if hasattr(self.target, 'entry'):
            self.assignments[self.target.entry] = self.target.pos, None

        node = super(ParallelRangeNode, self).analyse_expressions(env)

        if node.chunksize:
            if not node.schedule:
                error(node.chunksize.pos,
                      "Must provide schedule with chunksize")
            elif node.schedule == 'runtime':
                error(node.chunksize.pos,
                      "Chunksize not valid for the schedule runtime")
            elif (node.chunksize.type.is_int and
                  node.chunksize.is_literal and
                  node.chunksize.compile_time_value(env) <= 0):
                # NOTE(review): the condition also rejects 0, while the
                # message only mentions negative values — confirm wording.
                error(node.chunksize.pos, "Chunksize must not be negative")

            node.chunksize = node.chunksize.coerce_to(
                                PyrexTypes.c_int_type, env).coerce_to_temp(env)

        if node.nogil:
            env.nogil = was_nogil

        node.is_nested_prange = node.parent and node.parent.is_prange
        if node.is_nested_prange:
            # A nested prange shares its assignments/privates with the
            # outermost enclosing prange.
            parent = node
            while parent.parent and parent.parent.is_prange:
                parent = parent.parent

            parent.assignments.update(node.assignments)
            parent.privates.update(node.privates)
            parent.assigned_nodes.extend(node.assigned_nodes)

        return node

    def nogil_check(self, env):
        names = 'start', 'stop', 'step', 'target'
        nodes = self.start, self.stop, self.step, self.target
        for name, node in zip(names, nodes):
            if node is not None and node.type.is_pyobject:
                error(node.pos, "%s may not be a Python object "
                                "as we don't have the GIL" % name)

    def generate_execution_code(self, code):
        """
        Generate code in the following steps

            1)  copy any closure variables determined thread-private
                into temporaries

            2)  allocate temps for start, stop and step

            3)  generate a loop that calculates the total number of steps,
                which then computes the target iteration variable for every step:

                    for i in prange(start, stop, step):
                        ...

                becomes

                    nsteps = (stop - start) / step;
                    i = start;

                    #pragma omp parallel for lastprivate(i)
                    for (temp = 0; temp < nsteps; temp++) {
                        i = start + step * temp;
                        ...
                    }

                Note that accumulation of 'i' would have a data dependency
                between iterations.

                Also, you can't do this

                    for (i = start; i < stop; i += step)
                        ...

                as the '<' operator should become '>' for descending loops.
                'for i from x < i < y:' does not suffer from this problem
                as the relational operator is known at compile time!

            4)  release our temps and write back any private closure variables
        """
        self.declare_closure_privates(code)

        # This can only be a NameNode
        target_index_cname = self.target.entry.cname

        # This will be used as the dict to format our code strings, holding
        # the start, stop , step, temps and target cnames
        fmt_dict = {
            'target': target_index_cname,
        }

        # Setup start, stop and step, allocating temps if needed
        start_stop_step = self.start, self.stop, self.step
        defaults = '0', '0', '1'
        for node, name, default in zip(start_stop_step, self.names, defaults):
            if node is None:
                result = default
            elif node.is_literal:
                result = node.get_constant_c_result_code()
            else:
                node.generate_evaluation_code(code)
                result = node.result()

            fmt_dict[name] = result

        fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
        fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)

        # TODO: check if the step is 0 and if so, raise an exception in a
        # 'with gil' block. For now, just abort
        code.putln("if (%(step)s == 0) abort();" % fmt_dict)

        self.setup_parallel_control_flow_block(code) # parallel control flow block
        self.control_flow_var_code_point = code.insertion_point()

        # Note: nsteps is private in an outer scope if present
        code.putln("%(nsteps)s = (%(stop)s - %(start)s) / %(step)s;" % fmt_dict)

        # The target iteration variable might not be initialized, do it only if
        # we are executing at least 1 iteration, otherwise we should leave the
        # target unaffected. The target iteration variable is firstprivate to
        # shut up compiler warnings caused by lastprivate, as the compiler
        # erroneously believes that nsteps may be <= 0, leaving the private
        # target index uninitialized
        code.putln("if (%(nsteps)s > 0)" % fmt_dict)
        code.begin_block() # if block
        self.generate_loop(code, fmt_dict)
        code.end_block() # end if block

        self.restore_labels(code)

        if self.else_clause:
            # Only run the else clause when the loop was not left early
            # (why < 2 means normal completion or a plain continue).
            if self.breaking_label_used:
                code.put("if (%s < 2)" % Naming.parallel_why)

            code.begin_block() # else block
            code.putln("/* else */")
            self.else_clause.generate_execution_code(code)
            code.end_block() # end else block

        # ------ cleanup ------
        self.end_parallel_control_flow_block(code) # end parallel control flow block

        # And finally, release our privates and write back any closure
        # variables
        for temp in start_stop_step:
            if temp is not None:
                temp.generate_disposal_code(code)
                temp.free_temps(code)

        code.funcstate.release_temp(fmt_dict['i'])
        code.funcstate.release_temp(fmt_dict['nsteps'])

        self.release_closure_privates(code)

    def generate_loop(self, code, fmt_dict):
        # Nested pranges compile their pragmas away entirely ("#if 0") —
        # only the outermost loop is parallelized.
        if self.is_nested_prange:
            code.putln("#if 0")
        else:
            code.putln("#ifdef _OPENMP")

        if not self.is_parallel:
            # prange inside an enclosing 'with parallel()' block: emit only
            # the worksharing pragma; privatization clauses go to the parent.
            code.put("#pragma omp for")
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.parent.privatization_insertion_point
        else:
            code.put("#pragma omp parallel")
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.privatization_insertion_point
            code.putln("")
            code.putln("#endif /* _OPENMP */")

            code.begin_block() # pragma omp parallel begin block

            # Initialize the GIL if needed for this thread
            self.begin_parallel_block(code)

            if self.is_nested_prange:
                code.putln("#if 0")
            else:
                code.putln("#ifdef _OPENMP")
            code.put("#pragma omp for")

        for entry, (op, lastprivate) in self.privates.iteritems():
            # Don't declare the index variable as a reduction
            if op and op in "+*-&^|" and entry != self.target.entry:
                if entry.type.is_pyobject:
                    error(self.pos, "Python objects cannot be reductions")
                else:
                    #code.put(" reduction(%s:%s)" % (op, entry.cname))
                    # This is the only way reductions + nesting works in gcc4.5
                    reduction_codepoint.put(
                                " reduction(%s:%s)" % (op, entry.cname))
            else:
                if entry == self.target.entry:
                    # firstprivate quells bogus "may be used uninitialized"
                    # warnings triggered by lastprivate (see caller comment).
                    code.put(" firstprivate(%s)" % entry.cname)
                    code.put(" lastprivate(%s)" % entry.cname)
                    continue

                if not entry.type.is_pyobject:
                    if lastprivate:
                        private = 'lastprivate'
                    else:
                        private = 'private'

                    code.put(" %s(%s)" % (private, entry.cname))

        if self.schedule:
            if self.chunksize:
                chunksize = ", %s" % self.evaluate_before_block(code,
                                                                self.chunksize)
            else:
                chunksize = ""

            code.put(" schedule(%s%s)" % (self.schedule, chunksize))

        self.put_num_threads(reduction_codepoint)

        code.putln("")
        code.putln("#endif /* _OPENMP */")

        code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
        code.begin_block() # for loop block

        guard_around_body_codepoint = code.insertion_point()

        # Start if guard block around the body. This may be unnecessary, but
        # at least it doesn't spoil indentation
        code.begin_block()

        code.putln("%(target)s = %(start)s + %(step)s * %(i)s;" % fmt_dict)
        self.initialize_privates_to_nan(code, exclude=self.target.entry)

        if self.is_parallel:
            code.funcstate.start_collecting_temps()

        self.body.generate_execution_code(code)
        self.trap_parallel_exit(code, should_flush=True)
        self.privatize_temps(code)

        if self.breaking_label_used:
            # Put a guard around the loop body in case return, break or
            # exceptions might be used
            guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)

        code.end_block() # end guard around loop body
        code.end_block() # end for loop block

        if self.is_parallel:
            # Release the GIL and deallocate the thread state
            self.end_parallel_block(code)
            code.end_block() # pragma omp parallel end block
class CnameDecoratorNode(StatNode):
    """
    This node is for the cname decorator in CythonUtilityCode:

        @cname('the_cname')
        cdef func(...):
            ...

    In case of a cdef class the cname specifies the objstruct_cname.

    node    the node to which the cname decorator is applied
    cname   the cname the node should get
    """

    child_attrs = ['node']

    def analyse_declarations(self, env):
        self.node.analyse_declarations(env)

        node = self.node
        if isinstance(node, CompilerDirectivesNode):
            node = node.body.stats[0]

        self.is_function = isinstance(node, FuncDefNode)
        is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode,
                                              CEnumDefNode))
        e = node.entry

        if self.is_function:
            e.cname = self.cname
            e.func_cname = self.cname
            e.used = True
            if e.pyfunc_cname and '.' in e.pyfunc_cname:
                e.pyfunc_cname = self.mangle(e.pyfunc_cname)
        elif is_struct_or_enum:
            e.cname = e.type.cname = self.cname
        else:
            # cdef class: rename the object struct, type object and type
            # pointer after the given cname, and re-prefix all methods.
            scope = node.scope

            e.cname = self.cname
            e.type.objstruct_cname = self.cname + '_obj'
            e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
            e.type.typeptr_cname = self.cname + '_type'
            e.type.scope.namespace_cname = e.type.typeptr_cname

            e.as_variable.cname = py_object_type.cast_code(e.type.typeptr_cname)

            scope.scope_prefix = self.cname + "_"

            for name, entry in scope.entries.iteritems():
                if entry.func_cname:
                    # NOTE(review): mangles entry.cname (not func_cname) here,
                    # unlike the pyfunc_cname branch below — confirm intended.
                    entry.func_cname = self.mangle(entry.cname)
                if entry.pyfunc_cname:
                    entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)

    def mangle(self, cname):
        if '.' in cname:
            # remove __pyx_base from func_cname
            cname = cname.split('.')[-1]
        return '%s_%s' % (self.cname, cname)

    def analyse_expressions(self, env):
        self.node = self.node.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        "Ensure a prototype for every @cname method in the right place"
        if self.is_function and env.is_c_class_scope:
            # method in cdef class, generate a prototype in the header
            h_code = code.globalstate['utility_code_proto']

            if isinstance(self.node, DefNode):
                self.node.generate_function_header(
                            h_code, with_pymethdef=False, proto_only=True)
            else:
                import ModuleNode
                entry = self.node.entry
                cname = entry.cname
                # Temporarily swap in func_cname so the declaration is
                # emitted under the C function name.
                entry.cname = entry.func_cname

                ModuleNode.generate_cfunction_declaration(
                        entry,
                        env.global_scope(),
                        h_code,
                        definition=True)

                entry.cname = cname

        self.node.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        self.node.generate_execution_code(code)
#------------------------------------------------------------------------------------
#
#  Runtime support code
#
#------------------------------------------------------------------------------------

# Branch prediction hint macros: map likely()/unlikely() onto
# __builtin_expect() when compiling with GCC > 2.95, otherwise make them
# no-ops. Selection is controlled by the gcc_branch_hints compiler option.
if Options.gcc_branch_hints:
    branch_prediction_macros = """
/* Test for GCC > 2.95 */
#if defined(__GNUC__) \
    && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
    branch_prediction_macros = """
#define likely(x) (x)
#define unlikely(x) (x)
"""

#------------------------------------------------------------------------------------

# Helpers for the print statement, loaded from the shared utility files.
printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")

#------------------------------------------------------------------------------------

# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*.  When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()

restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")

#------------------------------------------------------------------------------------

get_exception_tuple_utility_code = UtilityCode(proto="""
static PyObject *__Pyx_GetExceptionTuple(void); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
impl = """
static PyObject *__Pyx_GetExceptionTuple(void) {
    PyObject *type = NULL, *value = NULL, *tb = NULL;
    if (__Pyx_GetException(&type, &value, &tb) == 0) {
        PyObject* exc_info = PyTuple_New(3);
        if (exc_info) {
            Py_INCREF(type);
            Py_INCREF(value);
            Py_INCREF(tb);
            PyTuple_SET_ITEM(exc_info, 0, type);
            PyTuple_SET_ITEM(exc_info, 1, value);
            PyTuple_SET_ITEM(exc_info, 2, tb);
            return exc_info;
        }
    }
    return NULL;
}
""",
requires=[get_exception_utility_code])
| bsd-3-clause |
carlos-lopez-garces/mapnik-trunk | scons/scons-local-1.2.0/SCons/Tool/packaging/__init__.py | 12 | 10691 | """SCons.Tool.Packaging
SCons Packaging Tool.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/__init__.py 3842 2008/12/20 22:59:52 scons"
import SCons.Environment
from SCons.Variables import *
from SCons.Errors import *
from SCons.Util import is_List, make_path_relative
from SCons.Warnings import warn, Warning
import os, imp
import SCons.Defaults
__all__ = [ 'src_targz', 'src_tarbz2', 'src_zip', 'tarbz2', 'targz', 'zip', 'rpm', 'msi', 'ipk' ]
#
# Utility and Builder function
#
def Tag(env, target, source, *more_tags, **kw_tags):
    """ Tag a file with the given arguments, just sets the accordingly named
    attribute on the file object.

    TODO: FIXME
    """
    if not target:
        target=source
        first_tag=None
    else:
        first_tag=source

    if first_tag:
        # NOTE(review): first_tag[0] takes the first element of the given
        # source sequence (or the first character if it is a string) as the
        # tag name — verify this is the intended key.
        kw_tags[first_tag[0]] = ''

    if len(kw_tags) == 0 and len(more_tags) == 0:
        raise UserError, "No tags given."

    # XXX: sanity checks
    # positional tags become value-less keyword tags
    for x in more_tags:
        kw_tags[x] = ''

    if not SCons.Util.is_List(target):
        target=[target]
    else:
        # hmm, sometimes the target list, is a list of a list
        # make sure it is flattened prior to processing.
        # TODO: perhaps some bug ?!?
        target=env.Flatten(target)

    for t in target:
        for (k,v) in kw_tags.items():
            # all file tags have to start with PACKAGING_, so we can later
            # differentiate between "normal" object attributes and the
            # packaging attributes. As the user should not be bothered with
            # that, the prefix will be added here if missing.
            #if not k.startswith('PACKAGING_'):
            if k[:10] != 'PACKAGING_':
                k='PACKAGING_'+k
            setattr(t, k, v)
def Package(env, target=None, source=None, **kw):
""" Entry point for the package tool.
"""
# check if we need to find the source files ourself
if not source:
source = env.FindInstalledFiles()
if len(source)==0:
raise UserError, "No source for Package() given"
# decide which types of packages shall be built. Can be defined through
# four mechanisms: command line argument, keyword argument,
# environment argument and default selection( zip or tar.gz ) in that
# order.
try: kw['PACKAGETYPE']=env['PACKAGETYPE']
except KeyError: pass
if not kw.get('PACKAGETYPE'):
from SCons.Script import GetOption
kw['PACKAGETYPE'] = GetOption('package_type')
if kw['PACKAGETYPE'] == None:
if env['BUILDERS'].has_key('Tar'):
kw['PACKAGETYPE']='targz'
elif env['BUILDERS'].has_key('Zip'):
kw['PACKAGETYPE']='zip'
else:
raise UserError, "No type for Package() given"
PACKAGETYPE=kw['PACKAGETYPE']
if not is_List(PACKAGETYPE):
PACKAGETYPE=string.split(PACKAGETYPE, ',')
# load the needed packagers.
def load_packager(type):
try:
file,path,desc=imp.find_module(type, __path__)
return imp.load_module(type, file, path, desc)
except ImportError, e:
raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
packagers=map(load_packager, PACKAGETYPE)
# set up targets and the PACKAGEROOT
try:
# fill up the target list with a default target name until the PACKAGETYPE
# list is of the same size as the target list.
if not target: target = []
size_diff = len(PACKAGETYPE)-len(target)
default_name = "%(NAME)s-%(VERSION)s"
if size_diff>0:
default_target = default_name%kw
target.extend( [default_target]*size_diff )
if not kw.has_key('PACKAGEROOT'):
kw['PACKAGEROOT'] = default_name%kw
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
# setup the source files
source=env.arg2nodes(source, env.fs.Entry)
# call the packager to setup the dependencies.
targets=[]
try:
for packager in packagers:
t=[target.pop(0)]
t=apply(packager.package, [env,t,source], kw)
targets.extend(t)
assert( len(target) == 0 )
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (e.args[0],packager.__name__) )
except TypeError, e:
# this exception means that a needed argument for the packager is
# missing. As our packagers get their "tags" as named function
# arguments we need to find out which one is missing.
from inspect import getargspec
args,varargs,varkw,defaults=getargspec(packager.package)
if defaults!=None:
args=args[:-len(defaults)] # throw away arguments with default values
map(args.remove, 'env target source'.split())
# now remove any args for which we have a value in kw.
#args=[x for x in args if not kw.has_key(x)]
args=filter(lambda x, kw=kw: not kw.has_key(x), args)
if len(args)==0:
raise # must be a different error, so reraise
elif len(args)==1:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (args[0],packager.__name__) )
else:
raise SCons.Errors.UserError( "Missing Packagetags '%s' for %s packager"\
% (", ".join(args),packager.__name__) )
target=env.arg2nodes(target, env.fs.Entry)
targets.extend(env.Alias( 'package', targets ))
return targets
#
# SCons tool initialization functions
#
added = None
def generate(env):
    """Make the Package and Tag builders available on *env*.

    The --package-type command line option is registered only once per
    process, guarded by the module-level ``added`` flag.
    """
    from SCons.Script import AddOption
    global added
    if added is None:
        added = 1
        AddOption('--package-type',
                  dest='package_type',
                  default=None,
                  type="string",
                  action="store",
                  help='The type of package to create.')
    # install both builders if either one is still missing
    builders = env['BUILDERS']
    if 'Package' not in builders or 'Tag' not in builders:
        builders['Package'] = Package
        builders['Tag'] = Tag
def exists(env):
    """This tool has no external requirements; it is always available."""
    return 1
# XXX
def options(opts):
    """Register the PACKAGETYPE variable on *opts* (a SCons Variables container).

    Allowed values are the packager module names listed in this package's
    ``__all__``; matching is case-insensitive (ignorecase=2).
    """
    opts.AddVariables(
      EnumVariable( 'PACKAGETYPE',
                     'the type of package to create.',
                     None, allowed_values=map( str, __all__ ),
                     ignorecase=2
                  )
    )
#
# Internal utility functions
#
def copy_attr(f1, f2):
    """ copies the special packaging file attributes from f1 to f2.

    Only attributes whose name starts with 'PACKAGING_' are copied, and
    only when f2 does not already define an attribute of that name.
    """
    # comprehension replaces the old filter/lambda pair and the duplicate
    # commented-out implementation that had drifted next to it
    pattrs = [x for x in dir(f1)
              if not hasattr(f2, x) and x.startswith('PACKAGING_')]
    for attr in pattrs:
        setattr(f2, attr, getattr(f1, attr))
def putintopackageroot(target, source, env, pkgroot, honor_install_location=1):
    """ Uses the CopyAs builder to copy all source files to the directory given
    in pkgroot.
    If honor_install_location is set and the copied source file has an
    PACKAGING_INSTALL_LOCATION attribute, the PACKAGING_INSTALL_LOCATION is
    used as the new name of the source file under pkgroot.
    The source file will not be copied if it is already under the pkgroot
    directory.
    All attributes of the source file will be copied to the new file.
    """
    # make sure the packageroot is a Dir object.
    if SCons.Util.is_String(pkgroot): pkgroot=env.Dir(pkgroot)
    if not SCons.Util.is_List(source): source=[source]
    new_source = []
    for file in source:
        if SCons.Util.is_String(file): file = env.File(file)
        if file.is_under(pkgroot):
            # already inside the package root: keep as-is
            new_source.append(file)
        else:
            if hasattr(file, 'PACKAGING_INSTALL_LOCATION') and\
               honor_install_location:
                new_name=make_path_relative(file.PACKAGING_INSTALL_LOCATION)
            else:
                new_name=make_path_relative(file.get_path())
            # copy into the package root and carry the PACKAGING_* tags over
            new_file=pkgroot.File(new_name)
            new_file=env.CopyAs(new_file, file)[0]
            copy_attr(file, new_file)
            new_source.append(new_file)
    return (target, new_source)
def stripinstallbuilder(target, source, env):
    """ strips the install builder action from the source list and stores
    the final installation location as the "PACKAGING_INSTALL_LOCATION" of
    the source of the source file. This effectively removes the final installed
    files from the source list while remembering the installation location.
    It also warns about files which have no install builder attached.
    """
    def has_no_install_location(file):
        # a file counts as "installed" only when produced by the
        # Install/InstallAs builders
        return not (file.has_builder() and\
            hasattr(file.builder, 'name') and\
            (file.builder.name=="InstallBuilder" or\
             file.builder.name=="InstallAsBuilder"))
    if len(filter(has_no_install_location, source)):
        warn(Warning, "there are files to package which have no\
 InstallBuilder attached, this might lead to irreproducible packages")
    n_source=[]
    for s in source:
        if has_no_install_location(s):
            n_source.append(s)
        else:
            # replace the installed node by its sources, remembering where
            # each one was going to be installed
            for ss in s.sources:
                n_source.append(ss)
                copy_attr(s, ss)
                setattr(ss, 'PACKAGING_INSTALL_LOCATION', s.get_path())
    return (target, n_source)
| lgpl-2.1 |
1013553207/django | tests/admin_views/customadmin.py | 379 | 2366 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http import HttpResponse
from . import admin as base_admin, forms, models
class Admin2(admin.AdminSite):
    """A second AdminSite overriding every template hook, exercised by
    tests.CustomAdminSiteTests."""
    app_index_template = 'custom_admin/app_index.html'
    login_form = forms.CustomAdminAuthenticationForm
    login_template = 'custom_admin/login.html'
    logout_template = 'custom_admin/logout.html'
    index_template = ['custom_admin/index.html'] # a list, to test fix for #18697
    password_change_template = 'custom_admin/password_change_form.html'
    password_change_done_template = 'custom_admin/password_change_done.html'
    # A custom index view.
    def index(self, request, extra_context=None):
        # inject extra context so the test can assert it reaches the template
        return super(Admin2, self).index(request, {'foo': '*bar*'})
    def get_urls(self):
        # prepend a custom view so it takes precedence over default patterns
        return [
            url(r'^my_view/$', self.admin_view(self.my_view), name='my_view'),
        ] + super(Admin2, self).get_urls()
    def my_view(self, request):
        return HttpResponse("Django is a magical pony!")
    def password_change(self, request, extra_context=None):
        # inject extra context for the password change template test
        return super(Admin2, self).password_change(request, {'spam': 'eggs'})
class UserLimitedAdmin(UserAdmin):
    # used for testing password change on a user not in queryset
    def get_queryset(self, request):
        # superusers exist but are excluded, so they are "not in queryset"
        qs = super(UserLimitedAdmin, self).get_queryset(request)
        return qs.filter(is_superuser=False)
class CustomPwdTemplateUserAdmin(UserAdmin):
    # overrides only the password-change template, wrapped in a list
    change_user_password_template = ['admin/auth/user/change_password.html'] # a list, to test fix for #18697
# Instantiate the custom sites and register the test models on them.
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
# second site with only the custom password-template user admin
simple_site = Admin2(name='admin4')
simple_site.register(User, CustomPwdTemplateUserAdmin)
NiekB4/2016_Group07_Transit-BuiltEnvironment | PTStopCalc/utility_functions.py | 6 | 32334 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
SpatialDecision
A QGIS plugin
This is a SDSS template for the GEO1005 course
-------------------
begin : 2015-11-02
git sha : $Format:%H$
copyright : (C) 2015 by Jorge Gil, TU Delft
email : j.a.lopesgil@tudelft.nl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4 import QtGui, QtCore
from qgis.core import *
from qgis.networkanalysis import *
from pyspatialite import dbapi2 as sqlite
import psycopg2 as pgsql
import numpy as np
import math
import os.path
#
# Layer functions
#
def getLegendLayers(iface, geom='all', provider='all'):
    """
    Return list of layer objects in the legend, with specific geometry type and/or data provider
    :param iface: QgsInterface
    :param geom: string ('point', 'linestring', 'polygon')
    :param provider: string
    :return: list QgsVectorLayer
    """
    layers_list = []
    for layer in iface.legendInterface().layers():
        add_layer = False
        if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            # NOTE(review): "geom is 'all'" relies on string interning;
            # '==' would be the safe comparison here (same below)
            if layer.hasGeometryType() and (geom is 'all' or layer.geometryType() in geom):
                if provider is 'all' or layer.dataProvider().name() in provider:
                    add_layer = True
        if add_layer:
            layers_list.append(layer)
    return layers_list
def getCanvasLayers(iface, geom='all', provider='all'):
    """Return list of valid QgsVectorLayer in QgsMapCanvas, with specific geometry type and/or data provider"""
    layers_list = []
    for layer in iface.mapCanvas().layers():
        add_layer = False
        if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            if layer.hasGeometryType() and (geom is 'all' or layer.geometryType() in geom):
                if provider is 'all' or layer.dataProvider().name() in provider:
                    add_layer = True
        if add_layer:
            layers_list.append(layer)
    return layers_list
def getRegistryLayers(geom='all', provider='all'):
    """Return list of valid QgsVectorLayer in QgsMapLayerRegistry, with specific geometry type and/or data provider"""
    layers_list = []
    for layer in QgsMapLayerRegistry.instance().mapLayers().values():
        add_layer = False
        if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            if layer.hasGeometryType() and (geom is 'all' or layer.geometryType() in geom):
                if provider is 'all' or layer.dataProvider().name() in provider:
                    add_layer = True
        if add_layer:
            layers_list.append(layer)
    return layers_list
def isLayerProjected(layer):
    """Return True if `layer` uses a projected (non-geographic) CRS."""
    projected = False
    if layer:
        projected = not layer.crs().geographicFlag()
    return projected
def getLegendLayerByName(iface, name):
    """Return the legend layer called `name`, or None if absent (last match wins)."""
    layer = None
    for i in iface.legendInterface().layers():
        if i.name() == name:
            layer = i
    return layer
def getCanvasLayerByName(iface, name):
    """Return the canvas layer called `name`, or None if absent (last match wins)."""
    layer = None
    for i in iface.mapCanvas().layers():
        if i.name() == name:
            layer = i
    return layer
def getLayersListNames(layerslist):
    """Return the display names of the given layers, preserving order."""
    return [lyr.name() for lyr in layerslist]
def getLayerPath(layer):
    """Return the on-disk location of `layer`: the database file for a
    spatialite provider, the containing directory for an ogr provider,
    '' otherwise."""
    path = ''
    provider = layer.dataProvider()
    provider_type = provider.name()
    if provider_type == 'spatialite':
        uri = QgsDataSourceURI(provider.dataSourceUri())
        path = uri.database()
    elif provider_type == 'ogr':
        uri = provider.dataSourceUri()
        path = os.path.dirname(uri)
    return path
def reloadLayer(layer):
    """Drop `layer` from the registry and re-add a fresh copy from its
    original source; returns the new layer (or None for other providers)."""
    layer_name = layer.name()
    layer_provider = layer.dataProvider().name()
    new_layer = None
    if layer_provider in ('spatialite','postgres'):
        uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
        new_layer = QgsVectorLayer(uri.uri(), layer_name, layer_provider)
    elif layer_provider == 'ogr':
        uri = layer.dataProvider().dataSourceUri()
        # ogr URIs can carry a "|layerid=..." suffix; keep only the file part
        new_layer = QgsVectorLayer(uri.split("|")[0], layer_name, layer_provider)
    QgsMapLayerRegistry.instance().removeMapLayer(layer.id())
    if new_layer:
        QgsMapLayerRegistry.instance().addMapLayer(new_layer)
    return new_layer
#
# Field functions
#
def fieldExists(layer, name):
    """Return True when `layer` has a field named `name`."""
    return name in getFieldNames(layer)
def getFieldNames(layer):
    """Return the names of all fields exposed by `layer`'s data provider
    (empty list for a missing layer or provider)."""
    if not (layer and layer.dataProvider()):
        return []
    return [field.name() for field in layer.dataProvider().fields()]
def getNumericFields(layer, type='all'):
    """Return the numeric QgsField objects of `layer`.

    `type` is 'all' to accept every numeric QVariant type, or a single
    QVariant type code. Bug fix: the original wrote ``types = (type)``,
    which is not a tuple, so ``field.type() in types`` raised TypeError
    whenever a specific type was requested; use ``(type,)``.
    """
    fields = []
    if type == 'all':
        types = (QtCore.QVariant.Int, QtCore.QVariant.LongLong, QtCore.QVariant.Double,
                 QtCore.QVariant.UInt, QtCore.QVariant.ULongLong)
    else:
        types = (type,)
    if layer and layer.dataProvider():
        for field in layer.dataProvider().fields():
            if field.type() in types:
                fields.append(field)
    return fields
def getNumericFieldNames(layer, type='all'):
    """Return the names of the numeric fields of `layer`.

    `type` is 'all' for every numeric QVariant type, or one QVariant type
    code. Bug fix: ``types = (type)`` was not a tuple, so the membership
    test raised TypeError for a specific type; use ``(type,)``.
    """
    field_names = []
    if type == 'all':
        types = (QtCore.QVariant.Int, QtCore.QVariant.LongLong, QtCore.QVariant.Double,
                 QtCore.QVariant.UInt, QtCore.QVariant.ULongLong)
    else:
        types = (type,)
    if layer and layer.dataProvider():
        for field in layer.dataProvider().fields():
            if field.type() in types:
                field_names.append(field.name())
    return field_names
def getFieldIndex(layer, name):
    """Return the index of field `name` in `layer` (-1 when missing)."""
    idx = layer.dataProvider().fields().indexFromName(name)
    return idx
def fieldHasValues(layer, name):
    """Return True if field `name` holds at least one non-NULL value.
    NOTE(review): implicitly returns None when the layer/field is missing."""
    if layer and fieldExists(layer, name):
        # find fields that only have NULL values
        idx = getFieldIndex(layer, name)
        maxval = layer.maximumValue(idx)
        minval = layer.minimumValue(idx)
        if maxval == NULL and minval == NULL:
            return False
        else:
            return True
def fieldHasNullValues(layer, name):
    """Return True if field `name` contains NULL values.
    NOTE(review): implicitly returns None when the layer/field is missing."""
    if layer and fieldExists(layer, name):
        idx = getFieldIndex(layer, name)
        vals = layer.uniqueValues(idx,1)
        # depending on the provider list is empty or has NULL value in first position
        if not vals or (len(vals) == 1 and vals[0] == NULL):
            return True
        else:
            return False
def getFieldValues(layer, fieldname, null=True, selection=False):
    """Return ([values], [feature ids]) for `fieldname`; `null=False` skips
    NULL values, `selection=True` restricts to the current selection."""
    attributes = []
    ids = []
    if fieldExists(layer, fieldname):
        if selection:
            features = layer.selectedFeatures()
        else:
            # fetch only the one attribute we need
            request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, fieldname)])
            features = layer.getFeatures(request)
        if null:
            for feature in features:
                attributes.append(feature.attribute(fieldname))
                ids.append(feature.id())
        else:
            for feature in features:
                val = feature.attribute(fieldname)
                if val != NULL:
                    attributes.append(val)
                    ids.append(feature.id())
    return attributes, ids
def addFields(layer, names, types):
    """Append the fields `names`/`types` to `layer` when missing; returns
    the result of the last provider call (False if nothing was added)."""
    # types can be QVariant.Int, QVariant.Double, QVariant.String
    res = False
    if layer:
        provider = layer.dataProvider()
        caps = provider.capabilities()
        if caps & QgsVectorDataProvider.AddAttributes:
            fields = provider.fields()
            for i, name in enumerate(names):
                #add new field if it doesn't exist
                if fields.indexFromName(name) == -1:
                    res = provider.addAttributes([QgsField(name, types[i])])
            #apply changes if any made
            if res:
                layer.updateFields()
    return res
def updateField(layer, name, expression):
    """Set field `name` of every feature to the value of the QgsExpression
    string `expression`; returns True when the edit session was committed."""
    res = False
    if layer:
        provider = layer.dataProvider()
        caps = provider.capabilities()
        if caps & QgsVectorDataProvider.AddAttributes:
            #field = layer.fieldNameIndex(name)
            calc = QgsExpression(expression)
            layer.startEditing()
            for feature in layer.getFeatures():
                value = calc.evaluate(feature)
                feature[name] = value
                layer.updateFeature(feature)
                #layer.changeAttributeValue(feature.id(), field, value)
            layer.commitChanges()
            res = True
    return res
#
# Feature functions
#
def getFeaturesByListValues(layer, name, values=list):
    """Return {feature id: value} for features whose field `name` is in `values`.
    NOTE(review): the default ``values=list`` is the list *type*, not a list."""
    features = {}
    if layer:
        if fieldExists(layer, name):
            request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
            iterator = layer.getFeatures(request)
            for feature in iterator:
                att = feature.attribute(name)
                if att in values:
                    features[feature.id()] = att
    return features
def selectFeaturesByListValues(layer, name, values=list):
    """Select (highlight) the features whose field `name` is in `values`."""
    features = []
    if layer:
        if fieldExists(layer, name):
            request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
            iterator = layer.getFeatures(request)
            for feature in iterator:
                att = feature.attribute(name)
                if att in values:
                    features.append(feature.id())
            layer.select(features)
def getFeaturesByRangeValues(layer, name, min, max):
    """Return {feature id: value} for features with min <= field `name` <= max."""
    features = {}
    if layer:
        if fieldExists(layer, name):
            request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
            iterator = layer.getFeatures(request)
            for feature in iterator:
                att = feature.attribute(name)
                if min <= att <= max:
                    features[feature.id()] = att
    return features
def selectFeaturesByRangeValues(layer, name, min, max):
    """Select the features with min <= field `name` <= max."""
    features = []
    if layer:
        if fieldExists(layer, name):
            request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
            iterator = layer.getFeatures(request)
            for feature in iterator:
                att = feature.attribute(name)
                if min <= att <= max:
                    features.append(feature.id())
            layer.select(features)
def getFeaturesByExpression(layer, expression):
    """Return {feature id: attributes} for features matching the QgsExpression string."""
    features = {}
    if layer:
        request = QgsFeatureRequest().setFilterExpression(expression)
        iterator = layer.getFeatures(request)
        for feature in iterator:
            features[feature.id()] = feature.attributes()
    return features
def selectFeaturesByExpression(layer, expression):
    """Select the features matching the QgsExpression string."""
    features = []
    if layer:
        request = QgsFeatureRequest().setFilterExpression(expression)
        iterator = layer.getFeatures(request)
        for feature in iterator:
            features.append(feature.id())
        layer.select(features)
def filterFeaturesByExpression(layer, expression):
    """Apply `expression` as the layer's subset (filter) string; True on success."""
    success = False
    if layer:
        try:
            success = layer.setSubsetString(expression)
        except:
            success = False
    return success
def getAllFeatures(layer):
    """Return {feature id: attributes} for every feature of `layer`."""
    allfeatures = {}
    if layer:
        features = layer.getFeatures()
        allfeatures = {feature.id(): feature.attributes() for feature in features}
    return allfeatures
def getAllFeatureIds(layer):
    """Return the ids of all features of `layer`."""
    ids = []
    if layer:
        features = layer.getFeatures()
        ids = [feature.id() for feature in features]
    return ids
def getAllFeatureValues(layer, name):
    """Return the values of field `name` for all features of `layer`."""
    values = []
    if layer:
        features = layer.getFeatures()
        values = [feature.attribute(name) for feature in features]
    return values
def getAllFeatureSymbols(layer):
    """Return {feature id: QColor} for every feature of `layer`.

    Features the renderer yields no symbol for get a default grey colour.
    Bug fix: the original rebound the whole dict on every iteration, so at
    most one entry (the last feature visited) was ever returned; entries
    are now accumulated per feature id.
    """
    symbols = {}
    if layer:
        renderer = layer.rendererV2()
        features = layer.getFeatures()
        for feature in features:
            symb = renderer.symbolsForFeature(feature)
            if len(symb) > 0:
                symbols[feature.id()] = symb[0].color()
            else:
                # QColor lives in QtGui; the bare name was undefined here
                symbols[feature.id()] = QtGui.QColor(200, 200, 200, 255)
    return symbols
def getAllFeatureData(layer):
    """Return (data, symbols): {feature id: attributes} and {feature id: QColor}.

    Bug fix: the original rebound both dicts inside the loop, keeping only
    the last feature; entries are now accumulated per feature id. The
    default grey uses QtGui.QColor, since the bare QColor name was undefined.
    """
    data = {}
    symbols = {}
    if layer:
        renderer = layer.rendererV2()
        features = layer.getFeatures()
        for feature in features:
            fid = feature.id()
            data[fid] = feature.attributes()
            symb = renderer.symbolsForFeature(feature)
            if len(symb) > 0:
                symbols[fid] = symb[0].color()
            else:
                symbols[fid] = QtGui.QColor(200, 200, 200, 255)
    return data, symbols
def getFeaturesByIntersection(base_layer, intersect_layer, crosses):
    """Return the features of `base_layer` that intersect `intersect_layer`
    (crosses=True) or that do NOT intersect it (crosses=False)."""
    features = []
    # retrieve objects to be intersected (list comprehension, more pythonic)
    intersect_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
    # retrieve base layer objects
    base = base_layer.getFeatures()
    # should improve with spatial index for large data sets
    #index = createIndex(base_layer)
    # loop through base features and intersecting elements
    # appends if intersecting, when crosses = True
    # does the opposite if crosses = False
    for feat in base:
        append = not crosses
        base_geom = feat.geometry()
        for intersect in intersect_geom:
            if base_geom.intersects(intersect):
                append = crosses
                break
        if append:
            features.append(feat)
    return features
def selectFeaturesByIntersection(base_layer, intersect_layer, crosses):
    """Select the features of `base_layer` by (non-)intersection with
    `intersect_layer`; same semantics for `crosses` as above."""
    features = []
    # retrieve objects to be intersected (list comprehension, more pythonic)
    obstacles_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
    # retrieve base layer objects
    base = base_layer.getFeatures()
    # loop through base features and intersecting elements
    for feat in base:
        append = not crosses
        base_geom = QgsGeometry(feat.geometry())
        for obst in obstacles_geom:
            if base_geom.intersects(obst):
                append = crosses
                break
        if append:
            features.append(feat.id())
    base_layer.select(features)
def getFeaturesIntersections(base_layer, intersect_layer):
    """Return the intersection geometries between the features of the two layers."""
    intersections = []
    # retrieve objects to be intersected (list comprehension, more pythonic)
    obstacles_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
    # retrieve base layer objects
    base = base_layer.getFeatures()
    # loop through base features and intersecting elements
    for feat in base:
        base_geom = QgsGeometry(feat.geometry())
        for obst in obstacles_geom:
            if base_geom.intersects(obst):
                intersections.append(base_geom.intersection(obst))
    return intersections
#
# Canvas functions
#
# Display a message in the QGIS canvas
def showMessage(iface, msg, type='Info', lev=1, dur=2):
    """Push `msg` to the QGIS message bar with the given title, level and duration."""
    iface.messageBar().pushMessage(type,msg,level=lev,duration=dur)
def updateRenderer(layer, attribute, settings):
    """
    Creates a renderer for the layer based on this, and applies it
    The renderer uses GradientColourRamp to calculate the symbol colours
    @param layer: the selected QgsVectorLayer object
    """
    geometry = layer.geometryType()
    # create a colour ramp based on colour range type, inverting symbols if required
    ramp = settings['ramp']
    line_width = float(settings['line_width'])
    # calculate ranges: EqualInterval = 0; Quantile = 1; Jenks = 2; StdDev = 3; Pretty = 4; Custom = 5
    intervals = int(settings['intervals'])
    interval_type = int(settings['interval_type'])
    renderer = None
    # set symbol type and line width
    symbol = QgsSymbolV2.defaultSymbol(geometry)
    if symbol:
        if symbol.type() == 1: # line
            symbol.setWidth(line_width)
        elif symbol.type() == 2: # fill (polygon) - original comment wrongly said "line"
            symbol = QgsFillSymbolV2.createSimple({'style': 'solid', 'color': 'black', 'width_border': '%s' % line_width})
        elif symbol.type() == 0: # point
            symbol.setSize(line_width)
        renderer = QgsGraduatedSymbolRendererV2.createRenderer(layer, attribute, intervals, interval_type, symbol, ramp)
        renderer.setMode(interval_type)
        renderer.setSourceColorRamp(ramp)
    return renderer
#
# Network functions
#
def makeUndirectedGraph(network_layer, points=list):
    """Build an undirected routing graph from `network_layer`, tying `points`
    to the network; returns (graph, tied_points)."""
    graph = None
    tied_points = []
    if network_layer:
        director = QgsLineVectorLayerDirector(network_layer, -1, '', '', '', 3)
        properter = QgsDistanceArcProperter()
        director.addProperter(properter)
        builder = QgsGraphBuilder(network_layer.crs())
        tied_points = director.makeGraph(builder, points)
        graph = builder.graph()
    return graph, tied_points
def makeDirectedGraph(network_layer, points=list, direction_field=-1, one_way='', reverse_way='', two_way='', default_direction=3):
    """Build a directed routing graph honouring the given direction field and
    its one-way/reverse/two-way values; returns (graph, tied_points)."""
    graph = None
    tied_points = []
    if network_layer:
        director = QgsLineVectorLayerDirector(network_layer, direction_field, one_way, reverse_way, two_way, default_direction)
        properter = QgsDistanceArcProperter()
        director.addProperter(properter)
        builder = QgsGraphBuilder(network_layer.crs())
        tied_points = director.makeGraph(builder, points)
        graph = builder.graph()
    return graph, tied_points
def calculateRouteTree(graph, tied_points, origin, destination, impedance=0):
    """Return the point sequence of the shortest route between two tied
    points, using QgsGraphAnalyzer.shortestTree ([] when unreachable)."""
    points = []
    if tied_points:
        try:
            from_point = tied_points[origin]
            to_point = tied_points[destination]
        except:
            return points
        # analyse graph
        if graph:
            # 'form_id' is a long-standing typo for 'from_id'; kept as-is
            form_id = graph.findVertex(from_point)
            tree = QgsGraphAnalyzer.shortestTree(graph, form_id, impedance)
            form_id = tree.findVertex(from_point)
            to_id = tree.findVertex(to_point)
            # iterate to get all points in route
            if to_id == -1:
                pass
            else:
                # walk backwards from destination to origin, prepending points
                while form_id != to_id:
                    l = tree.vertex(to_id).inArc()
                    if not l:
                        break
                    e = tree.arc(l[0])
                    points.insert(0, tree.vertex(e.inVertex()).point())
                    to_id = e.outVertex()
                points.insert(0, from_point)
    return points
def calculateRouteDijkstra(graph, tied_points, origin, destination, impedance=0):
    """Return the point sequence of the shortest route between two tied
    points, using QgsGraphAnalyzer.dijkstra ([] when unreachable)."""
    points = []
    if tied_points:
        try:
            from_point = tied_points[origin]
            to_point = tied_points[destination]
        except:
            return points
        # analyse graph
        if graph:
            from_id = graph.findVertex(from_point)
            to_id = graph.findVertex(to_point)
            (tree, cost) = QgsGraphAnalyzer.dijkstra(graph, from_id, impedance)
            if tree[to_id] == -1:
                pass
            else:
                # walk backwards along the shortest-path tree, then reverse
                curPos = to_id
                while curPos != from_id:
                    points.append(graph.vertex(graph.arc(tree[curPos]).inVertex()).point())
                    curPos = graph.arc(tree[curPos]).outVertex()
                points.append(from_point)
                points.reverse()
    return points
def calculateServiceArea(graph, tied_points, origin, cutoff, impedance=0):
    """Return the graph vertices reachable from tied_points[origin] within
    `cutoff` cost: {vertex id as str: (QgsPoint, cost to reach it)}.

    Bug fix: the original stored the entire Dijkstra cost list for every
    vertex; each entry now carries that vertex's own cost (cost[i]).
    """
    points = {}
    if tied_points:
        try:
            from_point = tied_points[origin]
        except:
            return points
        # analyse graph
        if graph:
            from_id = graph.findVertex(from_point)
            (tree, cost) = QgsGraphAnalyzer.dijkstra(graph, from_id, impedance)
            for i in range(len(cost)):
                # tree[i] == -1 means the vertex is unreachable
                if cost[i] <= cutoff and tree[i] != -1:
                    points[str(i)] = (graph.vertex(i).point(), cost[i])
    return points
#
# General functions
#
def getLastDir(tool_name=''):
    """Return the last used directory stored in QSettings for `tool_name`."""
    path = ''
    settings = QtCore.QSettings(tool_name,"")
    path = settings.value("lastUsedDir",str(""))
    return path
def setLastDir(filename, tool_name=''):
    """Store the directory of `filename` in QSettings as the last used directory."""
    path = QtCore.QFileInfo(filename).absolutePath()
    settings = QtCore.QSettings(tool_name,"")
    settings.setValue("lastUsedDir", str(unicode(path)))
# check if a text string is of numeric type
def isNumeric(txt):
    """Return True when `txt` parses as int, long or float (Python 2 `long`)."""
    try:
        int(txt)
        return True
    except ValueError:
        try:
            long(txt)
            return True
        except ValueError:
            try:
                float(txt)
                return True
            except ValueError:
                return False
# convert a text string to a numeric value, if possible
def convertNumeric(txt):
    """Return `txt` as int, long or float; '' when it is not numeric."""
    try:
        value = int(txt)
    except ValueError:
        try:
            value = long(txt)
        except ValueError:
            try:
                value = float(txt)
            except ValueError:
                value = ''
    return value
def truncateNumber(num,digits=9):
    """Shorten the decimal representation of `num` and convert it back to a
    number. NOTE(review): the [:digits] slice counts ALL characters of the
    string form, not just the decimals after the point."""
    if isNumeric(num):
        truncated = str(num)
        if '.' in truncated:
            truncated = truncated[:digits]
            truncated = truncated.rstrip('0').rstrip('.')
        return convertNumeric(truncated)
# Function to create a spatial index for QgsVectorDataProvider
def createIndex(layer):
    """Build and return a QgsSpatialIndex over all features of `layer`,
    or None when the provider cannot create spatial indexes."""
    provider = layer.dataProvider()
    caps = provider.capabilities()
    if caps & QgsVectorDataProvider.CreateSpatialIndex:
        feat = QgsFeature()
        index = QgsSpatialIndex()
        fit = provider.getFeatures()
        while fit.nextFeature(feat):
            index.insertFeature(feat)
        return index
    else:
        return None
#------------------------------
# Layer creation functions
#------------------------------
def createTempLayer(name, geometry, srid, attributes, types):
    """Create an empty in-memory layer with the given geometry string, CRS
    and fields; returns the layer or None when adding fields fails."""
    #geometry can be 'POINT', 'LINESTRING' or 'POLYGON' or the 'MULTI' version of the previous
    vlayer = QgsVectorLayer('%s?crs=EPSG:%s'% (geometry, srid), name, "memory")
    provider = vlayer.dataProvider()
    #create the required fields
    if attributes:
        vlayer.startEditing()
        fields = []
        for i, att in enumerate(attributes):
            fields.append(QgsField(att, types[i]))
        # add the fields to the layer
        try:
            provider.addAttributes(fields)
        except:
            return None
        vlayer.commitChanges()
    return vlayer
def loadTempLayer(layer):
    """Add `layer` to the map layer registry so it appears in the project."""
    QgsMapLayerRegistry.instance().addMapLayer(layer)
def insertTempFeatures(layer, geometry, attributes):
    """Append features built from raw point/polyline/polygon coordinate
    sequences to `layer`, matching the provider's WKB geometry type."""
    provider = layer.dataProvider()
    geometry_type = provider.geometryType()
    for i, geom in enumerate(geometry):
        fet = QgsFeature()
        if geometry_type in (1, 4):
            fet.setGeometry(QgsGeometry.fromPoint(geom))
        elif geometry_type in (2, 5):
            fet.setGeometry(QgsGeometry.fromPolyline(geom))
        elif geometry_type in (3, 6):
            fet.setGeometry(QgsGeometry.fromPolygon(geom))
        if attributes:
            fet.setAttributes(attributes[i])
        provider.addFeatures([fet])
    provider.updateExtents()
def insertTempFeaturesGeom(layer, geometry, attributes):
    """Append features from ready-made QgsGeometry objects to `layer`."""
    provider = layer.dataProvider()
    for i, geom in enumerate(geometry):
        fet = QgsFeature()
        fet.setGeometry(geom)
        if attributes:
            fet.setAttributes(attributes[i])
        provider.addFeatures([fet])
    provider.updateExtents()
def createTempLayerFull(name, srid, attributes, types, values, coords):
# create an instance of a memory vector layer
type = ''
if len(coords) == 2: type = 'Point'
elif len(coords) == 4: type = 'LineString'
vlayer = QgsVectorLayer('%s?crs=EPSG:%s'% (type, srid), name, "memory")
provider = vlayer.dataProvider()
#create the required fields
fields = []
for i, name in enumerate(attributes):
fields.append(QgsField(name, types[i]))
# add the fields to the layer
vlayer.startEditing()
try:
provider.addAttributes(fields)
except:
return None
# add features by iterating the values
features = []
for i, val in enumerate(values):
feat = QgsFeature()
# add geometry
try:
if type == 'Point':
feat.setGeometry(QgsGeometry.fromPoint([QgsPoint(float(val[coords[0]]),float(val[coords[1]]))]))
elif type == 'LineString':
feat.setGeometry(QgsGeometry.fromPolyline([QgsPoint(float(val[coords[0]]),float(val[coords[1]])),
QgsPoint(float(val[coords[2]]),float(val[coords[3]]))]))
except:
pass
# add attribute values
feat.setAttributes(list(val))
features.append(feat);
# add the features to the layer
try:
provider.addFeatures(features)
except:
return None
vlayer.commitChanges()
vlayer.updateExtents()
if not vlayer.isValid():
print "Layer failed to create!"
return None
return vlayer
#---------------------------------------------
# Shape file specific functions
#---------------------------------------------
def testShapeFileExists(path, name):
filename = path+"/"+name+".shp"
exists = os.path.isfile(filename)
return exists
def copyLayerToShapeFile(layer, path, name):
    """Write all features of `layer` to `<path>/<name>.shp` and return the
    newly opened shapefile layer (None on writer or load error)."""
    #Get layer provider
    provider = layer.dataProvider()
    filename = path+"/"+name+".shp"
    fields = provider.fields()
    if layer.hasGeometryType():
        geometry = layer.wkbType()
    else:
        geometry = None
    srid = layer.crs()
    # create an instance of vector file writer, which will create the vector file.
    writer = QgsVectorFileWriter(filename, "CP1250", fields, geometry, srid, "ESRI Shapefile")
    if writer.hasError() != QgsVectorFileWriter.NoError:
        print "Error when creating shapefile: ", writer.hasError()
        return None
    # add features by iterating the values
    for feat in layer.getFeatures():
        writer.addFeature(feat)
    # delete the writer to flush features to disk
    del writer
    # open the newly created file
    vlayer = QgsVectorLayer(filename, name, "ogr")
    if not vlayer.isValid():
        print "Layer failed to load!"
        return None
    return vlayer
def createShapeFileLayer(path, name, srid, attributes, types, geometrytype):
    """Create an empty shapefile with the given fields and geometry type and
    return the opened layer (None on error).
    NOTE(review): per the original todo the created table ends up without
    attributes; prefer createShapeFileFullLayer."""
    # create new empty layer with given attributes
    # todo: created table has no attributes. not used
    # use createShapeFileFullLayer instead
    filename = path+"/"+name+".shp"
    #create the required fields
    fields = QgsFields()
    for i, attr in enumerate(attributes):
        fields.append(QgsField(attr, types[i]))
    # create an instance of vector file writer, which will create the vector file.
    writer = None
    if 'point' in geometrytype.lower():
        writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPoint, srid, "ESRI Shapefile")
    elif 'line' in geometrytype.lower():
        writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBLineString, srid, "ESRI Shapefile")
    elif 'polygon' in geometrytype.lower():
        writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPolygon, srid, "ESRI Shapefile")
    if writer.hasError() != QgsVectorFileWriter.NoError:
        print "Error when creating shapefile: ", writer.hasError()
        return None
    # delete the writer to flush features to disk (optional)
    del writer
    # open the newly created file
    vlayer = QgsVectorLayer(filename, name, "ogr")
    if not vlayer.isValid():
        print "Layer failed to open!"
        return None
    return vlayer
def createShapeFileFullLayer(path, name, srid, attributes, types, values, coords):
# create new layer with given attributes and data, including geometry (point and lines only)
filename = path+"/"+name+".shp"
#create the required fields
fields = QgsFields()
for i, attr in enumerate(attributes):
fields.append(QgsField(attr, types[i]))
# create an instance of vector file writer, which will create the vector file.
writer = None
if len(coords) == 2:
type = 'point'
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPoint, srid, "ESRI Shapefile")
elif len(coords) == 4:
type = 'line'
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBLineString, srid, "ESRI Shapefile")
if writer.hasError() != QgsVectorFileWriter.NoError:
print "Error when creating shapefile: ", writer.hasError()
return None
# add features by iterating the values
feat = QgsFeature()
for i, val in enumerate(values):
# add geometry
try:
if type == 'point':
feat.setGeometry(QgsGeometry.fromPoint([QgsPoint(float(val[coords[0]]),float(val[coords[1]]))]))
elif type == 'line':
feat.setGeometry(QgsGeometry.fromPolyline([QgsPoint(float(val[coords[0]]),float(val[coords[1]])),
QgsPoint(float(val[coords[2]]),float(val[coords[3]]))]))
except: pass
# add attributes
attrs = []
for j, attr in enumerate(attributes):
attrs.append(val[j])
feat.setAttributes(attrs)
writer.addFeature(feat)
# delete the writer to flush features to disk (optional)
del writer
# open the newly created file
vlayer = QgsVectorLayer(filename, name, "ogr")
if not vlayer.isValid():
print "Layer failed to load!"
return None
return vlayer
def addShapeFileAttributes(layer, attributes, types, values):
    """Add new attribute columns to an existing layer and fill them row by
    row from `values` (one row per feature, one column per new attribute).
    Returns the result of the last provider call (False on failure)."""
    # add attributes to an existing layer
    attributes_pos = dict()
    res = False
    if layer:
        provider = layer.dataProvider()
        caps = provider.capabilities()
        res = False
        if caps & QgsVectorDataProvider.AddAttributes:
            fields = provider.fields()
            count = fields.count()
            for i, name in enumerate(attributes):
                #add new field if it doesn't exist
                if fields.indexFromName(name) == -1:
                    res = provider.addAttributes([QgsField(name, types[i])])
                    # keep position of attributes that are added, since name can change
                    attributes_pos[i] = count
                    count += 1
            #apply changes if any made
            if res:
                layer.updateFields()
        # update attribute values by iterating the layer's features
        res = False
        if caps & QgsVectorDataProvider.ChangeAttributeValues:
            #fields = provider.fields() #the fields must be retrieved again after the updateFields() method
            iter = layer.getFeatures()
            for i, feature in enumerate(iter):
                fid = feature.id()
                #to update the features the attribute/value pairs must be converted to a dictionary for each feature
                attrs = {}
                for j in attributes_pos.iterkeys():
                    field_id = attributes_pos[j]
                    val = values[i][j]
                    attrs.update({field_id: val})
                #update the layer with the corresponding dictionary
                res = provider.changeAttributeValues({fid: attrs})
            #apply changes if any made
            if res:
                layer.updateFields()
    return res
| gpl-2.0 |
Matt-Deacalion/django | tests/gis_tests/gdal_tests/test_envelope.py | 335 | 3667 | import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Envelope, GDALException
class TestPoint(object):
    """Minimal point stand-in exposing only ``x`` and ``y`` attributes."""

    def __init__(self, x, y):
        self.x, self.y = x, y
@skipUnless(HAS_GDAL, "GDAL is required")
class EnvelopeTest(unittest.TestCase):
    """Tests for gdal.Envelope: construction, properties, equivalence and
    every overload of expand_to_include()."""

    def setUp(self):
        # Fresh 5x5 envelope anchored at the origin for each test.
        self.e = Envelope(0, 0, 5, 5)

    def test01_init(self):
        "Testing Envelope initialization."
        e1 = Envelope((0, 0, 5, 5))
        Envelope(0, 0, 5, 5)
        Envelope(0, '0', '5', 5)  # Thanks to ww for this
        Envelope(e1._envelope)
        # min > max, wrong arity, empty and non-numeric inputs are rejected.
        self.assertRaises(GDALException, Envelope, (5, 5, 0, 0))
        self.assertRaises(GDALException, Envelope, 5, 5, 0, 0)
        self.assertRaises(GDALException, Envelope, (0, 0, 5, 5, 3))
        self.assertRaises(GDALException, Envelope, ())
        self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
        self.assertRaises(TypeError, Envelope, 'foo')
        self.assertRaises(GDALException, Envelope, (1, 1, 0, 0))
        try:
            # Degenerate (zero-area) envelopes are allowed.
            Envelope(0, 0, 0, 0)
        except GDALException:
            self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")

    def test02_properties(self):
        "Testing Envelope properties."
        e = Envelope(0, 0, 2, 3)
        self.assertEqual(0, e.min_x)
        self.assertEqual(0, e.min_y)
        self.assertEqual(2, e.max_x)
        self.assertEqual(3, e.max_y)
        # ll/ur are the lower-left and upper-right corners.
        self.assertEqual((0, 0), e.ll)
        self.assertEqual((2, 3), e.ur)
        self.assertEqual((0, 0, 2, 3), e.tuple)
        self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
        self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))

    def test03_equivalence(self):
        "Testing Envelope equivalence."
        e1 = Envelope(0.523, 0.217, 253.23, 523.69)
        e2 = Envelope((0.523, 0.217, 253.23, 523.69))
        self.assertEqual(e1, e2)
        # Envelopes also compare equal to plain 4-tuples.
        self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)

    def test04_expand_to_include_pt_2_params(self):
        "Testing Envelope expand_to_include -- point as two parameters."
        self.e.expand_to_include(2, 6)
        self.assertEqual((0, 0, 5, 6), self.e)
        self.e.expand_to_include(-1, -1)
        self.assertEqual((-1, -1, 5, 6), self.e)

    def test05_expand_to_include_pt_2_tuple(self):
        "Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
        self.e.expand_to_include((10, 10))
        self.assertEqual((0, 0, 10, 10), self.e)
        self.e.expand_to_include((-10, -10))
        self.assertEqual((-10, -10, 10, 10), self.e)

    def test06_expand_to_include_extent_4_params(self):
        "Testing Envelope expand_to_include -- extent as 4 parameters."
        self.e.expand_to_include(-1, 1, 3, 7)
        self.assertEqual((-1, 0, 5, 7), self.e)

    def test06_expand_to_include_extent_4_tuple(self):
        "Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
        self.e.expand_to_include((-1, 1, 3, 7))
        self.assertEqual((-1, 0, 5, 7), self.e)

    def test07_expand_to_include_envelope(self):
        "Testing Envelope expand_to_include with Envelope as parameter."
        self.e.expand_to_include(Envelope(-1, 1, 3, 7))
        self.assertEqual((-1, 0, 5, 7), self.e)

    def test08_expand_to_include_point(self):
        "Testing Envelope expand_to_include with Point as parameter."
        # Any object exposing .x/.y works (duck typing via TestPoint).
        self.e.expand_to_include(TestPoint(-1, 1))
        self.assertEqual((-1, 0, 5, 5), self.e)
        self.e.expand_to_include(TestPoint(10, 10))
        self.assertEqual((-1, 0, 10, 10), self.e)
| bsd-3-clause |
lemarcudal/sha_thedivision | test/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 152 | 2854 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python < 2.7, where logging.NullHandler does not exist:
    # a handler that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.15.1'

# Names re-exported as the public package API.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)

# Attach a no-op handler so applications that haven't configured logging
# don't see "No handler found" warnings from this library.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    stderr_handler = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stderr_handler.setFormatter(fmt)
    pkg_logger = logging.getLogger(__name__)
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
# ... Clean up: drop NullHandler from the module namespace so it is not
# accidentally exported as part of the package API.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning category to silence; defaults to the root
        ``HTTPWarning`` so every urllib3 warning is suppressed.
    """
    warnings.simplefilter('ignore', category)
| apache-2.0 |
rswyatt/kittypooclub | node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulates ['Rules', rule1, rule2, ...] in easy_xml list form.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      cmd: Command line of the rule.
      description: Description of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file (only touches disk if the content changed)."""
    content = [
        'VisualStudioToolFile',
        {'Version': '8.00', 'Name': self.name},
        self.rules_section,
    ]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| gpl-2.0 |
fdibaldassarre/mload | src/Interface/Gtk/ShowLatestWindow.py | 1 | 5180 | #!/usr/bin/env python3
from gi.repository import Gtk
from gi.repository import Pango
from datetime import datetime
from datetime import timedelta
DATE_FORMAT = '%d %B %Y'
DATE_FORMAT_ORDER = '%Y %m %d'
MAX_LABEL_SIZE = 30
class ShowLatestWindow(Gtk.Window):
    """Modal popup listing the latest manga update events, grouped by date.

    Shown beside the application's main window; the data comes from the
    machine's history manager.
    """

    def __init__(self, interface):
        """Build the window and its widgets.

        :param interface: main GUI interface object; must provide
            ``main_window`` and ``mmachine`` (with its ``history_manager``).
        """
        self.interface = interface
        self.history_manager = self.interface.mmachine.history_manager
        events = self.getLatestEvents()
        Gtk.Window.__init__(self, title = "Latest updates")
        # Fixed minimal width; same height as the main window.
        main_size = self.interface.main_window.get_size()
        self.set_size_request(200, main_size[1])
        self.set_border_width(10)
        self.set_modal(True)
        self.set_resizable(False)
        self.set_transient_for(self.interface.main_window)
        self.main_grid = Gtk.Grid()
        self.createEventsGrid(events)
        # close button
        close_button = Gtk.Button("Close")
        close_button.connect("clicked", self.close)
        # add to the window
        base_grid = Gtk.Grid()
        base_grid.set_orientation(Gtk.Orientation.VERTICAL)
        base_grid.add(self.main_grid)
        base_grid.add(close_button)
        self.add(base_grid)

    def run(self):
        """Position the window just right of the main window and show it."""
        main_x_off, main_y_off = self.interface.main_window.get_position()
        main_width, main_height = self.interface.main_window.get_size()
        self.move( main_x_off + main_width, main_y_off )
        self.show_all()
        self.show()

    def close(self, widget = None, data = None):
        """Close-button handler: destroy the window."""
        self.destroy()

    def getLatestEvents(self):
        """Return the recent update events from the history manager."""
        # get the last updates in 3 days TODO
        events = self.interface.mmachine.history_manager.getLatestEvents()
        return events

    def createEventsGrid(self, events):
        """(Re)build ``self.main_grid`` with the given events.

        Events are grouped date -> manga -> chapter list and rendered newest
        date first, with 'Today'/'Yesterday' labels where applicable.
        """
        # clear the main grid
        children = self.main_grid.get_children()
        for child in children:
            child.destroy()
        # add the events
        if len(events) == 0:
            label = Gtk.Label("No update in the last month")
            label.set_hexpand(True)
            label.set_vexpand(True)
            self.main_grid.add(label)
            return True
        # else
        # order events by date (keys in DATE_FORMAT_ORDER sort lexicographically
        # in chronological order)
        now = datetime.now()
        yesterday = now - timedelta(hours = 24)
        dates_print = {}
        ordered_events = {}
        for event in events:
            # get date
            date = event.getDateAsString(DATE_FORMAT_ORDER)
            # add date
            if not date in ordered_events:
                ordered_events[date] = {}
            # set date print (human-readable label for this date)
            date_str = event.getDateAsString(DATE_FORMAT)
            if date_str == now.strftime(DATE_FORMAT):
                date_str = 'Today'
            elif date_str == yesterday.strftime(DATE_FORMAT):
                date_str = 'Yesterday'
            dates_print[date] = date_str
            # add manga
            manga = event.getManga()
            if not manga in ordered_events[date]:
                ordered_events[date][manga] = []
            # add chapters (deduplicated)
            chapter = event.getChapter()
            if not chapter in ordered_events[date][manga]:
                ordered_events[date][manga].append(chapter)
        dates = list(ordered_events.keys())
        dates.sort()
        dates.reverse()
        # create the grid
        grid = Gtk.Grid()
        grid.set_orientation(Gtk.Orientation.VERTICAL)
        grid.set_row_spacing(5)
        for date in dates:
            date_grid = Gtk.Grid()
            date_grid.set_orientation(Gtk.Orientation.VERTICAL)
            # add date label
            label = Gtk.Label()
            date_str = dates_print[date]
            label.set_markup('<span weight="bold">' + date_str + '</span>')
            label.set_justify(Gtk.Justification.LEFT)
            label.set_alignment(0,0)
            label.set_hexpand(True)
            date_grid.add(label)
            manga_events = ordered_events[date]
            manga_list = list(manga_events.keys())
            manga_list.sort()
            for manga_name in manga_list:
                chapters = manga_events[manga_name]
                # Sort chapter names numerically by zero-padding short names
                # to 4 characters, newest (highest) first.
                chapters = sorted( chapters, key = lambda chapter : ("000" + chapter)[-4:] if len(chapter) <= 4 else chapter )
                chapters.reverse()
                manga_grid = Gtk.Grid()
                manga_grid.set_orientation(Gtk.Orientation.HORIZONTAL)
                # create manga name label
                name_label = Gtk.Label(manga_name)
                name_label.set_ellipsize(Pango.EllipsizeMode.END)
                name_label.set_justify(Gtk.Justification.LEFT)
                name_label.set_alignment(0,0)
                name_label.set_max_width_chars(MAX_LABEL_SIZE)
                #name_label.set_hexpand(True)
                # create chapter label
                chapters_str = ', '.join(chapters)
                chapters_label = Gtk.Label()
                chapters_label.set_markup(': <span style="italic">' + chapters_str +'</span>')
                chapters_label.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
                chapters_label.set_justify(Gtk.Justification.LEFT)
                chapters_label.set_alignment(0,0)
                #chapters_label.set_hexpand(True)
                # add to manga grid
                manga_grid.add(name_label)
                manga_grid.add(chapters_label)
                date_grid.add(manga_grid)
            # add date grid to grid
            grid.add(date_grid)
        ## add scroll
        scroll = Gtk.ScrolledWindow()
        scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        scroll.add_with_viewport(grid)
        scroll.set_vexpand(True)
        self.main_grid.add(scroll)
def create(interface):
    """Factory: build, position and show a ShowLatestWindow.

    :param interface: main GUI interface object.
    :returns: the created ShowLatestWindow instance.
    """
    slm = ShowLatestWindow(interface)
    slm.run()
    return slm
| gpl-3.0 |
pidydx/grr | grr/gui/plugins/artifact_view_test.py | 2 | 5689 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the artifact rendering interface."""
import os
from grr.gui import gui_test_lib
from grr.gui import runtests_test
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import parsers
class TestCmdProcessor(parsers.CommandParser):
  """Stub parser registered for TestCmdArtifact.

  Exists only so the GUI can display a processor name for the test artifact.
  """

  output_types = ["SoftwarePackage"]
  supported_artifacts = ["TestCmdArtifact"]
class TestArtifactRender(gui_test_lib.GRRSeleniumTest):
  """Selenium tests for the artifact rendering GUI."""

  def _UploadCustomArtifacts(self):
    # Replace the registry contents with artifacts *uploaded* from the test
    # data file, so they count as custom (user-uploaded) artifacts.
    artifact_registry.REGISTRY.ClearRegistry()
    test_artifacts_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
                                       "artifacts", "test_artifacts.json")
    with open(test_artifacts_file, "rb") as fd:
      artifact.UploadArtifactYamlFile(fd.read(), token=self.token)

  def _LoadSystemArtifacts(self):
    # Replace the registry contents with artifacts registered as a file
    # source, so they count as system artifacts.
    artifact_registry.REGISTRY.ClearRegistry()
    test_artifacts_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
                                       "artifacts", "test_artifacts.json")
    artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)

  def setUp(self):
    super(TestArtifactRender, self).setUp()
    with self.ACLChecksDisabled():
      # One approved linux client to launch flows against.
      self.client_id = self.SetupClients(1, system="linux")[0]
      self.RequestAndGrantClientApproval(self.client_id)

  def testArtifactRendering(self):
    """Artifact list: platform filter, search and selection all work."""
    with self.ACLChecksDisabled():
      self._LoadSystemArtifacts()

    self.Open("/")

    self.Type("client_query", self.client_id.Basename())
    self.Click("client_query_submit")

    self.WaitUntilEqual(self.client_id, self.GetText, "css=span[type=subject]")
    # Choose client 1
    self.Click("css=td:contains('%s')" % self.client_id.Basename())

    # First screen should be the Host Information already.
    self.WaitUntil(self.IsTextPresent, "Host-0")
    self.Click("css=a[grrtarget='client.launchFlows']")
    self.Click("css=#_Collectors")

    self.assertEqual("ArtifactCollectorFlow",
                     self.GetText("link=ArtifactCollectorFlow"))
    self.Click("link=ArtifactCollectorFlow")
    self.WaitUntil(self.IsTextPresent, "Artifact list")

    self.Click("css=grr-artifacts-list-form button:contains('All Platforms')")
    self.Click("css=grr-artifacts-list-form li:contains('Linux')")

    # Check search works. Note that test artifacts names are used (see
    # test_data/artifacts/test_artifacts.json for details.
    self.WaitUntil(self.IsTextPresent, "TestCmdArtifact")
    self.WaitUntil(self.IsTextPresent, "TestFilesArtifact")

    self.Type("css=grr-artifacts-list-form input[type=text]", u"Cmd")
    self.WaitUntil(self.IsTextPresent, "TestCmdArtifact")
    self.WaitUntilNot(self.IsTextPresent, "TestFilesArtifact")

    # Check we can add to the list.
    self.Click("css=grr-artifacts-list-form tr:contains('TestCmdArtifact')")
    self.Click("css=grr-artifacts-list-form button:contains('Add')")
    # Selected artifacts should be highlighted in bold.
    self.WaitUntil(self.IsElementPresent, "css=grr-artifacts-list-form "
                   "strong:contains('TestCmdArtifact')")

    # Check the artifact description loaded.
    self.WaitUntil(self.IsTextPresent, "Test command artifact for dpkg.")
    self.WaitUntil(self.IsTextPresent, "TestCmdProcessor")

  def testSystemArtifactsAreNotMarkedInStartFlowForm(self):
    with self.ACLChecksDisabled():
      self._LoadSystemArtifacts()

    self.Open("/#/clients/%s/launch-flow" % self.client_id.Basename())
    self.Click("css=#_Collectors")

    self.Click("link=ArtifactCollectorFlow")
    self.WaitUntil(self.IsElementPresent, "css=*:contains('TestCmdArtifact')")
    # System artifacts must not carry the custom-artifact user icon.
    self.WaitUntilNot(self.IsElementPresent,
                      "css=span[title~='Custom Uploaded Artifact'] > i.fa-user")

  def testCustomArtifactsAreMarkedInStartFlowForm(self):
    with self.ACLChecksDisabled():
      self._UploadCustomArtifacts()

    self.Open("/#/clients/%s/launch-flow" % self.client_id.Basename())
    self.Click("css=#_Collectors")

    self.Click("link=ArtifactCollectorFlow")
    self.WaitUntil(self.IsElementPresent, "css=*:contains('TestCmdArtifact') > "
                   "span[title~='Custom Uploaded Artifact'] > i.fa-user")

  def testSystemArtifactsAreNotMarkedInFlowArguments(self):
    with self.ACLChecksDisabled():
      # NOTE(review): this uploads *custom* artifacts although the test name
      # says "system"; it likely should call _LoadSystemArtifacts — confirm.
      self._UploadCustomArtifacts()

    self.Open("/#/clients/%s/launch-flow" % self.client_id.Basename())
    self.Click("css=#_Collectors")

    self.Click("link=ArtifactCollectorFlow")
    self.DoubleClick(
        "css=grr-artifacts-list-form tr:contains('TestCmdArtifact')")
    self.Click("css=button.Launch")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-artifact-name:contains('TestCmdArtifact')")
    self.WaitUntilNot(self.IsElementPresent,
                      "css=span[title~='Custom Uploaded Artifact'] > i.fa-user")

  def testCustomArtifactsAreMarkedInFlowArguments(self):
    with self.ACLChecksDisabled():
      self._UploadCustomArtifacts()

    self.Open("/#/clients/%s/launch-flow" % self.client_id.Basename())
    self.Click("css=#_Collectors")

    self.Click("link=ArtifactCollectorFlow")
    self.DoubleClick(
        "css=grr-artifacts-list-form tr:contains('TestCmdArtifact')")
    self.Click("css=button.Launch")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-artifact-name:contains('TestCmdArtifact') "
                   "span[title~='Custom Uploaded Artifact'] > i.fa-user")
def main(argv):
  """Entry point: runs the full Selenium test suite for this module."""
  # Run the full test suite
  runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
nvoron23/avos | openstack_dashboard/dashboards/project/data_processing/clusters/workflows/scale.py | 32 | 6747 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as clt_create_flow
import openstack_dashboard.dashboards.project.data_processing. \
clusters.workflows.create as cl_create_flow
from openstack_dashboard.dashboards.project.data_processing.utils \
import workflow_helpers
from saharaclient.api import base as api_base
LOG = logging.getLogger(__name__)
class NodeGroupsStep(clt_create_flow.ConfigureNodegroups):
    """Node-groups step reused unchanged from the cluster-template workflow.

    Subclassed only so the scale workflow has its own step type.
    """
    pass
class ScaleCluster(cl_create_flow.ConfigureCluster,
                   workflow_helpers.StatusFormatMixin):
    """Workflow for resizing an existing Sahara cluster.

    Reuses the cluster-creation "configure node groups" step, pre-populated
    with the cluster's current node groups, and submits only the delta
    (added or resized groups) to the Sahara scale API.
    """
    slug = "scale_cluster"
    name = _("Scale Cluster")
    finalize_button_name = _("Scale")
    success_url = "horizon:project:data_processing.clusters:index"
    default_steps = (NodeGroupsStep, )

    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        """Fetch the cluster and seed the node-group forms with its groups."""
        ScaleCluster._cls_registry = set([])
        self.success_message = _("Scaled cluster successfully started.")
        cluster_id = context_seed["cluster_id"]
        try:
            cluster = saharaclient.cluster_get(request, cluster_id)
            plugin = cluster.plugin_name
            hadoop_version = cluster.hadoop_version

            # Initialize deletable node groups: existing groups may not be
            # removed through the scale form.
            deletable = dict()
            for group in cluster.node_groups:
                deletable[group["name"]] = "false"
            request.GET = request.GET.copy()
            request.GET.update({
                "cluster_id": cluster_id,
                "plugin_name": plugin,
                "hadoop_version": hadoop_version,
                "deletable": deletable
            })
            super(ScaleCluster, self).__init__(request, context_seed,
                                               entry_point, *args,
                                               **kwargs)

            # Initialize the node-groups step with the cluster's current
            # groups (skipped on POST, when the form carries its own state).
            for step in self.steps:
                if not isinstance(step, clt_create_flow.ConfigureNodegroups):
                    continue
                ng_action = step.action
                template_ngs = cluster.node_groups

                if 'forms_ids' in request.POST:
                    continue
                ng_action.groups = []
                for i, templ_ng in enumerate(template_ngs):
                    group_name = "group_name_%d" % i
                    template_id = "template_id_%d" % i
                    count = "count_%d" % i
                    serialized = "serialized_%d" % i
                    # The full (cleaned) node group travels through the form
                    # as an opaque base64-encoded JSON blob.
                    serialized_val = base64.urlsafe_b64encode(json.dumps(
                        workflow_helpers.clean_node_group(templ_ng)))
                    ng_action.groups.append({
                        "name": templ_ng["name"],
                        "template_id": templ_ng["node_group_template_id"],
                        "count": templ_ng["count"],
                        "id": i,
                        "deletable": "false",
                        "serialized": serialized_val
                    })
                    workflow_helpers.build_node_group_fields(ng_action,
                                                             group_name,
                                                             template_id,
                                                             count,
                                                             serialized)
        except Exception:
            exceptions.handle(request,
                              _("Unable to fetch cluster to scale"))

    def format_status_message(self, message):
        """Return the error description if one was recorded, else success.

        Scaling form requires special handling because it has no Cluster
        name in its context.
        """
        error_description = getattr(self, 'error_description', None)
        if error_description:
            return error_description
        else:
            return self.success_message

    def handle(self, request, context):
        """Build the scale request from the form context and submit it.

        Returns True on success; False (with error_description set where
        possible) on failure.
        """
        cluster_id = request.GET["cluster_id"]
        try:
            cluster = saharaclient.cluster_get(request, cluster_id)
            existing_node_groups = set([])
            for ng in cluster.node_groups:
                existing_node_groups.add(ng["name"])
            scale_object = dict()
            ids = json.loads(context["ng_forms_ids"])
            for _id in ids:
                name = context["ng_group_name_%s" % _id]
                template_id = context["ng_template_id_%s" % _id]
                count = context["ng_count_%s" % _id]
                if name not in existing_node_groups:
                    # Unknown name -> a brand new node group to add.
                    if "add_node_groups" not in scale_object:
                        scale_object["add_node_groups"] = []
                    scale_object["add_node_groups"].append(
                        {"name": name,
                         "node_group_template_id": template_id,
                         "count": int(count)})
                else:
                    # Existing group: only submit a resize if the count
                    # actually changed.
                    old_count = None
                    for ng in cluster.node_groups:
                        if name == ng["name"]:
                            old_count = ng["count"]
                            break
                    # Compare as integers: the API returns an int while the
                    # form context carries a string, so the previous direct
                    # comparison always differed and every existing group was
                    # submitted as a resize even when unchanged.
                    if old_count != int(count):
                        if "resize_node_groups" not in scale_object:
                            scale_object["resize_node_groups"] = []
                        scale_object["resize_node_groups"].append(
                            {"name": name,
                             "count": int(count)}
                        )
        except Exception:
            scale_object = {}
            exceptions.handle(request,
                              _("Unable to fetch cluster to scale."))
        try:
            saharaclient.cluster_scale(request, cluster_id, scale_object)
            return True
        except api_base.APIException as e:
            # Surface the API error message via format_status_message().
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request,
                              _("Scale cluster operation failed"))
            return False
| apache-2.0 |
albertoconnor/website | newsletter/migrations/0001_initial.py | 2 | 2249 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the newsletter app: creates the
    # NewsletterPage page type and the NewsletterArticleLink through-model
    # that orders article links (with optional text/image overrides) on it.

    dependencies = [
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
        ('articles', '0024_auto_20150722_1928'),
        ('images', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='NewsletterArticleLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('override_text', wagtail.wagtailcore.fields.RichTextField(default='', help_text='Text to describe article.', blank=True)),
                ('article', models.ForeignKey(related_name='newsletter_links', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='articles.ArticlePage', null=True)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='NewsletterPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('issue_date', models.DateField(auto_now=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.AddField(
            model_name='newsletterarticlelink',
            name='newsletter',
            field=modelcluster.fields.ParentalKey(related_name='article_links', to='newsletter.NewsletterPage'),
        ),
        migrations.AddField(
            model_name='newsletterarticlelink',
            name='override_image',
            field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='images.AttributedImage', help_text='Circular Image to accompany article if article image not selected', null=True),
        ),
    ]
| mit |
gonzolino/heat | heat/engine/clients/os/designate.py | 4 | 3712 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designateclient import exceptions
from designateclient import v1 as client
from designateclient.v1 import domains
from designateclient.v1 import records
from heat.common import exception as heat_exception
from heat.engine.clients import client_plugin
from heat.engine import constraints
CLIENT_NAME = 'designate'
class DesignateClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin wrapping the Designate (DNS) v1 API."""

    exceptions_module = [exceptions]

    service_types = [DNS] = ['dns']

    def _create(self):
        """Build an authenticated designateclient v1 Client."""
        args = self._get_client_args(service_name=CLIENT_NAME,
                                     service_type=self.DNS)

        return client.Client(auth_url=args['auth_url'],
                             project_id=args['project_id'],
                             token=args['token'](),
                             endpoint=args['os_endpoint'],
                             cacert=args['cacert'],
                             insecure=args['insecure'])

    def is_not_found(self, ex):
        # Only Designate NotFound errors count as "resource missing".
        return isinstance(ex, exceptions.NotFound)

    def get_domain_id(self, domain_id_or_name):
        """Resolve a domain ID or name to the domain's ID.

        Tries a direct lookup by ID first, then falls back to scanning the
        domain list by name; raises EntityNotFound when neither matches.
        """
        try:
            domain_obj = self.client().domains.get(domain_id_or_name)
            return domain_obj.id
        except exceptions.NotFound:
            for domain in self.client().domains.list():
                if domain.name == domain_id_or_name:
                    return domain.id

        raise heat_exception.EntityNotFound(entity='Designate Domain',
                                            name=domain_id_or_name)

    def domain_create(self, **kwargs):
        """Create a domain from keyword arguments."""
        domain = domains.Domain(**kwargs)
        return self.client().domains.create(domain)

    def domain_update(self, **kwargs):
        """Update domain ``id`` with the remaining keyword arguments."""
        # Designate mandates to pass the Domain object with updated properties
        domain = self.client().domains.get(kwargs['id'])
        for key in kwargs.keys():
            setattr(domain, key, kwargs[key])
        return self.client().domains.update(domain)

    def record_create(self, **kwargs):
        """Create a record; kwargs must include 'domain' (ID or name)."""
        domain_id = self.get_domain_id(kwargs.pop('domain'))

        record = records.Record(**kwargs)
        return self.client().records.create(domain_id, record)

    def record_update(self, **kwargs):
        """Update record ``id`` inside domain ``domain``."""
        # Designate mandates to pass the Record object with updated properties
        domain_id = self.get_domain_id(kwargs.pop('domain'))
        record = self.client().records.get(domain_id, kwargs['id'])

        for key in kwargs.keys():
            setattr(record, key, kwargs[key])
        return self.client().records.update(record.domain_id, record)

    def record_delete(self, **kwargs):
        """Delete record ``id`` inside domain ``domain``."""
        domain_id = self.get_domain_id(kwargs.pop('domain'))
        return self.client().records.delete(domain_id,
                                            kwargs.pop('id'))

    def record_show(self, **kwargs):
        """Fetch record ``id`` inside domain ``domain``."""
        domain_id = self.get_domain_id(kwargs.pop('domain'))
        return self.client().records.get(domain_id,
                                         kwargs.pop('id'))
class DesignateDomainConstraint(constraints.BaseCustomConstraint):
    # Validates a "designate domain" property value by resolving it (ID or
    # name) through DesignateClientPlugin.get_domain_id.

    resource_client_name = CLIENT_NAME
    resource_getter_name = 'get_domain_id'
| apache-2.0 |
hashmaparraylist/shadowsocks | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
    """Return every file on %PATH% matching *name* (with or without ".dll").

    Modified from ctypes.util: ctypes.util.find_library returns only the
    first hit, but on Windows users may have both 32bit and 64bit versions
    installed, so we collect every candidate and let the caller try them all.
    """
    found = []
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate):
            found.append(candidate)
        # Unless the name already ends in ".dll", also try with it appended.
        if not candidate.lower().endswith(".dll"):
            with_ext = candidate + ".dll"
            if os.path.isfile(with_ext):
                found.append(with_ext)
    return found
def find_library(possible_lib_names, search_symbol, library_name):
    """Locate and load a native library by trying several candidate names.

    Args:
        possible_lib_names: one base name, or a list/tuple of base names to
            try; each is also tried with a "lib" prefix.
        search_symbol: symbol that must exist in a loaded candidate for it
            to be accepted.
        library_name: human-readable name, used only for logging.

    Returns:
        A ctypes.CDLL handle that exports ``search_symbol``, or None if no
        candidate could be loaded.
    """
    import ctypes.util
    from ctypes import CDLL

    paths = []

    if type(possible_lib_names) not in (list, tuple):
        possible_lib_names = [possible_lib_names]

    lib_names = []
    for lib_name in possible_lib_names:
        lib_names.append(lib_name)
        lib_names.append('lib' + lib_name)

    for name in lib_names:
        if os.name == "nt":
            # On Windows search PATH ourselves so every installed copy
            # (e.g. both 32bit and 64bit builds) gets tried.
            paths.extend(find_library_nt(name))
        else:
            path = ctypes.util.find_library(name)
            if path:
                paths.append(path)

    if not paths:
        # We may get here when find_library fails because, for example,
        # the user does not have sufficient privileges to access those
        # tools underlying find_library on linux.
        import glob

        for name in lib_names:
            patterns = [
                '/usr/local/lib*/lib%s.*' % name,
                '/usr/lib*/lib%s.*' % name,
                'lib%s.*' % name,
                '%s.dll' % name]

            for pat in patterns:
                files = glob.glob(pat)
                if files:
                    paths.extend(files)
    for path in paths:
        try:
            lib = CDLL(path)
            if hasattr(lib, search_symbol):
                logging.info('loading %s from %s', library_name, path)
                return lib
            else:
                logging.warn('can\'t find symbol %s in %s', search_symbol,
                             path)
        except Exception:
            # Best effort: an unloadable candidate just means we try the next.
            pass
    return None
def run_cipher(cipher, decipher):
    """Round-trip ~16MB of random data through cipher/decipher in
    randomly-sized chunks; asserts the plaintext survives and prints a
    rough throughput figure."""
    from os import urandom
    import random
    import time

    BLOCK_SIZE = 16384
    rounds = 1 * 1024
    plain = urandom(BLOCK_SIZE * rounds)

    print('test start')
    start = time.time()

    # Encrypt in random-size pieces to exercise arbitrary chunking.
    encrypted_parts = []
    offset = 0
    while offset < len(plain):
        step = random.randint(100, 32768)
        encrypted_parts.append(cipher.update(plain[offset:offset + step]))
        offset += step

    # Decrypt the joined ciphertext using a different random chunking.
    ciphertext = b''.join(encrypted_parts)
    decrypted_parts = []
    offset = 0
    while offset < len(plain):
        step = random.randint(100, 32768)
        decrypted_parts.append(decipher.update(ciphertext[offset:offset + step]))
        offset += step

    end = time.time()
    print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
    assert b''.join(decrypted_parts) == plain
def test_find_library():
    """Smoke tests for find_library.

    Assumes libc and OpenSSL (libcrypto / eay32) are installed on the host.
    """
    assert find_library('c', 'strcpy', 'libc') is not None
    assert find_library(['c'], 'strcpy', 'libc') is not None
    assert find_library(('c',), 'strcpy', 'libc') is not None
    assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
                        'libcrypto') is not None
    assert find_library('notexist', 'strcpy', 'libnotexist') is None
    # An existing library without the requested symbol must also yield None.
    assert find_library('c', 'symbol_not_exist', 'c') is None
    assert find_library(('notexist', 'c', 'crypto', 'eay32'),
                        'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
| apache-2.0 |
keishi/chromium | third_party/closure_linter/closure_linter/not_strict_test.py | 142 | 2332 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import errors
from closure_linter.common import filetestcase
# Root directory of the annotated test-data files used by the suite below.
_RESOURCE_PREFIX = 'closure_linter/testdata'
# Configure gjslint for the non-strict run: strict checks disabled, plus the
# custom jsdoc tags / namespaces / limited-doc files the test data relies on.
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
                                 'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
    'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
  """Suite that adds one annotated-file lint test per input file.

  File names passed on the command line (sys.argv[1:]) take precedence;
  when none are given, the module-level _TEST_FILES list is used.
  """
  def __init__(self, tests=()):
    unittest.TestSuite.__init__(self, tests)
    requested = sys.argv[1:] if sys.argv else []
    for test_file in (requested or _TEST_FILES):
      resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
      self.addTest(
          filetestcase.AnnotatedFileTestCase(resource_path,
                                             checker.GJsLintRunner(),
                                             errors.ByName))
if __name__ == '__main__':
  # Don't let main parse args; it happens in the TestSuite.
  # (argv is truncated to the program name so unittest sees no file list.)
  googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| bsd-3-clause |
catapult-project/catapult-csm | third_party/html5lib-python/html5lib/tests/test_treeadapters.py | 451 | 1852 | from __future__ import absolute_import, division, unicode_literals
from . import support # flake8: noqa
import html5lib
from html5lib.treeadapters import sax
from html5lib.treewalkers import getTreeWalker
def test_to_sax():
    """to_sax() must replay a walked etree document as a SAX event stream.

    The markup is deliberately malformed (unclosed <a>, stray </p>), so the
    expected events reflect the parser's error recovery as well.
    """
    recorder = support.TracingSaxHandler()
    document = html5lib.parse("""<html xml:lang="en">
        <title>Directory Listing</title>
        <a href="/"><b/></p>
    """, treebuilder="etree")
    walk = getTreeWalker("etree")
    sax.to_sax(walk(document), recorder)
    expected = [
        'startDocument',
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'html'),
         'html', {(None, 'xml:lang'): 'en'}),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head', {}),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title', {}),
        ('characters', 'Directory Listing'),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title'),
        ('characters', '\n '),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head'),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body', {}),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a', {(None, 'href'): '/'}),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b', {}),
        ('startElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p', {}),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p'),
        ('characters', '\n '),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b'),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a'),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body'),
        ('endElementNS', ('http://www.w3.org/1999/xhtml', 'html'), 'html'),
        'endDocument',
    ]
    assert recorder.visited == expected
| bsd-3-clause |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/cmd.py | 145 | 15026 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
    """A simple framework for writing line-oriented command interpreters.
    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.
    A Cmd instance or subclass instance is a line-oriented interpreter
    framework. There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Class-level defaults; subclasses typically override prompt, intro and
    # the help-listing headers below.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1
    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.
        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.
        """
        import sys
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        # Install our completer while the loop runs; the previous readline
        # completer is restored in the finally block below.
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                # Queued commands take precedence over interactive input.
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # An empty read means end-of-file on stdin.
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line
    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop
    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass
    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        pass
    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments. Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is shorthand for the help command.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is shorthand for do_shell, when the subclass defines one.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line
    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.
        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if line == 'EOF' :
            # The EOF pseudo-command must not be repeated by emptyline().
            self.lastcmd = ''
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)
    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.
        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)
    def default(self, line):
        """Called on an input line when the command prefix is not recognized.
        If this method is not overridden, it prints an error message and
        returns.
        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)
    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.
        By default, it returns an empty list.
        """
        return []
    def completenames(self, text, *ignored):
        # Complete command names from the do_* methods matching the prefix.
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]
    def complete(self, text, state):
        """Return the next possible completion for 'text'.
        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # First call for this text: (re)compute the candidate list.
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None
    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)
    def complete_help(self, *args):
        # Help completes both command names and bare help_* topics.
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)
    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                try:
                    # No help_<arg> method: fall back to do_<arg>'s docstring.
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header, cmds_doc, 15,80)
            self.print_topics(self.misc_header, help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)
    def print_topics(self, header, cmds, cmdlen, maxcol):
        # Print one section of the help listing: header, optional ruler
        # line, then the command names laid out in columns.
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")
    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.
        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return
        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError, ("list[i] not a string for i in %s" %
                              ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # Nothing fits: fall back to a single column, one item per row.
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            # Drop empty trailing cells so rows do not end in padding.
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n"%str(" ".join(texts)))
| mit |
dslomov/intellij-community | python/helpers/pydev/_pydev_imports_tipper.py | 52 | 12642 | import os.path
import inspect
import sys
from _pydev_tipper_common import DoFind
try:
    xrange
except:
    xrange = range  # Python 3 compatibility: xrange was removed.
# Completion kind codes (kept as strings because they are sent to the IDE).
TYPE_IMPORT = '0'
TYPE_CLASS = '1'
TYPE_FUNCTION = '2'
TYPE_ATTR = '3'
TYPE_BUILTIN = '4'
TYPE_PARAM = '5'
def _imp(name, log=None):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
if log is not None:
log.AddContent('Unable to import', name, 'trying with', sub)
log.AddException()
return _imp(sub, log)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
if log is not None:
log.AddContent(s)
log.AddException()
raise ImportError(s)
IS_IPY = False
# On IronPython ('cli') importing .NET assemblies requires registering a
# clr reference first, so _imp is wrapped with that extra step.
if sys.platform == 'cli':
    IS_IPY = True
    _old_imp = _imp
    def _imp(name, log=None):
        #We must add a reference in clr for .Net
        import clr #@UnresolvedImport
        initial_name = name
        while '.' in name:
            try:
                clr.AddReference(name)
                break #If it worked, that's OK.
            except:
                # Strip the last dotted component and retry with the parent.
                name = name[0:name.rfind('.')]
        else:
            # while/else: runs only when the loop ended without break,
            # i.e. no dotted prefix could be referenced.
            try:
                clr.AddReference(name)
            except:
                pass #That's OK (not dot net module).
        return _old_imp(initial_name, log)
def GetFile(mod):
    """Best-effort lookup of the source filename for module ``mod``.

    Prefers inspect's source/definition file; if inspect fails, falls back
    to ``mod.__file__``, mapping compiled ``.pyc``/``.pyo`` names back to
    the ``.py`` source when that file exists on disk.
    Returns None when no filename can be determined.
    """
    f = None
    try:
        f = inspect.getsourcefile(mod) or inspect.getfile(mod)
    except:
        if hasattr(mod, '__file__'):
            f = mod.__file__
        # Bug fix: this used to call f.lower(f[-4:]), which raises TypeError
        # (str.lower takes no argument) -- the intent was f[-4:].lower().
        # Also guard against f still being None on this fallback path.
        if f is not None and f[-4:].lower() in ['.pyc', '.pyo']:
            filename = f[:-4] + '.py'
            if os.path.exists(filename):
                f = filename
    return f
def Find(name, log=None):
    """Import dotted ``name`` and locate where it lives.

    Returns a tuple (f, mod, parent, foundAs):
      f       -- source filename of the deepest *module* reached (or None)
      mod     -- the innermost object reached by walking the dotted path
      parent  -- the top-level module returned by the import
      foundAs -- dotted path of the non-module components inside ``mod``
    """
    f = None
    mod = _imp(name, log)
    parent = mod
    foundAs = ''
    if inspect.ismodule(mod):
        f = GetFile(mod)
    components = name.split('.')
    old_comp = None
    for comp in components[1:]:
        try:
            #this happens in the following case:
            #we have mx.DateTime.mxDateTime.mxDateTime.pyd
            #but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
            mod = getattr(mod, comp)
        except AttributeError:
            # A repeated component is tolerated (shadowing case above);
            # anything else is a genuine lookup failure.
            if old_comp != comp:
                raise
        if inspect.ismodule(mod):
            f = GetFile(mod)
        else:
            # Non-module attribute: record it in the foundAs dotted path.
            if len(foundAs) > 0:
                foundAs = foundAs + '.'
            foundAs = foundAs + comp
        old_comp = comp
    return f, mod, parent, foundAs
def Search(data):
    '''@return file, line, col
    '''
    # Normalize the request: drop newlines and a trailing dot so a name
    # like "module.attr." still resolves.
    data = data.replace('\n', '')
    if data.endswith('.'):
        data = data.rstrip('.')
    f, mod, parent, foundAs = Find(data)
    try:
        return DoFind(f, mod), foundAs
    except:
        # Fall back to locating the definition in the parent module when
        # the innermost object cannot be found directly.
        return DoFind(f, parent), foundAs
def GenerateTip(data, log=None):
    """Resolve dotted name ``data`` and build completion tips for it.

    Returns (source_filename, tips) where tips is the list of completion
    tuples produced by GenerateImportsTipForModule.
    """
    data = data.replace('\n', '')
    if data.endswith('.'):
        data = data.rstrip('.')
    f, mod, parent, foundAs = Find(data, log)
    #print_ >> open('temp.txt', 'w'), f
    tips = GenerateImportsTipForModule(mod)
    return f, tips
def CheckChar(c):
    """Map characters that are invalid in identifiers ('-' and '.') to '_'."""
    return '_' if c in ('-', '.') else c
def GenerateImportsTipForModule(obj_to_complete, dirComps=None, getattr=getattr, filter=lambda name:True):
    '''
    @param obj_to_complete: the object from where we should get the completions
    @param dirComps: if passed, we should not 'dir' the object and should just iterate those passed as a parameter
    @param getattr: the way to get a given object from the obj_to_complete (used for the completer)
    @param filter: a callable that receives the name and decides if it should be appended or not to the results
    @return: list of tuples, so that each tuple represents a completion with:
        name, doc, args, type (from the TYPE_* constants)
    '''
    ret = []
    if dirComps is None:
        dirComps = dir(obj_to_complete)
        if hasattr(obj_to_complete, '__dict__'):
            dirComps.append('__dict__')
        if hasattr(obj_to_complete, '__class__'):
            dirComps.append('__class__')
    getCompleteInfo = True
    if len(dirComps) > 1000:
        #ok, we don't want to let our users wait forever...
        #no complete info for you...
        getCompleteInfo = False
    dontGetDocsOn = (float, int, str, tuple, list)
    for d in dirComps:
        if d is None:
            continue
        if not filter(d):
            continue
        args = ''
        try:
            # Prefer the class attribute (unbound descriptor); fall back
            # to the instance attribute.
            try:
                obj = getattr(obj_to_complete.__class__, d)
            except:
                obj = getattr(obj_to_complete, d)
        except: #just ignore and get it without additional info
            ret.append((d, '', args, TYPE_BUILTIN))
        else:
            if getCompleteInfo:
                try:
                    retType = TYPE_BUILTIN
                    #check if we have to get docs
                    getDoc = True
                    for class_ in dontGetDocsOn:
                        if isinstance(obj, class_):
                            getDoc = False
                            break
                    doc = ''
                    if getDoc:
                        #no need to get this info... too many constants are defined and
                        #makes things much slower (passing all that through sockets takes quite some time)
                        try:
                            doc = inspect.getdoc(obj)
                            if doc is None:
                                doc = ''
                        except: #may happen on jython when checking java classes (so, just ignore it)
                            doc = ''
                    if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj):
                        try:
                            args, vargs, kwargs, defaults = inspect.getargspec(obj)
                            r = ''
                            for a in (args):
                                if len(r) > 0:
                                    r = r + ', '
                                r = r + str(a)
                            args = '(%s)' % (r)
                        except TypeError:
                            #ok, let's see if we can get the arguments from the doc
                            args = '()'
                            try:
                                found = False
                                if len(doc) > 0:
                                    if IS_IPY:
                                        #Handle case where we have the situation below
                                        #sort(self, object cmp, object key)
                                        #sort(self, object cmp, object key, bool reverse)
                                        #sort(self)
                                        #sort(self, object cmp)
                                        #Or: sort(self: list, cmp: object, key: object)
                                        #sort(self: list, cmp: object, key: object, reverse: bool)
                                        #sort(self: list)
                                        #sort(self: list, cmp: object)
                                        if hasattr(obj, '__name__'):
                                            name = obj.__name__+'('
                                            #Fix issue where it was appearing sort(aa)sort(bb)sort(cc) in the same line.
                                            lines = doc.splitlines()
                                            if len(lines) == 1:
                                                c = doc.count(name)
                                                if c > 1:
                                                    doc = ('\n'+name).join(doc.split(name))
                                            # Pick the overload line with the most parameters.
                                            major = ''
                                            for line in doc.splitlines():
                                                if line.startswith(name) and line.endswith(')'):
                                                    if len(line) > len(major):
                                                        major = line
                                            if major:
                                                args = major[major.index('('):]
                                                found = True
                                    if not found:
                                        # Generic heuristic: look at the first line of the
                                        # docstring (up to '->', '--' or a line break) for a
                                        # "name(arg, ...)" signature.
                                        i = doc.find('->')
                                        if i < 0:
                                            i = doc.find('--')
                                            if i < 0:
                                                i = doc.find('\n')
                                                if i < 0:
                                                    i = doc.find('\r')
                                        if i > 0:
                                            s = doc[0:i]
                                            s = s.strip()
                                            #let's see if we have a docstring in the first line
                                            if s[-1] == ')':
                                                start = s.find('(')
                                                if start >= 0:
                                                    end = s.find('[')
                                                    if end <= 0:
                                                        end = s.find(')')
                                                        if end <= 0:
                                                            end = len(s)
                                                    args = s[start:end]
                                                    if not args[-1] == ')':
                                                        args = args + ')'
                                                    #now, get rid of unwanted chars
                                                    l = len(args) - 1
                                                    r = []
                                                    for i in xrange(len(args)):
                                                        if i == 0 or i == l:
                                                            r.append(args[i])
                                                        else:
                                                            r.append(CheckChar(args[i]))
                                                    args = ''.join(r)
                                    if IS_IPY:
                                        # Normalize IronPython's "self: type" annotation on
                                        # the first parameter and drop anything after ')'.
                                        if args.startswith('(self:'):
                                            i = args.find(',')
                                            if i >= 0:
                                                args = '(self'+args[i:]
                                            else:
                                                args = '(self)'
                                        i = args.find(')')
                                        if i > 0:
                                            args = args[:i+1]
                            except:
                                pass
                        retType = TYPE_FUNCTION
                    elif inspect.isclass(obj):
                        retType = TYPE_CLASS
                    elif inspect.ismodule(obj):
                        retType = TYPE_IMPORT
                    else:
                        retType = TYPE_ATTR
                    #add token and doc to return - assure only strings.
                    ret.append((d, doc, args, retType))
                except: #just ignore and get it without aditional info
                    ret.append((d, '', args, TYPE_BUILTIN))
            else: #getCompleteInfo == False
                if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj):
                    retType = TYPE_FUNCTION
                elif inspect.isclass(obj):
                    retType = TYPE_CLASS
                elif inspect.ismodule(obj):
                    retType = TYPE_IMPORT
                else:
                    retType = TYPE_ATTR
                #ok, no complete info, let's try to do this as fast and clean as possible
                #so, no docs for this kind of information, only the signatures
                ret.append((d, '', str(args), retType))
    return ret
| apache-2.0 |
ifduyue/django | tests/staticfiles_tests/cases.py | 39 | 4395 | import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from .settings import TEST_SETTINGS
class BaseStaticFilesMixin:
    """Mixin providing static-file assertion and template helpers."""
    def assertFileContains(self, filepath, text):
        contents = self._get_file(filepath)
        self.assertIn(text, contents, "'%s' not in '%s'" % (text, filepath))
    def assertFileNotFound(self, filepath):
        with self.assertRaises(IOError):
            self._get_file(filepath)
    def render_template(self, template, **kwargs):
        tpl = Template(template) if isinstance(template, str) else template
        return tpl.render(Context(**kwargs)).strip()
    def static_template_snippet(self, path, asvar=False):
        if not asvar:
            return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
        return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
    def assertStaticRenders(self, path, result, asvar=False, **kwargs):
        snippet = self.static_template_snippet(path, asvar)
        self.assertEqual(self.render_template(snippet, **kwargs), result)
    def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
        # Note: asvar is accepted but not forwarded to assertStaticRenders.
        with self.assertRaises(exc):
            self.assertStaticRenders(path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesMixin, SimpleTestCase):
    # Plain variant: just the mixin assertions running under TEST_SETTINGS.
    pass
@override_settings(**TEST_SETTINGS)
class CollectionTestCase(BaseStaticFilesMixin, SimpleTestCase):
    """
    Tests shared by all file finding features (collectstatic,
    findstatic, and static serve view).
    This relies on the asserts defined in BaseStaticFilesTestCase, but
    is separated because some test cases need those asserts without
    all these tests.
    """
    def setUp(self):
        super().setUp()
        # Collect into a fresh temporary STATIC_ROOT for every test.
        temp_dir = tempfile.mkdtemp()
        # Override the STATIC_ROOT for all tests from setUp to tearDown
        # rather than as a context manager
        self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
        self.patched_settings.enable()
        self.run_collectstatic()
        # Same comment as in runtests.teardown.
        self.addCleanup(shutil.rmtree, temp_dir)
    def tearDown(self):
        self.patched_settings.disable()
        super().tearDown()
    def run_collectstatic(self, *, verbosity=0, **kwargs):
        # *.ignoreme fixtures are deliberately excluded from collection.
        call_command('collectstatic', interactive=False, verbosity=verbosity,
                     ignore_patterns=['*.ignoreme'], **kwargs)
    def _get_file(self, filepath):
        # Read a collected file back from STATIC_ROOT as UTF-8 text.
        assert filepath, 'filepath is empty.'
        filepath = os.path.join(settings.STATIC_ROOT, filepath)
        with codecs.open(filepath, "r", "utf-8") as f:
            return f.read()
class TestDefaults:
    """
    A few standard test cases.

    Mixed into concrete test cases that provide assertFileContains.
    """
    def test_staticfiles_dirs(self):
        """
        Can find a file in a STATICFILES_DIRS directory.
        """
        self.assertFileContains('test.txt', 'Can we find')
        self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
    def test_staticfiles_dirs_subdir(self):
        """
        Can find a file in a subdirectory of a STATICFILES_DIRS
        directory.
        """
        self.assertFileContains('subdir/test.txt', 'Can we find')
    def test_staticfiles_dirs_priority(self):
        """
        File in STATICFILES_DIRS has priority over file in app.
        """
        self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
    def test_app_files(self):
        """
        Can find a file in an app static/ directory.
        """
        self.assertFileContains('test/file1.txt', 'file1 in the app dir')
    def test_nonascii_filenames(self):
        """
        Can find a file with non-ASCII character in an app static/ directory.
        """
        self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
    def test_camelcase_filenames(self):
        """
        Can find a file with capital letters.
        """
        self.assertFileContains('test/camelCase.txt', 'camelCase')
    def test_filename_with_percent_sign(self):
        # Percent signs in names must not be treated as format placeholders.
        self.assertFileContains('test/%2F.txt', '%2F content')
| bsd-3-clause |
Gadal/sympy | sympy/concrete/tests/test_delta.py | 87 | 23654 | from sympy.concrete import Sum
from sympy.concrete.delta import deltaproduct as dp, deltasummation as ds
from sympy.core import Eq, S, symbols, oo
from sympy.functions import KroneckerDelta as KD, Piecewise, piecewise_fold
from sympy.logic import And
# i..m are finite integer summation/product indices; x, y are declared
# noncommutative so products exercise ordering-sensitive code paths.
i, j, k, l, m = symbols("i j k l m", integer=True, finite=True)
x, y = symbols("x y", commutative=False)
def test_deltaproduct_trivial():
    """Factors independent of the product index reduce to plain powers."""
    assert dp(x, (j, 1, 0)) == 1
    assert dp(x, (j, 1, 3)) == x**3
    assert dp(x + y, (j, 1, 3)) == (x + y)**3
    assert dp(x*y, (j, 1, 3)) == (x*y)**3
    assert dp(KD(i, j), (k, 1, 3)) == KD(i, j)
    assert dp(x*KD(i, j), (k, 1, 3)) == x**3*KD(i, j)
    assert dp(x*y*KD(i, j), (k, 1, 3)) == (x*y)**3*KD(i, j)
def test_deltaproduct_basic():
    """deltaproduct of a bare KroneckerDelta over its own index."""
    assert dp(KD(i, j), (j, 1, 3)) == 0
    assert dp(KD(i, j), (j, 1, 1)) == KD(i, 1)
    assert dp(KD(i, j), (j, 2, 2)) == KD(i, 2)
    assert dp(KD(i, j), (j, 3, 3)) == KD(i, 3)
    assert dp(KD(i, j), (j, 1, k)) == KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp(KD(i, j), (j, k, 3)) == KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp(KD(i, j), (j, k, l)) == KD(i, l)*KD(k, l) + KD(k, l + 1)
def test_deltaproduct_mul_x_kd():
    """deltaproduct of x*KD(i, j) over the delta's index."""
    assert dp(x*KD(i, j), (j, 1, 3)) == 0
    assert dp(x*KD(i, j), (j, 1, 1)) == x*KD(i, 1)
    assert dp(x*KD(i, j), (j, 2, 2)) == x*KD(i, 2)
    assert dp(x*KD(i, j), (j, 3, 3)) == x*KD(i, 3)
    assert dp(x*KD(i, j), (j, 1, k)) == x*KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp(x*KD(i, j), (j, k, 3)) == x*KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp(x*KD(i, j), (j, k, l)) == x*KD(i, l)*KD(k, l) + KD(k, l + 1)
def test_deltaproduct_mul_add_x_y_kd():
    """deltaproduct of (x + y)*KD(i, j) over the delta's index."""
    assert dp((x + y)*KD(i, j), (j, 1, 3)) == 0
    assert dp((x + y)*KD(i, j), (j, 1, 1)) == (x + y)*KD(i, 1)
    assert dp((x + y)*KD(i, j), (j, 2, 2)) == (x + y)*KD(i, 2)
    assert dp((x + y)*KD(i, j), (j, 3, 3)) == (x + y)*KD(i, 3)
    assert dp((x + y)*KD(i, j), (j, 1, k)) == \
        (x + y)*KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp((x + y)*KD(i, j), (j, k, 3)) == \
        (x + y)*KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp((x + y)*KD(i, j), (j, k, l)) == \
        (x + y)*KD(i, l)*KD(k, l) + KD(k, l + 1)
def test_deltaproduct_add_kd_kd():
    """deltaproduct of a sum of two deltas sharing the product index."""
    assert dp(KD(i, k) + KD(j, k), (k, 1, 3)) == 0
    assert dp(KD(i, k) + KD(j, k), (k, 1, 1)) == KD(i, 1) + KD(j, 1)
    assert dp(KD(i, k) + KD(j, k), (k, 2, 2)) == KD(i, 2) + KD(j, 2)
    assert dp(KD(i, k) + KD(j, k), (k, 3, 3)) == KD(i, 3) + KD(j, 3)
    assert dp(KD(i, k) + KD(j, k), (k, 1, l)) == KD(l, 0) + \
        KD(i, 1)*KD(l, 1) + KD(j, 1)*KD(l, 1) + \
        KD(i, 1)*KD(j, 2)*KD(l, 2) + KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp(KD(i, k) + KD(j, k), (k, l, 3)) == KD(l, 4) + \
        KD(i, 3)*KD(l, 3) + KD(j, 3)*KD(l, 3) + \
        KD(i, 2)*KD(j, 3)*KD(l, 2) + KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp(KD(i, k) + KD(j, k), (k, l, m)) == KD(l, m + 1) + \
        KD(i, m)*KD(l, m) + KD(j, m)*KD(l, m) + \
        KD(i, m)*KD(j, m - 1)*KD(l, m - 1) + KD(i, m - 1)*KD(j, m)*KD(l, m - 1)
def test_deltaproduct_mul_x_add_kd_kd():
    """deltaproduct of x*(KD(i, k) + KD(j, k)) over k."""
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == x*(KD(i, 1) + KD(j, 1))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == x*(KD(i, 2) + KD(j, 2))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == x*(KD(i, 3) + KD(j, 3))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \
        x*KD(i, 1)*KD(l, 1) + x*KD(j, 1)*KD(l, 1) + \
        x**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + x**2*KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \
        x*KD(i, 3)*KD(l, 3) + x*KD(j, 3)*KD(l, 3) + \
        x**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + x**2*KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp(x*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \
        x*KD(i, m)*KD(l, m) + x*KD(j, m)*KD(l, m) + \
        x**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \
        x**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1)
def test_deltaproduct_mul_add_x_y_add_kd_kd():
    """deltaproduct of (x + y)*(KD(i, k) + KD(j, k)) over k."""
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == \
        (x + y)*(KD(i, 1) + KD(j, 1))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == \
        (x + y)*(KD(i, 2) + KD(j, 2))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == \
        (x + y)*(KD(i, 3) + KD(j, 3))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \
        (x + y)*KD(i, 1)*KD(l, 1) + (x + y)*KD(j, 1)*KD(l, 1) + \
        (x + y)**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + \
        (x + y)**2*KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \
        (x + y)*KD(i, 3)*KD(l, 3) + (x + y)*KD(j, 3)*KD(l, 3) + \
        (x + y)**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + \
        (x + y)**2*KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \
        (x + y)*KD(i, m)*KD(l, m) + (x + y)*KD(j, m)*KD(l, m) + \
        (x + y)**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \
        (x + y)**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1)
def test_deltaproduct_add_mul_x_y_mul_x_kd():
    """deltaproduct of x*y + x*KD(i, j): one factor may be replaced."""
    assert dp(x*y + x*KD(i, j), (j, 1, 3)) == (x*y)**3 + \
        x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3)
    assert dp(x*y + x*KD(i, j), (j, 1, 1)) == x*y + x*KD(i, 1)
    assert dp(x*y + x*KD(i, j), (j, 2, 2)) == x*y + x*KD(i, 2)
    assert dp(x*y + x*KD(i, j), (j, 3, 3)) == x*y + x*KD(i, 3)
    assert dp(x*y + x*KD(i, j), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            ((x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*y + x*KD(i, j), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*y + x*KD(i, j), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_x_add_y_kd():
    """deltaproduct of x*(y + KD(i, j)): delta inside a distributed product."""
    assert dp(x*(y + KD(i, j)), (j, 1, 3)) == (x*y)**3 + \
        x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3)
    assert dp(x*(y + KD(i, j)), (j, 1, 1)) == x*(y + KD(i, 1))
    assert dp(x*(y + KD(i, j)), (j, 2, 2)) == x*(y + KD(i, 2))
    assert dp(x*(y + KD(i, j)), (j, 3, 3)) == x*(y + KD(i, 3))
    assert dp(x*(y + KD(i, j)), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            ((x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*(y + KD(i, j)), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*(y + KD(i, j)), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_x_add_y_twokd():
    """deltaproduct of x*(y + 2*KD(i, j)): scaled delta in a product."""
    assert dp(x*(y + 2*KD(i, j)), (j, 1, 3)) == (x*y)**3 + \
        2*x*(x*y)**2*KD(i, 1) + 2*x*y*x*x*y*KD(i, 2) + 2*(x*y)**2*x*KD(i, 3)
    assert dp(x*(y + 2*KD(i, j)), (j, 1, 1)) == x*(y + 2*KD(i, 1))
    assert dp(x*(y + 2*KD(i, j)), (j, 2, 2)) == x*(y + 2*KD(i, 2))
    assert dp(x*(y + 2*KD(i, j)), (j, 3, 3)) == x*(y + 2*KD(i, 3))
    assert dp(x*(y + 2*KD(i, j)), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            (2*(x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*(y + 2*KD(i, j)), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            (2*(x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*(y + 2*KD(i, j)), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            (2*(x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_add_x_y_add_y_kd():
    """deltaproduct of (x + y)*(y + KD(i, j)): same structure as the x-only
    case with the scalar factor replaced by the sum x + y."""
    assert dp((x + y)*(y + KD(i, j)), (j, 1, 3)) == ((x + y)*y)**3 + \
        (x + y)*((x + y)*y)**2*KD(i, 1) + \
        (x + y)*y*(x + y)**2*y*KD(i, 2) + \
        ((x + y)*y)**2*(x + y)*KD(i, 3)
    assert dp((x + y)*(y + KD(i, j)), (j, 1, 1)) == (x + y)*(y + KD(i, 1))
    assert dp((x + y)*(y + KD(i, j)), (j, 2, 2)) == (x + y)*(y + KD(i, 2))
    assert dp((x + y)*(y + KD(i, j)), (j, 3, 3)) == (x + y)*(y + KD(i, 3))
    assert dp((x + y)*(y + KD(i, j)), (j, 1, k)) == \
        ((x + y)*y)**k + Piecewise(
            (((x + y)*y)**(i - 1)*(x + y)*((x + y)*y)**(k - i),
                And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp((x + y)*(y + KD(i, j)), (j, k, 3)) == \
        ((x + y)*y)**(-k + 4) + Piecewise(
            (((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(3 - i),
                And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp((x + y)*(y + KD(i, j)), (j, k, l)) == \
        ((x + y)*y)**(-k + l + 1) + Piecewise(
            (((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(l - i),
                And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_add_x_kd_add_y_kd():
    """deltaproduct where BOTH factors carry a Kronecker delta; only the
    delta over the product index j is resolved, KD(i, k) stays symbolic."""
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == \
        KD(i, 1)*(KD(i, k) + x)*((KD(i, k) + x)*y)**2 + \
        KD(i, 2)*(KD(i, k) + x)*y*(KD(i, k) + x)**2*y + \
        KD(i, 3)*((KD(i, k) + x)*y)**2*(KD(i, k) + x) + \
        ((KD(i, k) + x)*y)**3
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == \
        (x + KD(i, k))*(y + KD(i, 1))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == \
        (x + KD(i, k))*(y + KD(i, 2))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == \
        (x + KD(i, k))*(y + KD(i, 3))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == \
        ((x + KD(i, k))*y)**k + Piecewise(
            (((x + KD(i, k))*y)**(i - 1)*(x + KD(i, k))*
                ((x + KD(i, k))*y)**(-i + k), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == \
        ((x + KD(i, k))*y)**(4 - k) + Piecewise(
            (((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*
                ((x + KD(i, k))*y)**(-i + 3), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == \
        ((x + KD(i, k))*y)**(-k + l + 1) + Piecewise(
            (((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*
                ((x + KD(i, k))*y)**(-i + l), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltasummation_trivial():
    """Summands with no delta over the summation index: the sum reduces to
    (range length) * summand; an empty range gives 0."""
    assert ds(x, (j, 1, 0)) == 0
    assert ds(x, (j, 1, 3)) == 3*x
    assert ds(x + y, (j, 1, 3)) == 3*(x + y)
    assert ds(x*y, (j, 1, 3)) == 3*x*y
    # KD(i, j) is constant w.r.t. the summation index k here.
    assert ds(KD(i, j), (k, 1, 3)) == 3*KD(i, j)
    assert ds(x*KD(i, j), (k, 1, 3)) == 3*x*KD(i, j)
    assert ds(x*y*KD(i, j), (k, 1, 3)) == 3*x*y*KD(i, j)
def test_deltasummation_basic_numerical():
    """Basic delta sums with numeric bounds, infinite ranges and delta
    sifting (Sum_j f(j)*KD(i, j) == f(i))."""
    n = symbols('n', integer=True, nonzero=True)
    assert ds(KD(n, 0), (n, 1, 3)) == 0
    # return unevaluated, until it gets implemented
    assert ds(KD(i**2, j**2), (j, -oo, oo)) == \
        Sum(KD(i**2, j**2), (j, -oo, oo))
    assert Piecewise((KD(i, k), And(S(1) <= i, i <= 3)), (0, True)) == \
        ds(KD(i, j)*KD(j, k), (j, 1, 3)) == \
        ds(KD(j, k)*KD(i, j), (j, 1, 3))
    assert ds(KD(i, k), (k, -oo, oo)) == 1
    assert ds(KD(i, k), (k, 0, oo)) == Piecewise((1, S(0) <= i), (0, True))
    assert ds(KD(i, k), (k, 1, 3)) == \
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True))
    assert ds(k*KD(i, j)*KD(j, k), (k, -oo, oo)) == j*KD(i, j)
    assert ds(j*KD(i, j), (j, -oo, oo)) == i
    assert ds(i*KD(i, j), (i, -oo, oo)) == j
    assert ds(x, (i, 1, 3)) == 3*x
    assert ds((i + j)*KD(i, j), (j, -oo, oo)) == 2*i
def test_deltasummation_basic_symbolic():
    """Sum of a bare KD(i, j) over j gives 1 guarded by the condition that
    i falls inside the (possibly symbolic) summation range."""
    assert ds(KD(i, j), (j, 1, 3)) == \
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True))
    assert ds(KD(i, j), (j, 1, 1)) == Piecewise((1, Eq(i, 1)), (0, True))
    assert ds(KD(i, j), (j, 2, 2)) == Piecewise((1, Eq(i, 2)), (0, True))
    assert ds(KD(i, j), (j, 3, 3)) == Piecewise((1, Eq(i, 3)), (0, True))
    assert ds(KD(i, j), (j, 1, k)) == \
        Piecewise((1, And(S(1) <= i, i <= k)), (0, True))
    assert ds(KD(i, j), (j, k, 3)) == \
        Piecewise((1, And(k <= i, i <= 3)), (0, True))
    assert ds(KD(i, j), (j, k, l)) == \
        Piecewise((1, And(k <= i, i <= l)), (0, True))
def test_deltasummation_mul_x_kd():
    """A scalar factor x multiplying the delta is carried into the Piecewise
    value unchanged."""
    assert ds(x*KD(i, j), (j, 1, 3)) == \
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True))
    assert ds(x*KD(i, j), (j, 1, 1)) == Piecewise((x, Eq(i, 1)), (0, True))
    assert ds(x*KD(i, j), (j, 2, 2)) == Piecewise((x, Eq(i, 2)), (0, True))
    assert ds(x*KD(i, j), (j, 3, 3)) == Piecewise((x, Eq(i, 3)), (0, True))
    assert ds(x*KD(i, j), (j, 1, k)) == \
        Piecewise((x, And(S(1) <= i, i <= k)), (0, True))
    assert ds(x*KD(i, j), (j, k, 3)) == \
        Piecewise((x, And(k <= i, i <= 3)), (0, True))
    assert ds(x*KD(i, j), (j, k, l)) == \
        Piecewise((x, And(k <= i, i <= l)), (0, True))
def test_deltasummation_mul_add_x_y_kd():
    """Like test_deltasummation_mul_x_kd, with the sum x + y as the scalar
    multiplier of the delta."""
    assert ds((x + y)*KD(i, j), (j, 1, 3)) == \
        Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 1, 1)) == \
        Piecewise((x + y, Eq(i, 1)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 2, 2)) == \
        Piecewise((x + y, Eq(i, 2)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 3, 3)) == \
        Piecewise((x + y, Eq(i, 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 1, k)) == \
        Piecewise((x + y, And(S(1) <= i, i <= k)), (0, True))
    assert ds((x + y)*KD(i, j), (j, k, 3)) == \
        Piecewise((x + y, And(k <= i, i <= 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, k, l)) == \
        Piecewise((x + y, And(k <= i, i <= l)), (0, True))
def test_deltasummation_add_kd_kd():
    """A sum of two deltas splits into two Piecewise terms (one per delta),
    folded together with piecewise_fold."""
    assert ds(KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold(
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold(
        Piecewise((1, Eq(i, 1)), (0, True)) +
        Piecewise((1, Eq(j, 1)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold(
        Piecewise((1, Eq(i, 2)), (0, True)) +
        Piecewise((1, Eq(j, 2)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold(
        Piecewise((1, Eq(i, 3)), (0, True)) +
        Piecewise((1, Eq(j, 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold(
        Piecewise((1, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold(
        Piecewise((1, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(l <= j, j <= 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold(
        Piecewise((1, And(l <= i, i <= m)), (0, True)) +
        Piecewise((1, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_add_mul_x_kd_kd():
    """Sum of x*KD(i, k) + KD(j, k): the x coefficient appears only in the
    first delta's Piecewise branch."""
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold(
        Piecewise((x, Eq(i, 1)), (0, True)) +
        Piecewise((1, Eq(j, 1)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold(
        Piecewise((x, Eq(i, 2)), (0, True)) +
        Piecewise((1, Eq(j, 2)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold(
        Piecewise((x, Eq(i, 3)), (0, True)) +
        Piecewise((1, Eq(j, 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(l <= j, j <= 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= m)), (0, True)) +
        Piecewise((1, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_mul_x_add_kd_kd():
    """Sum of x*(KD(i, k) + KD(j, k)): x distributes over the two deltas, so
    both Piecewise branches carry x."""
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((x, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold(
        Piecewise((x, Eq(i, 1)), (0, True)) +
        Piecewise((x, Eq(j, 1)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold(
        Piecewise((x, Eq(i, 2)), (0, True)) +
        Piecewise((x, Eq(j, 2)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold(
        Piecewise((x, Eq(i, 3)), (0, True)) +
        Piecewise((x, Eq(j, 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((x, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((x, And(l <= j, j <= 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= m)), (0, True)) +
        Piecewise((x, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_mul_add_x_y_add_kd_kd():
    """Sum of (x + y)*(KD(i, k) + KD(j, k)): both branches carry x + y."""
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold(
        Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((x + y, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 1)), (0, True)) +
        Piecewise((x + y, Eq(j, 1)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 2)), (0, True)) +
        Piecewise((x + y, Eq(j, 2)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 3)), (0, True)) +
        Piecewise((x + y, Eq(j, 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold(
        Piecewise((x + y, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((x + y, And(S(1) <= j, j <= l)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold(
        Piecewise((x + y, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((x + y, And(l <= j, j <= 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold(
        Piecewise((x + y, And(l <= i, i <= m)), (0, True)) +
        Piecewise((x + y, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_add_mul_x_y_mul_x_kd():
    """Sum of x*y + x*KD(i, j): the delta-free x*y accumulates linearly in
    the range length, the x*KD term contributes once when i is in range."""
    assert ds(x*y + x*KD(i, j), (j, 1, 3)) == \
        Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 1, 1)) == \
        Piecewise((x*y + x, Eq(i, 1)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 2, 2)) == \
        Piecewise((x*y + x, Eq(i, 2)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 3, 3)) == \
        Piecewise((x*y + x, Eq(i, 3)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 1, k)) == \
        Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, k, 3)) == \
        Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_x_add_y_kd():
    """Sum of x*(y + KD(i, j)) — expands to x*y + x*KD(i, j), so the
    expected results match test_deltasummation_add_mul_x_y_mul_x_kd."""
    assert ds(x*(y + KD(i, j)), (j, 1, 3)) == \
        Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 1, 1)) == \
        Piecewise((x*y + x, Eq(i, 1)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 2, 2)) == \
        Piecewise((x*y + x, Eq(i, 2)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 3, 3)) == \
        Piecewise((x*y + x, Eq(i, 3)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 1, k)) == \
        Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, k, 3)) == \
        Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_x_add_y_twokd():
    """Sum of x*(y + 2*KD(i, j)): the delta contribution doubles to 2*x."""
    assert ds(x*(y + 2*KD(i, j)), (j, 1, 3)) == \
        Piecewise((3*x*y + 2*x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 1, 1)) == \
        Piecewise((x*y + 2*x, Eq(i, 1)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 2, 2)) == \
        Piecewise((x*y + 2*x, Eq(i, 2)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 3, 3)) == \
        Piecewise((x*y + 2*x, Eq(i, 3)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 1, k)) == \
        Piecewise((k*x*y + 2*x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, k, 3)) == Piecewise(
        ((4 - k)*x*y + 2*x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + 2*x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_add_x_y_add_y_kd():
    """Sum of (x + y)*(y + KD(i, j)): (x + y)*y accumulates with the range
    length; the delta contributes x + y once when i is in range."""
    assert ds((x + y)*(y + KD(i, j)), (j, 1, 3)) == Piecewise(
        (3*(x + y)*y + x + y, And(S(1) <= i, i <= 3)), (3*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 1, 1)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 1)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 2, 2)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 2)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 3, 3)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 3)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 1, k)) == Piecewise(
        (k*(x + y)*y + x + y, And(S(1) <= i, i <= k)), (k*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, k, 3)) == Piecewise(
        ((4 - k)*(x + y)*y + x + y, And(k <= i, i <= 3)),
        ((4 - k)*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*(x + y)*y + x + y, And(k <= i, i <= l)),
        ((l - k + 1)*(x + y)*y, True))
def test_deltasummation_mul_add_x_kd_add_y_kd():
    """Sum where both factors carry a delta; only KD(i, j) (over the
    summation index) is resolved, KD(i, k) remains in the result."""
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(S(1) <= i, i <= 3)), (0, True)) +
        3*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 1)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 2)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 3)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(S(1) <= i, i <= k)), (0, True)) +
        k*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(k <= i, i <= 3)), (0, True)) +
        (4 - k)*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(k <= i, i <= l)), (0, True)) +
        (l - k + 1)*(KD(i, k) + x)*y)
| bsd-3-clause |
craftytrickster/servo | tests/heartbeats/characterize_android.py | 139 | 4036 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
from os import path
import time
import datetime
import argparse
import subprocess
# Repository root, relative to this script's working directory
# (the script lives two levels below the root).
TOP_DIR = path.join("..", "..")
# Seconds slept after each run (see execute()) — presumably to let the
# system settle between measured trials; TODO confirm.
GUARD_TIME = 20
# Name of the per-trial file describing the execution (see execute()).
SUMMARY_OUTPUT = "summary.txt"
def get_command(layout_thread_count, renderer, page, profile):
    """Build the `mach run --android` shell command for one execution.

    The command enables the script-events and heartbeats profilers and
    writes the rendered page to a fixed path on the device.
    """
    mach = path.join(TOP_DIR, "mach")
    options = " -p %d -o /sdcard/servo/output.png -y %d %s -Z profile-script-events,profile-heartbeats '%s'" % \
        (profile, layout_thread_count, renderer, page)
    return mach + " run --android" + options
def git_rev_hash():
    """Return the full git revision hash of the current checkout."""
    output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    return output.rstrip()
def git_rev_hash_short():
    """Return the abbreviated git revision hash of the current checkout."""
    output = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
    return output.rstrip()
def execute(base_dir, renderer, page, profile, trial, layout_thread_count):
    """Run a single execution.

    Creates the per-trial log directory, launches Servo via the command
    from get_command(), sleeps GUARD_TIME seconds, then writes a summary
    file describing the run. Exits the process (status 1) if the log
    directory already exists, to avoid clobbering previous results.
    """
    log_dir = path.join(base_dir, "logs_l" + str(layout_thread_count),
                        "trial_" + str(trial))
    if os.path.exists(log_dir):
        print "Log directory already exists: " + log_dir
        sys.exit(1)
    os.makedirs(log_dir)
    # Execute
    cmd = get_command(layout_thread_count, renderer, page, profile)
    print cmd
    os.system(cmd)
    print 'sleep ' + str(GUARD_TIME)
    time.sleep(GUARD_TIME)
    # Write a file that describes this execution
    with open(path.join(log_dir, SUMMARY_OUTPUT), "w") as f:
        f.write("Datetime (UTC): " + datetime.datetime.utcnow().isoformat())
        f.write("\nPlatform: Android")
        f.write("\nGit hash: " + git_rev_hash())
        f.write("\nGit short hash: " + git_rev_hash_short())
        f.write("\nLayout threads: " + str(layout_thread_count))
        f.write("\nTrial: " + str(trial))
        f.write("\nCommand: " + cmd)
def main():
"""For this script to be useful, the following conditions are needed:
- Build servo for Android in release mode with the "energy-profiling" feature enabled.
"""
# Default number of layout threads
layout_threads = 1
# Default benchmark
benchmark = "https://www.mozilla.org/"
# Default renderer
renderer = ""
# Default output directory
output_dir = "heartbeat_logs"
# Default profile interval
profile = 60
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Characterize Servo timing and energy behavior on Android")
parser.add_argument("-b", "--benchmark",
default=benchmark,
help="Gets the benchmark, for example \"-b http://www.example.com\"")
parser.add_argument("-w", "--webrender",
action='store_true',
help="Use webrender backend")
parser.add_argument("-l", "--layout_threads",
help="Specify the number of threads for layout, for example \"-l 5\"")
parser.add_argument("-o", "--output",
help="Specify the log output directory, for example \"-o heartbeat_logs\"")
parser.add_argument("-p", "--profile",
default=60,
help="Profiler output interval, for example \"-p 60\"")
args = parser.parse_args()
if args.benchmark:
benchmark = args.benchmark
if args.webrender:
renderer = "-w"
if args.layout_threads:
layout_threads = int(args.layout_threads)
if args.output:
output_dir = args.output
if args.profile:
profile = args.profile
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
os.makedirs(output_dir)
execute(output_dir, renderer, benchmark, profile, 1, layout_threads)
# Script entry point.
if __name__ == "__main__":
    main()
| mpl-2.0 |
JCA-Developpement/Odoo | addons/procurement/procurement.py | 44 | 15869 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from psycopg2 import OperationalError
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import openerp
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class procurement_group(osv.osv):
    '''
    The procurement group class is used to group products together
    when computing procurements. (tasks, physical products, ...)
    The goal is that when you have one sale order of several products
    and the products are pulled from the same or several location(s), to keep
    having the moves grouped into pickings that represent the sale order.
    Used in: sales order (to group delivery order lines like the so), pull/push
    rules (to pack like the delivery order), on orderpoints (e.g. for wave picking
    all the similar products together).
    Grouping is made only if the source and the destination is the same.
    Suppose you have 4 lines on a picking from Output where 2 lines will need
    to come from Input (crossdock) and 2 lines coming from Stock -> Output As
    the four procurement orders will have the same group ids from the SO, the
    move from input will have a stock.picking with 2 grouped lines and the move
    from stock will have 2 grouped lines also.
    The name is usually the name of the original document (sale order) or a
    sequence computed if created manually.
    '''
    _name = 'procurement.group'
    _description = 'Procurement Requisition'
    _order = "id desc"
    _columns = {
        'name': fields.char('Reference', required=True),
        # Whether grouped moves may be delivered partially or all at once.
        'move_type': fields.selection([
            ('direct', 'Partial'), ('one', 'All at once')],
            'Delivery Method', required=True),
        'procurement_ids': fields.one2many('procurement.order', 'group_id', 'Procurements'),
    }
    # 'name' defaults to the next value of the 'procurement.group' sequence.
    _defaults = {
        'name': lambda self, cr, uid, c: self.pool.get('ir.sequence').get(cr, uid, 'procurement.group') or '',
        'move_type': lambda self, cr, uid, c: 'direct'
    }
class procurement_rule(osv.osv):
    '''
    A rule describe what a procurement should do; produce, buy, move, ...
    '''
    _name = 'procurement.rule'
    _description = "Procurement Rule"
    _order = "name"
    def _get_action(self, cr, uid, context=None):
        """Hook returning the selection values for the 'action' field.

        Empty here; inheriting modules (purchase, mrp, stock, ...) extend
        it with their own (value, label) pairs.
        """
        return []
    _columns = {
        'name': fields.char('Name', required=True,
            help="This field will fill the packing origin and the name of its moves"),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
        # How the procurement group is carried over to documents this rule creates.
        'group_propagation_option': fields.selection([('none', 'Leave Empty'), ('propagate', 'Propagate'), ('fixed', 'Fixed')], string="Propagation of Procurement Group"),
        'group_id': fields.many2one('procurement.group', 'Fixed Procurement Group'),
        # Selection values are provided dynamically by _get_action() so that
        # inheriting modules can contribute their own actions.
        'action': fields.selection(selection=lambda s, cr, uid, context=None: s._get_action(cr, uid, context=context),
            string='Action', required=True),
        'sequence': fields.integer('Sequence'),
        'company_id': fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        'group_propagation_option': 'propagate',
        'sequence': 20,
        'active': True,
    }
class procurement_order(osv.osv):
    """
    Procurement Orders
    """
    _name = "procurement.order"
    _description = "Procurement"
    _order = 'priority desc, date_planned, id asc'
    _inherit = ['mail.thread']
    _log_create = False
    _columns = {
        'name': fields.text('Description', required=True),
        'origin': fields.char('Source Document',
            help="Reference of the document that created this Procurement.\n"
            "This is automatically completed by Odoo."),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        # These two fields are used for scheduling
        'priority': fields.selection(PROCUREMENT_PRIORITIES, 'Priority', required=True, select=True, track_visibility='onchange'),
        'date_planned': fields.datetime('Scheduled Date', required=True, select=True, track_visibility='onchange'),
        'group_id': fields.many2one('procurement.group', 'Procurement Group'),
        'rule_id': fields.many2one('procurement.rule', 'Rule', track_visibility='onchange', help="Chosen rule for the procurement resolution. Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior."),
        'product_id': fields.many2one('product.product', 'Product', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
        'product_uos_qty': fields.float('UoS Quantity', states={'confirmed': [('readonly', False)]}, readonly=True),
        'product_uos': fields.many2one('product.uom', 'Product UoS', states={'confirmed': [('readonly', False)]}, readonly=True),
        'state': fields.selection([
            ('cancel', 'Cancelled'),
            ('confirmed', 'Confirmed'),
            ('exception', 'Exception'),
            ('running', 'Running'),
            ('done', 'Done')
        ], 'Status', required=True, track_visibility='onchange', copy=False),
    }
    _defaults = {
        'state': 'confirmed',
        'priority': '1',
        'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c)
    }
    def unlink(self, cr, uid, ids, context=None):
        """Delete procurements, allowing it only for cancelled records;
        raises for any other state."""
        procurements = self.read(cr, uid, ids, ['state'], context=context)
        unlink_ids = []
        for s in procurements:
            if s['state'] == 'cancel':
                unlink_ids.append(s['id'])
            else:
                raise osv.except_osv(_('Invalid Action!'),
                        _('Cannot delete Procurement Order(s) which are in %s state.') % s['state'])
        return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
    def do_view_procurements(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing procurement orders
        of same procurement group of given ids.
        '''
        act_obj = self.pool.get('ir.actions.act_window')
        action_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'procurement.do_view_procurements', raise_if_not_found=True)
        result = act_obj.read(cr, uid, [action_id], context=context)[0]
        group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
        result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
        return result
    def onchange_product_id(self, cr, uid, ids, product_id, context=None):
        """ Finds UoM and UoS of changed product.
        @param product_id: Changed id of product.
        @return: Dictionary of values.
        """
        if product_id:
            w = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            v = {
                'product_uom': w.uom_id.id,
                'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id
            }
            return {'value': v}
        return {}
    def get_cancel_ids(self, cr, uid, ids, context=None):
        """Return the subset of ids whose procurement is not yet done and
        may therefore be cancelled."""
        return [proc.id for proc in self.browse(cr, uid, ids, context=context) if proc.state != 'done']
    def cancel(self, cr, uid, ids, context=None):
        """Cancel the given procurements, skipping those already done.

        NOTE(review): returns None when nothing is cancellable — callers
        relying on the write() return value should be aware.
        """
        #cancel only the procurements that aren't done already
        to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
        if to_cancel_ids:
            return self.write(cr, uid, to_cancel_ids, {'state': 'cancel'}, context=context)
    def reset_to_confirmed(self, cr, uid, ids, context=None):
        """Put the procurements back into the 'confirmed' state."""
        return self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
    def run(self, cr, uid, ids, autocommit=False, context=None):
        """Resolve each procurement: assign a rule and execute it, moving
        the record to 'running' on success or 'exception' on failure.

        With autocommit, each procurement is committed individually and
        an OperationalError (e.g. a serialization failure) on one record
        is rolled back without aborting the rest of the batch.
        """
        for procurement_id in ids:
            #we intentionally do the browse under the for loop to avoid caching all ids which would be resource greedy
            #and useless as we'll make a refresh later that will invalidate all the cache (and thus the next iteration
            #will fetch all the ids again)
            procurement = self.browse(cr, uid, procurement_id, context=context)
            if procurement.state not in ("running", "done"):
                try:
                    if self._assign(cr, uid, procurement, context=context):
                        procurement.refresh()
                        res = self._run(cr, uid, procurement, context=context or {})
                        if res:
                            self.write(cr, uid, [procurement.id], {'state': 'running'}, context=context)
                        else:
                            self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
                    else:
                        self.message_post(cr, uid, [procurement.id], body=_('No rule matching this procurement'), context=context)
                        self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
                    if autocommit:
                        cr.commit()
                except OperationalError:
                    if autocommit:
                        cr.rollback()
                        continue
                    else:
                        raise
        return True
    def check(self, cr, uid, ids, autocommit=False, context=None):
        """Check which procurements are fulfilled (via _check) and mark
        them 'done'. Returns the list of ids moved to 'done'."""
        done_ids = []
        for procurement in self.browse(cr, uid, ids, context=context):
            try:
                result = self._check(cr, uid, procurement, context=context)
                if result:
                    done_ids.append(procurement.id)
                if autocommit:
                    cr.commit()
            except OperationalError:
                if autocommit:
                    cr.rollback()
                    continue
                else:
                    raise
        if done_ids:
            self.write(cr, uid, done_ids, {'state': 'done'}, context=context)
        return done_ids
    #
    # Method to overwrite in different procurement modules
    #
    def _find_suitable_rule(self, cr, uid, procurement, context=None):
        '''This method returns a procurement.rule that depicts what to do with the given procurement
        in order to complete its needs. It returns False if no suiting rule is found.
            :param procurement: browse record
            :rtype: int or False
        '''
        return False
    def _assign(self, cr, uid, procurement, context=None):
        '''This method check what to do with the given procurement in order to complete its needs.
        It returns False if no solution is found, otherwise it stores the matching rule (if any) and
        returns True.
            :param procurement: browse record
            :rtype: boolean
        '''
        #if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually)
        if procurement.rule_id:
            return True
        elif procurement.product_id.type != 'service':
            rule_id = self._find_suitable_rule(cr, uid, procurement, context=context)
            if rule_id:
                self.write(cr, uid, [procurement.id], {'rule_id': rule_id}, context=context)
                return True
        return False
    def _run(self, cr, uid, procurement, context=None):
        '''This method implements the resolution of the given procurement
            :param procurement: browse record
            :returns: True if the resolution of the procurement was a success, False otherwise to set it in exception
        '''
        return True
    def _check(self, cr, uid, procurement, context=None):
        '''Returns True if the given procurement is fulfilled, False otherwise
            :param procurement: browse record
            :rtype: boolean
        '''
        return False
    #
    # Scheduler
    #
    def run_scheduler(self, cr, uid, use_new_cursor=False, company_id = False, context=None):
        '''
        Call the scheduler to check the procurement order. This is intented to be done for all existing companies at
        the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues.
        @param self: The object pointer
        @param cr: The current row, from the database cursor,
        @param uid: The current user ID for security checks
        @param ids: List of selected IDs
        @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        @param context: A standard dictionary for contextual values
        @return:  Dictionary of values
        '''
        if context is None:
            context = {}
        try:
            if use_new_cursor:
                cr = openerp.registry(cr.dbname).cursor()
            # Run confirmed procurements
            dom = [('state', '=', 'confirmed')]
            if company_id:
                dom += [('company_id', '=', company_id)]
            prev_ids = []
            # Loop until the set of confirmed procurements stops changing
            # (run() may create new confirmed procurements).
            while True:
                ids = self.search(cr, SUPERUSER_ID, dom, context=context)
                if not ids or prev_ids == ids:
                    break
                else:
                    prev_ids = ids
                self.run(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
                if use_new_cursor:
                    cr.commit()
            # Check if running procurements are done
            # NOTE(review): 'offset' is never incremented inside the loop;
            # termination relies on the prev_ids comparison — confirm intended.
            offset = 0
            dom = [('state', '=', 'running')]
            if company_id:
                dom += [('company_id', '=', company_id)]
            prev_ids = []
            while True:
                ids = self.search(cr, SUPERUSER_ID, dom, offset=offset, context=context)
                if not ids or prev_ids == ids:
                    break
                else:
                    prev_ids = ids
                self.check(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
                if use_new_cursor:
                    cr.commit()
        finally:
            if use_new_cursor:
                try:
                    cr.close()
                except Exception:
                    pass
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
takaaptech/sky_engine | mojo/public/tools/bindings/pylib/mojom/generate/pack_tests.py | 69 | 6000 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import module as mojom
import pack
import test_support
# Short aliases for the test_support helpers used throughout this file.
# EXPECT_* return the number of failed expectations (0 on success).
EXPECT_EQ = test_support.EXPECT_EQ
EXPECT_TRUE = test_support.EXPECT_TRUE
RunTest = test_support.RunTest
def TestOrdinalOrder():
    """Fields pack in ordinal order, not declaration order.

    Returns the number of failed expectations.
    """
    error_count = 0
    struct = mojom.Struct('test')
    struct.AddField('testfield1', mojom.INT32, 2)
    struct.AddField('testfield2', mojom.INT32, 1)
    packed = pack.PackedStruct(struct)
    error_count += EXPECT_EQ(2, len(packed.packed_fields))
    # testfield2 has the lower ordinal, so it must come first.
    error_count += EXPECT_EQ('testfield2', packed.packed_fields[0].field.name)
    error_count += EXPECT_EQ('testfield1', packed.packed_fields[1].field.name)
    return error_count
def TestZeroFields():
    """A struct with no fields packs into zero packed fields.

    Returns the number of failed expectations.
    """
    empty_struct = mojom.Struct('test')
    packed = pack.PackedStruct(empty_struct)
    return EXPECT_EQ(0, len(packed.packed_fields))
def TestOneField():
    """A struct with a single field packs into exactly one packed field.

    Returns the number of failed expectations.
    """
    struct = mojom.Struct('test')
    struct.AddField('testfield1', mojom.INT8)
    packed = pack.PackedStruct(struct)
    return EXPECT_EQ(1, len(packed.packed_fields))
# Pass three tuples.
# |kinds| is a sequence of mojom.Kinds that specify the fields that are to
# be created.
# |fields| is the expected order of the resulting fields, with the integer
# "1" first.
# |offsets| is the expected order of offsets, with the integer "0" first.
def TestSequence(kinds, fields, offsets):
    """Pack fields of the given |kinds| and verify order and offsets.

    Returns the number of failed expectations.
    """
    errors = 0
    struct = mojom.Struct('test')
    index = 1
    for kind in kinds:
        struct.AddField("%d" % index, kind)
        index += 1
    ps = pack.PackedStruct(struct)
    num_fields = len(ps.packed_fields)
    errors += EXPECT_EQ(len(kinds), num_fields)
    for i in xrange(num_fields):
        # BUG FIX: the return values of these per-field checks were
        # previously discarded, so field-name and offset mismatches never
        # counted towards the returned error total.
        errors += EXPECT_EQ("%d" % fields[i], ps.packed_fields[i].field.name)
        errors += EXPECT_EQ(offsets[i], ps.packed_fields[i].offset)
    return errors
def TestPaddingPackedInOrder():
  """Two bytes followed by an int32 pack without any reordering."""
  kinds = (mojom.INT8, mojom.UINT8, mojom.INT32)
  expected_fields = (1, 2, 3)
  expected_offsets = (0, 1, 4)
  return TestSequence(kinds, expected_fields, expected_offsets)
def TestPaddingPackedOutOfOrder():
  """A later byte field is pulled forward to fill padding before an int32."""
  kinds = (mojom.INT8, mojom.INT32, mojom.UINT8)
  expected_fields = (1, 3, 2)
  expected_offsets = (0, 1, 4)
  return TestSequence(kinds, expected_fields, expected_offsets)
def TestPaddingPackedOverflow():
  """Several small fields fill the padding holes left by alignment."""
  kinds = (mojom.INT8, mojom.INT32, mojom.INT16, mojom.INT8, mojom.INT8)
  # 2 bytes should be packed together first, followed by short, then by int.
  expected_fields = (1, 4, 3, 2, 5)
  expected_offsets = (0, 1, 2, 4, 8)
  return TestSequence(kinds, expected_fields, expected_offsets)
def TestNullableTypes():
  """Nullable kinds pack with the same sizes as their non-nullable forms."""
  kinds = (mojom.STRING.MakeNullableKind(),
           mojom.HANDLE.MakeNullableKind(),
           mojom.Struct('test_struct').MakeNullableKind(),
           mojom.DCPIPE.MakeNullableKind(),
           mojom.Array().MakeNullableKind(),
           mojom.DPPIPE.MakeNullableKind(),
           mojom.Array(length=5).MakeNullableKind(),
           mojom.MSGPIPE.MakeNullableKind(),
           mojom.Interface('test_inteface').MakeNullableKind(),
           mojom.SHAREDBUFFER.MakeNullableKind(),
           mojom.InterfaceRequest().MakeNullableKind())
  expected_fields = (1, 2, 4, 3, 5, 6, 8, 7, 9, 10, 11)
  expected_offsets = (0, 8, 12, 16, 24, 32, 36, 40, 48, 52, 56)
  return TestSequence(kinds, expected_fields, expected_offsets)
def TestAllTypes():
  """Exercise packing across every primitive, reference and handle kind."""
  # NOTE(review): |fields| and |offsets| contain 18 entries but only 17
  # kinds are supplied; TestSequence only checks len(kinds) entries, so the
  # trailing (18, 88) pair is silently ignored -- confirm whether a kind is
  # missing from the first tuple.
  return TestSequence(
      (mojom.BOOL, mojom.INT8, mojom.STRING, mojom.UINT8,
       mojom.INT16, mojom.DOUBLE, mojom.UINT16,
       mojom.INT32, mojom.UINT32, mojom.INT64,
       mojom.FLOAT, mojom.STRING, mojom.HANDLE,
       mojom.UINT64, mojom.Struct('test'), mojom.Array(),
       mojom.STRING.MakeNullableKind()),
      (1, 2, 4, 5, 7, 3, 6, 8, 9, 10, 11, 13, 12, 14, 15, 16, 17, 18),
      (0, 1, 2, 4, 6, 8, 16, 24, 28, 32, 40, 44, 48, 56, 64, 72, 80, 88))
def TestPaddingPackedOutOfOrderByOrdinal():
  """Ordinals reorder fields first; padding packing then applies on top."""
  error_count = 0
  s = mojom.Struct('test')
  s.AddField('testfield1', mojom.INT8)
  s.AddField('testfield3', mojom.UINT8, 3)
  s.AddField('testfield2', mojom.INT32, 2)
  packed = pack.PackedStruct(s)
  error_count += EXPECT_EQ(3, len(packed.packed_fields))
  # The second byte (testfield3) is packed in behind the first byte,
  # ahead of the int32 that carries the lower ordinal.
  expected_order = ('testfield1', 'testfield3', 'testfield2')
  for position, name in enumerate(expected_order):
    error_count += EXPECT_EQ(name, packed.packed_fields[position].field.name)
  expected_offsets = (0, 1, 4)
  for position, offset in enumerate(expected_offsets):
    error_count += EXPECT_EQ(offset, packed.packed_fields[position].offset)
  return error_count
def TestBools():
  """Nine bools pack as bits across two bytes; the int32 is placed last."""
  error_count = 0
  s = mojom.Struct('test')
  s.AddField('bit0', mojom.BOOL)
  s.AddField('bit1', mojom.BOOL)
  s.AddField('int', mojom.INT32)
  for n in xrange(2, 9):
    s.AddField('bit%d' % n, mojom.BOOL)
  packed = pack.PackedStruct(s)
  error_count += EXPECT_EQ(10, len(packed.packed_fields))
  # The first eight bools share byte 0, one bit position each.
  for i, pf in enumerate(packed.packed_fields[:8]):
    error_count += EXPECT_EQ(0, pf.offset)
    error_count += EXPECT_EQ("bit%d" % i, pf.field.name)
    error_count += EXPECT_EQ(i, pf.bit)
  # The ninth bool starts a fresh byte at bit 0.
  ninth = packed.packed_fields[8]
  error_count += EXPECT_EQ("bit8", ninth.field.name)
  error_count += EXPECT_EQ(1, ninth.offset)
  error_count += EXPECT_EQ(0, ninth.bit)
  # The int32 is aligned after the bool bytes.
  last = packed.packed_fields[9]
  error_count += EXPECT_EQ("int", last.field.name)
  error_count += EXPECT_EQ(4, last.offset)
  return error_count
def Main(args):
  """Run the pack test suite; the return value is the total error count."""
  # TestOrdinalOrder is intentionally not in this list (matching the
  # original runner); TestSequence is exercised indirectly by the others.
  tests = (
      TestZeroFields,
      TestOneField,
      TestPaddingPackedInOrder,
      TestPaddingPackedOutOfOrder,
      TestPaddingPackedOverflow,
      TestNullableTypes,
      TestAllTypes,
      TestPaddingPackedOutOfOrderByOrdinal,
      TestBools,
  )
  return sum(RunTest(test) for test in tests)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
scootergrisen/virtaal | virtaal/controllers/unitcontroller.py | 4 | 10360 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2011 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from gobject import SIGNAL_RUN_FIRST, timeout_add
from translate.storage import workflow
from virtaal.common import GObjectWrapper
from basecontroller import BaseController
class UnitController(BaseController):
    """Controller for unit-based operations.

    Sits between the store controller and the unit view: it loads the
    current unit into the view, re-emits the view's editing signals as
    controller-level signals, and (for storage formats that support unit
    states) drives the unit's translation workflow state.
    """
    __gtype_name__ = "UnitController"
    # Signals re-emitted on behalf of the view so that other controllers
    # can connect here instead of to the view directly.
    __gsignals__ = {
        'unit-done': (SIGNAL_RUN_FIRST, None, (object, int)),
        'unit-modified': (SIGNAL_RUN_FIRST, None, (object,)),
        'unit-delete-text': (SIGNAL_RUN_FIRST, None, (object, object, object, int, int, object, int)),
        'unit-insert-text': (SIGNAL_RUN_FIRST, None, (object, object, int, object, int)),
        'unit-paste-start': (SIGNAL_RUN_FIRST, None, (object, object, object, int)),
    }
    # Interval passed to gobject's timeout_add() (milliseconds) before the
    # unit's workflow state is re-evaluated after an edit.
    STATE_TIMEOUT = 200
    # INITIALIZERS #
    def __init__(self, store_controller):
        """Register with ``store_controller``, create the view and connect signals."""
        GObjectWrapper.__init__(self)
        self.current_unit = None
        self.main_controller = store_controller.main_controller
        self.main_controller.unit_controller = self
        self.store_controller = store_controller
        self.store_controller.unit_controller = self
        self.checks_controller = None
        # Imported here (not at module level) -- presumably to avoid a
        # circular import at load time; confirm before moving it.
        from virtaal.views.unitview import UnitView
        self.view = UnitView(self)
        self.view.connect('delete-text', self._unit_delete_text)
        self.view.connect('insert-text', self._unit_insert_text)
        self.view.connect('paste-start', self._unit_paste_start)
        self.view.connect('modified', self._unit_modified)
        self.view.connect('unit-done', self._unit_done)
        self.view.enable_signals()
        self.store_controller.connect('store-loaded', self._on_store_loaded)
        self.main_controller.connect('controller-registered', self._on_controller_registered)
        self._recreate_workflow = False
        self._unit_state_names = {}
        self._state_timer_active = False
    # ACCESSORS #
    def get_unit_target(self, target_index):
        """Return the text of target number ``target_index`` from the view."""
        return self.view.get_target_n(target_index)
    def set_unit_target(self, target_index, value, cursor_pos=-1):
        """Put ``value`` into target ``target_index`` in the view."""
        self.view.set_target_n(target_index, value, cursor_pos)
    # METHODS #
    def get_unit_state_names(self, unit=None):
        """(Re)build and return the state-value -> display-name mapping.

        ``unit`` is currently unused. Note this rebuilds and replaces
        ``self._unit_state_names`` on every call.
        """
        self._unit_state_names = {
            #FIXME: choose friendly names
            workflow.StateEnum.EMPTY: _('Untranslated'),
            workflow.StateEnum.NEEDS_WORK: _('Needs work'),
            workflow.StateEnum.REJECTED: _('Rejected'),
            workflow.StateEnum.NEEDS_REVIEW: _('Needs review'),
            workflow.StateEnum.UNREVIEWED: _('Translated'),
            workflow.StateEnum.FINAL: _('Reviewed'),
        }
        return self._unit_state_names
    def set_current_state(self, newstate, from_user=False):
        """Record ``newstate`` on the current unit.

        ``from_user`` marks the state as explicitly chosen ("sticky"), in
        which case the view is not refreshed; otherwise the view's state
        label is updated.
        """
        if isinstance(newstate, workflow.UnitState):
            newstate = newstate.state_value
        self.current_unit._current_state = newstate
        if from_user:
            # No need to update the GUI, and we should make the choice sticky
            self.current_unit._state_sticky = True
        else:
            self.view.update_state(self._unit_state_names[newstate])
    def load_unit(self, unit):
        """Load ``unit`` into the view and return the view.

        Initialises the per-unit bookkeeping attributes (``_modified``,
        ``_state_sticky``, ``_current_state``, ``_workflow``) unless
        ``unit`` is already the current unit.
        """
        if self.current_unit and self.current_unit is unit:
            return self.view
        self.current_unit = unit
        self.nplurals = self.main_controller.lang_controller.target_lang.nplurals
        unit._modified = False
        if not unit.STATE:
            # If the unit doesn't support states, just skip the state code
            self.view.load_unit(unit)
            return self.view
        # This unit does support states
        state_n, state_id = unit.get_state_n(), unit.get_state_id()
        state_names = self.get_unit_state_names()
        unit._state_sticky = False
        unit._current_state = state_n
        # NOTE(review): the "or True" forces this branch on every load, so
        # the _recreate_workflow flag is effectively unused here -- confirm
        # whether this is a deliberate workaround.
        if self._recreate_workflow or True:
            # This will only happen when a document is loaded.
            self._unit_state_names = {}
            # FIXME: The call below is run for the second time, but is necessary
            # because the names could have changed in the new document :/
            state_names = self.get_unit_state_names()
            if state_names:
                unit._workflow = workflow.create_unit_workflow(unit, state_names)
            self._recreate_workflow = False
        if state_names:
            unit._workflow.reset(unit, init_state=state_names[state_id])
            #XXX: we should make 100% sure that .reset() doesn't actually call
            # a set method in the unit, since it might cause a diff or loss of
            # meta-data.
        self.view.load_unit(unit)
        return self.view
    def _unit_delete_text(self, unitview, deleted, parent, offset, cursor_pos, elem, target_num):
        """Re-emit the view's delete-text event with the current unit attached."""
        self.emit('unit-delete-text', self.current_unit, deleted, parent, offset, cursor_pos, elem, target_num)
    def _unit_insert_text(self, unitview, ins_text, offset, elem, target_num):
        """Re-emit the view's insert-text event with the current unit attached."""
        self.emit('unit-insert-text', self.current_unit, ins_text, offset, elem, target_num)
    def _unit_paste_start(self, unitview, old_text, offsets, target_num):
        """Re-emit the view's paste-start event with the current unit attached."""
        self.emit('unit-paste-start', self.current_unit, old_text, offsets, target_num)
    def _unit_modified(self, *args):
        """Mark the current unit as modified and schedule a state re-check."""
        self.emit('unit-modified', self.current_unit)
        self.current_unit._modified = True
        if self.current_unit.STATE and not self.current_unit._state_sticky:
            self._start_state_timer()
    def _unit_done(self, widget, unit):
        """Finalise ``unit`` when editing moves on: commit its workflow
        state, emit ``unit-done`` and drop the per-unit bookkeeping."""
        if unit._modified and unit.STATE:
            if len(unit.target) != 0 and unit._current_state == workflow.StateEnum.EMPTY and not unit._state_sticky:
                # Oops! The user entered a translation, but the timer didn't
                # expire yet, so let's mark it fuzzy to be safe. We don't know
                # exactly what kind of fuzzy the format supports, so let's use
                # .set_state_n() directly. Also, if the workflow does more, we
                # probably don't want it, since we really only want to set the
                # state.
                unit.set_state_n(workflow.StateEnum.NEEDS_REVIEW)
            else:
                # Now really advance the workflow that we ended at
                unit._workflow.set_current_state(self._unit_state_names[unit._current_state])
        self.emit('unit-done', unit, unit._modified)
        # let's just clean up a bit:
        del unit._modified
        if unit.STATE:
            del unit._state_sticky
            del unit._current_state
    def _state_timer_expired(self, unit):
        """Timer callback: move between EMPTY and UNREVIEWED according to
        whether the (shortest plural form of the) target now has text."""
        self._state_timer_active = False
        if unit is not self.current_unit:
            return
        if unit.hasplural():
            target_len = min([len(s) for s in unit.target.strings])
        else:
            target_len = len(unit.target)
        empty_state = unit._current_state == workflow.StateEnum.EMPTY
        if target_len and empty_state:
            self.set_current_state(workflow.StateEnum.UNREVIEWED)
        elif not target_len and not empty_state:
            self.set_current_state(workflow.StateEnum.EMPTY)
    def _start_state_timer(self):
        """Start the one-shot state re-check timer if it is not running."""
        if self._state_timer_active:
            return
        self._state_timer_active = True
        timeout_add(self.STATE_TIMEOUT, self._state_timer_expired, self.current_unit)
    def prepare_for_save(self):
        """Finalise outstanding changes to the toolkit store for saving."""
        unit = self.current_unit
        if unit._modified and unit.STATE:
            unit._workflow.set_current_state(self._unit_state_names[unit._current_state])
    # EVENT HANDLERS #
    def _on_controller_registered(self, main_controller, controller):
        """Hook up to sibling controllers as they are registered."""
        if controller is main_controller.lang_controller:
            self.main_controller.lang_controller.connect('source-lang-changed', self._on_language_changed)
            self.main_controller.lang_controller.connect('target-lang-changed', self._on_language_changed)
        elif controller is main_controller.checks_controller:
            self.checks_controller = controller
        elif controller is main_controller.placeables_controller:
            controller.connect('parsers-changed', self._on_parsers_changed)
            self._on_parsers_changed(controller)
    def _on_language_changed(self, lang_controller, langcode):
        """Refresh the plural count and the view when a language changes."""
        self.nplurals = lang_controller.target_lang.nplurals
        if hasattr(self, 'view'):
            self.view.update_languages()
    def _on_parsers_changed(self, placeables_controller):
        """Re-apply the placeable parsers to the current unit's source."""
        if self.current_unit:
            self.current_unit.rich_source = placeables_controller.apply_parsers(self.current_unit.rich_source)
    def _on_store_loaded(self, store_controller):
        """Call C{_on_language_changed()} and set flag to recreate workflow.
        If the target language loaded at start-up (from config) is the same
        as that of the first opened file, C{self.view.update_languages()} is
        not called, because the L{LangController}'s C{"target-lang-changed"}
        signal is never emitted, because the language has not really
        changed.
        This event handler ensures that it is loaded. As a side-effect,
        C{self.view.update_languages()} is called twice if language before
        and after a store load is different. But we'll just have to live
        with that."""
        self._on_language_changed(
            self.main_controller.lang_controller,
            self.main_controller.lang_controller.target_lang.code
        )
        self._recreate_workflow = True
malvikasharan/APRICOT | apricotlib/apricot_visualization.py | 1 | 22211 | #!/usr/bin/env python
# Description = Visualizes different output data from APRICOT analysis
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
try:
import subprocess
except ImportError:
print('Python package subprocess is missing. Please install/update.\n')
sys.exit(0)
try:
import shutil
except ImportError:
print('Python package shutil is missing. Please install/update.\n')
sys.exit(0)
class VizApricotAnalysis(object):
    """Generate visualization files (BioJS HTML pages, matplotlib plots and
    an R heatmap) from the different outputs of an APRICOT analysis run."""
    def __init__(self, annotation_scoring_data,
                 domain_file,
                 additional_annotation,
                 outpath):
        """Store input/output paths and initialise the working containers.

        annotation_scoring_data -- path to the annotation scoring table
        domain_file             -- path to the domain annotation table
        additional_annotation   -- directory holding secondary structure,
                                   localization and PDB prediction data
        outpath                 -- root directory for visualization output
        """
        self._annotation_scoring_data = annotation_scoring_data
        self._domain_file = domain_file
        self._additional_annotation = additional_annotation
        self._outpath = outpath
        # Output subdirectories (assumed to exist already).
        self._sec_str = self._outpath+'/secondary_structure'
        self._dom_highlight = self._outpath+'/domain_highlighting'
        self._pdb_msa = self._outpath+'/homologous_pdb_msa'
        self._overview = self._outpath+'/overview_and_statistics'
        self._localize = self._outpath+'/subcellular_localization'
        self._annotation_data = []
        self._filter_viz_dict = {}
        self._highlight_dict = {}
        self._uid_key_dict = {}
        self._dom_rank = {}
        self._fasta_dict = {}
        self._secstr_dict = {}
        self._dom_annotation = {}
        self._location_dict = defaultdict(lambda: defaultdict(lambda: []))
        # Background colours for helix (H), strand (E) and coil (C).
        self._sec_str_color = {'H': '#FF6666', 'E': '#33CCCC', 'C': '#FFFFCC'}
        # NOTE(review): the inner default factory yields the *type* ``float``
        # (missing keys default to the class, not 0.0) -- confirm intent.
        self._localization_dict = defaultdict(
            lambda: defaultdict(lambda: float))
        # One colour per domain annotation track.
        self._color_list = (
            "Blue", "Green", "Teal", "Lime", "SeaGreen", "MediumTurquoise",
            "Pink", "DarkOliveGreen", "Indigo", "Orange", "SlateBlue",
            "LawnGreen", "Brown", "LightSkyBlue", "LightGreen", "DarkOrchid",
            "GoldenRod", "MidnightBlue", "LightPink", "Gold")
    def viz_all_the_visualization_files(self):
        """Run every visualization step; viz_domain_data() must run first
        because the later steps use the lookup tables it builds."""
        self.viz_domain_data()
        self.domain_highlight()
        self.viz_annotation_scoring()
        self.viz_secondary_structure()
        self.viz_subcellular_localization()
        self.viz_homologous_pdb_msa()
    def viz_domain_data(self):
        """Parse the domain annotation table and build the per-protein and
        per-domain lookup tables used by the other visualization steps."""
        with open(self._domain_file, 'r') as in_fh:
            for entry in in_fh:
                # Skip the header row (starts with 'Entry').
                if not entry.startswith('Entry'):
                    domain_info = DomainDataColumns(
                        entry.strip().split('\t'))
                    prot_name = domain_info.entry_name
                    prot_end = int(domain_info.length)-1
                    # JS snippet describing the whole protein track.
                    prot_key = '\n'.join(
                        ["\tstart: 0,", "\tend: %s,"
                         % prot_end, '\tname: "%s",' % prot_name,
                         '\thref: "http://www.uniprot.org/uniprot/%s"'
                         % domain_info.uid])
                    self._uid_key_dict[domain_info.uid] = prot_key
                    self._location_dict[
                        domain_info.uid][domain_info.domain_id].append(
                        '\t{start: %s, end: %s}' % (
                            domain_info.start, domain_info.stop))
                    self._dom_annotation[
                        domain_info.domain_id] = domain_info.full_name
                    src = domain_info.resource
                    if src == 'CDD':
                        # Rank = position of this domain among the CDD hits
                        # for the protein (list length after append).
                        self._dom_rank.setdefault(
                            domain_info.uid+':CDD', []).append(
                            domain_info.domain_id)
                        self._highlight_dict.setdefault(
                            prot_key, []).append('\n'.join(
                                ['\t\tstart: %s,' % domain_info.start,
                                 '\t\tend: %s,' % domain_info.stop,
                                 '\t\tdomain: {', '\t\t\tname: "%s",'
                                 % domain_info.domain_id,
                                 '\t\t\tid: %s,' % len(
                                     self._dom_rank[domain_info.uid+':CDD']),
                                 '\t\t\tdescription: "%s"},' %
                                 domain_info.short_name,
                                 '\t\tsource: {', '\t\t\tname: "CDD",',
                                 '\t\t\thref: null,', '\t\t\tid: 1}']))
                    else:
                        # Everything that is not CDD is treated as InterPro.
                        self._dom_rank.setdefault(
                            domain_info.uid+':IPR', []).append(
                            domain_info.domain_id)
                        self._highlight_dict.setdefault(
                            prot_key, []).append('\n'.join(
                                ['start: %s,' % domain_info.start,
                                 'end: %s,' % domain_info.stop,
                                 'domain: {', '\t\tname: "%s",' %
                                 domain_info.domain_id,
                                 '\t\tid: %s,' % len(
                                     self._dom_rank[domain_info.uid+':IPR']),
                                 '\t\tdescription: "%s"},' % domain_info.short_name,
                                 'source: {', '\t\tname: "InterPro",',
                                 '\t\thref: null,', '\t\tid: 2}']))
        # NOTE(review): the returned tuple mixes lookup dicts with the
        # _dom_highlight output *path* -- verify callers actually use it.
        return self._uid_key_dict, self._location_dict, self._dom_annotation, self._dom_highlight, self._highlight_dict
    def domain_highlight(self):
        """Write one BioJS protein-viewer HTML page per UniProt id, showing
        the domain highlights collected by viz_domain_data()."""
        for uid in self._uid_key_dict.keys():
            # NOTE(review): there is no comma after the first literal below,
            # so the <meta> and <link> strings are concatenated onto one
            # line -- probably a missing comma; harmless for rendered HTML.
            header = '\n'.join(['<meta charset="UTF-8">'
                '<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/biojs-vis-protein-viewer@0.1.4">',
                '<script src="https://wzrd.in/bundle/biojs-vis-protein-viewer@0.1.4"></script>',
                '<div id="j-main">', '</div>', '<script>',
                'var ProteinViewer = require("biojs-vis-protein-viewer");'])
            body = '\n'.join(['var highlightData = [', '\t{',
                '\n\t},\n\t{\n'.join(self._highlight_dict[
                    self._uid_key_dict[uid]]), '\t}', '];'])
            panel = '\n'.join(['var highlightLocusData = {',
                self._uid_key_dict[uid], '};'])
            footer = '\n'.join([
                'var pv = new ProteinViewer({',
                '\tel: document.getElementById("j-main"),',
                '\tdata: highlightData,',
                '\tlocusData: highlightLocusData', '});',
                'pv.render();', '</script>'])
            with open(self._dom_highlight+'/%s.html' % uid, 'w') as out_fh:
                out_fh.write('\n'.join([header, body, panel, footer]))
    def viz_annotation_scoring(self):
        """Plot the four filter scores and the Bayesian score (the last five
        columns of the annotation scoring table) as sorted series."""
        if os.path.exists(self._annotation_scoring_data):
            with open(self._annotation_scoring_data, 'r') as in_fh:
                for entry in in_fh:
                    # Skip the header row (starts with 'Entry').
                    if not entry.startswith('Entry'):
                        self._filter_viz_dict.setdefault('filter1_list', []).append(
                            float(entry.strip().split('\t')[-5]))
                        self._filter_viz_dict.setdefault('filter2_list', []).append(
                            float(entry.strip().split('\t')[-4]))
                        self._filter_viz_dict.setdefault('filter3_list', []).append(
                            float(entry.strip().split('\t')[-3]))
                        self._filter_viz_dict.setdefault('filter4_list', []).append(
                            float(entry.strip().split('\t')[-2]))
                        self._filter_viz_dict.setdefault('bayscore_list', []).append(
                            float(entry.strip().split('\t')[-1]))
            try:
                label_list = range(0, len(self._filter_viz_dict['bayscore_list']))
                # Each series is sorted independently, so points on the same
                # x position do not belong to the same protein.
                plt.plot(sorted(self._filter_viz_dict['filter1_list']), 'ro', label='Filter-1 Score')
                plt.plot(sorted(self._filter_viz_dict['filter2_list']), 'ys', label='Filter-2 Score')
                plt.plot(sorted(self._filter_viz_dict['filter3_list']), 'g8', label='Filter-3 Score')
                plt.plot(sorted(self._filter_viz_dict['filter4_list']), 'mp', label='Filter-4 Score')
                plt.plot(sorted(self._filter_viz_dict['bayscore_list']), 'b^', label='Bayesian Score')
                plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                           ncol=3, mode="expand", borderaxespad=0.)
                plt.xticks(label_list)
                plt.xlabel('Annotation scores of selected proteins')
                plt.ylabel('Filter/Bayesian score')
                plt.savefig(os.path.join(self._overview, 'viz_annotation_scoring.png'))
            except KeyError:
                # No data rows were read above, so the dict has no keys.
                print("!!! The annotation scoring file seems to be empty."
                      " Please reanalyse annotation score using the subcommand 'annoscore' !!!")
        else:
            print('The data for annotation scores do not exist,'
                  'please calculate the annotation score using the subcommand'
                  '"annoscore", the flag "-nd" can be used to specify the absolute path for needle.')
    def viz_secondary_structure(self):
        """Convert RaptorX (or literature-based) secondary structure
        predictions into BioJS highlight statements, one run per segment."""
        for uid in self._uid_key_dict.keys():
            if uid+'.horiz' in os.listdir(
                    self._additional_annotation+'/protein_secondary_structure/'):
                files = uid+'.horiz'
            elif uid+'.plain' in os.listdir(
                    self._additional_annotation+'/protein_secondary_structure/'):
                files = uid+'.plain'
                print("\nRaptorX secondary structure files are unavailable.")
                print("Visualizing secondary structure using literature based analysis.\n")
            else:
                print("\nRaptorX/literature-based secondary structure files are unavailable.")
                print("Exiting the current analysis.")
                print("Please re-run the secondary structure prediction by RaptorX\n")
                return
            secstr_list = []
            uid_secstr_dict = {}
            sec_data_sites = []
            with open(self._additional_annotation+
                      '/protein_secondary_structure/'+files, 'r') as in_fh:
                for entry in in_fh:
                    if 'AA: ' in entry:
                        self._fasta_dict.setdefault(uid,
                            []).append(entry.strip().split('AA: ')[1])
                    if 'Pred: ' in entry:
                        try:
                            secstr_list.append(entry.strip().split('Pred: ')[1])
                        except IndexError:
                            print("\nRaptorX output file is incomplete. Exiting the current analysis.")
                            print("Please re-run the secondary structure prediction by RaptorX\n")
                            return
            # Index every residue's predicted state (H/E/C) by position.
            for i, pred_data in enumerate(''.join(secstr_list)):
                uid_secstr_dict[i] = pred_data
            # Record the start position of each run of identical states.
            for j in range(len(uid_secstr_dict)-1):
                if j == 0:
                    sec_data_sites.append(j)
                if not uid_secstr_dict[j] == uid_secstr_dict[j+1]:
                    sec_data_sites.append(j+1)
                    self._secstr_dict.setdefault(uid, []).append(
                        'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
                        %(int(sec_data_sites[-2])+1, int(j)+1,
                        self._sec_str_color[uid_secstr_dict[j]]))
            # NOTE(review): the final segment below is coloured with the
            # state at position j (= len-2 after the loop); if the very last
            # residue differs, its colour may be wrong -- confirm.
            self._secstr_dict.setdefault(uid, []).append(
                'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
                %(int(sec_data_sites[-1])+1, int(list(uid_secstr_dict.keys())[-1])+1,
                self._sec_str_color[uid_secstr_dict[j]]))
        self.sec_str_script()
    def sec_str_script(self):
        """Write a BioJS sequence-view HTML page per protein with domain
        annotation tracks and the secondary-structure highlights."""
        for uid in self._fasta_dict.keys():
            header = '\n'.join(['<meta charset="UTF-8">',
                '<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>',
                '<script src="https://wzrd.in/bundle/biojs-vis-sequence@0.1.7"></script>',
                '<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
                '<div id="snippetDiv"></div>', '<script>',
                'var yourDiv = document.getElementById("snippetDiv");',
                'var Seq = require("biojs-vis-sequence");'])
            footer = '\n'.join([
                'mySequence.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
                'mySequence.onAll(function(name,data){',
                'console.log(arguments);', '});', '};',
                'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
                '</script>'])
            body1 = '\n'.join(['var theSequence = "%s";' %
                ''.join(self._fasta_dict[uid]), 'yourDiv.textContent = "";',
                'window.onload = function() {', 'var mySequence = new Seq({',
                '\tsequence : theSequence,', '\ttarget : yourDiv.id,',
                '\tformat : "CODATA",', '\tformatOptions : {',
                '\ttitle:false,', '\tfooter:false', '\t},', '\tid : "%s"' % uid, '});'])
            body2 = '\n'.join(self._secstr_dict[uid])
            dom_list = sorted(list(self._location_dict[uid].keys()))
            annotation_list = []
            # One annotation track per domain, coloured from _color_list.
            for dom_id in dom_list:
                dom_idx = dom_list.index(dom_id)
                annotation_list.append('\n'.join([
                    'mySequence.addAnnotation({', 'name:"Domain-%s",' % str(int(dom_idx)+1),
                    'html:"<br>%s<br>%s</b>",' % (dom_id,
                    self._dom_annotation[dom_id]), 'color:"%s",' % self._color_list[dom_idx],
                    'regions: [', ',\n'.join(self._location_dict[uid][dom_id]), ']});']))
            with open(self._sec_str+'/'+uid+'.html', 'w') as out_fh:
                out_fh.write('\n'.join([header, body1, '\n'.join(annotation_list),
                                        body2, footer]))
    def viz_subcellular_localization(self):
        '''Summarise PsortB predictions into a CSV table and an R heatmap.'''
        if 'psortb_data_summary.csv' in os.listdir(
                self._additional_annotation+'/protein_localization'):
            total_loc = set()
            with open(
                    self._additional_annotation+'/protein_localization/psortb_data_summary.csv',
                    'r') as in_fh:
                for entry in in_fh:
                    # Skip the header ('Localization' column title).
                    if not 'Localization' in entry:
                        protein = entry.strip().split('\t')[0]
                        localization = entry.strip().split('\t')[1]
                        if not localization.lower() == 'unknown':
                            score = float(entry.strip().split('\t')[2])
                            self._localization_dict[protein][localization] = score
                            total_loc.add(localization)
            with open(self._localize+'/localization_table.csv', 'w') as out_fh:
                out_fh.write('Proteins\t%s\n' % '\t'.join(sorted(list(total_loc))))
                for each_prot in self._localization_dict.keys():
                    for localization in self._localization_dict[each_prot]:
                        # NOTE(review): one row is written per (protein,
                        # localization) pair, each with a single non-zero
                        # cell -- confirm the heatmap expects that shape.
                        entry_list = list('0'*len(total_loc))
                        loc_idx = sorted(list(total_loc)).index(localization)
                        entry_list[loc_idx] = self._localization_dict[each_prot][localization]
                        out_fh.write("%s\t%s\n" % (each_prot, '\t'.join(map(str, entry_list))))
            self._create_localization_heatmap()
        else:
            print("\nPsortB-based localization prediction files are unavailable.")
            print("Exiting the current analysis.")
            print("Please re-run the localization prediction by PsortB\n")
            return
    def _create_localization_heatmap(self):
        '''Write an R (gplots heatmap.2) script for the localization table
        and execute it with Rscript to produce a PDF heatmap.'''
        plot_file = self._localize+'/localization_heatmap.pdf'
        infile = self._localize+'/localization_table.csv'
        with open(self._localize+'/localization_heatmap.R', 'w') as r_fh:
            r_fh.write('\n'.join(['library(gplots)', 'library(RColorBrewer)', 'display.brewer.all()',
                'data <- read.csv("%s", header=T, sep = "\\t")' % infile,
                'rnames <- data[,1]', 'data_matrix <- data.matrix(data[,2:ncol(data)])',
                'data_matrix[is.na(data_matrix)] <- 0', 'data_matrix[is.nan(data_matrix)] <- 0',
                'data_matrix[is.infinite(data_matrix)] <- max(data_matrix)',
                'rownames(data_matrix) <- rnames', 'pdf(file="%s")' % plot_file,
                'out_map <- heatmap.2(data_matrix, dendrogram = "none", Rowv = FALSE, \
                Colv = FALSE, col=brewer.pal(9,"YlGn"), margins=c(5,8), \
                cexCol=0.8, cexRow=0.8, key.title="PsortB Pred-value", key.xlab="", key.ylab="")',
                'dev.off()']))
        subprocess.Popen(['Rscript %s/localization_heatmap.R' %
                          self._localize], shell=True).wait()
    def viz_homologous_pdb_msa(self):
        """Create the BioJS MSA HTML page and run ClustalW on each top-5
        homologous PDB sequence file found in the prediction directory."""
        header = '\n'.join(['<meta charset="UTF-8">',
            '<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/msa@0.4.8">',
            '<script src="https://wzrd.in/bundle/msa@0.4.8"></script>',
            '<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
            '<script src="https://wzrd.in/bundle/biojs-io-clustal@latest"></script>',
            '<script src="https://wzrd.in/bundle/biojs-io-gff@latest"></script>',
            '<script src="https://wzrd.in/bundle/xhr@latest"></script>',
            '<div id="snippetDiv"></div>', '<script>',
            'var rootDiv = document.getElementById("snippetDiv");',
            'var msa = require("msa");', 'var menuDiv = document.createElement("div");',
            'var msaDiv = document.createElement("div");',
            'rootDiv.appendChild(menuDiv);', 'rootDiv.appendChild(msaDiv);'])
        footer = '\n'.join(['opts.conf = {', '\tdropImport: true,',
            '\tmanualRendering: true', '};', 'opts.vis = {', '\tconserv: false,',
            '\toverviewbox: false,', '\tseqlogo: true,', '\tmetacell: true', '};',
            'opts.zoomer = {', '\tlabelIdLength: 20', '};', 'var m = msa(opts);',
            'gg = m;', 'm.u.file.importURL(url, function() {',
            '\tvar defMenu = new msa.menu.defaultmenu({', '\t\tel: menuDiv,',
            '\t\tmsa: m', '\t});', '\tdefMenu.render();', '\tm.render();', '});',
            'm.g.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
            'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
            '</script>'])
        # NOTE(review): no comma after the url literal, so it is concatenated
        # with 'var opts = {' into a single joined element -- confirm intent.
        body = '//EDIT PATH\n'.join([
            'var url = "https://github.com/malvikasharan/APRICOT/blob/master/Biojs_dependencies/data/biojs_msa_tab.clustal";'
            'var opts = {', '\tel: msaDiv', '};'])
        with open(self._pdb_msa+'/Biojs_pdb_msa_tab.html', 'w') as out_fh:
            out_fh.write('\n'.join([header, body, footer]))
        for files in os.listdir(self._additional_annotation+'/pdb_sequence_prediction/'):
            if '_top5.fasta' in files:
                shutil.copyfile(
                    self._additional_annotation+'/pdb_sequence_prediction/'+files,
                    self._pdb_msa+'/'+files)
                subprocess.Popen(['bin/reference_db_files/clustal/clustalw2 %s' %
                                  self._pdb_msa+'/'+files], shell=True).wait()
        print("\nPlease open the BioJS MSA tab generated in Biojs_pdb_msa_tab.html.")
        print("Import MSA files (.aln) in the BioJS MSA tab to visualize the alignment.\n")
class AnnotationScoringColumns(object):
    '''Column information of annotation scoring file'''
    # Attribute names in the exact column order of the scoring table.
    _FIELDS = (
        'uid', 'entry_name', 'prot_name', 'species', 'length',
        'resource', 'resource_id', 'domain_id', 'short_name', 'full_name',
        'domain_length', 'start', 'stop', 'ref_seq', 'q_seq',
        'ref_ss', 'q_ss', 'mol_mass', 'iso_pt', 'solub', 'vdw',
        'coverage', 'cov_by_dom', 'identity', 'iden_by_cov',
        'similarity', 'sim_by_cov', 'gap', 'gap_by_cov',
        'AA_RO', 'SS_RO', 'PC_RO',
        'AAC_ED', 'PCC_ED', 'DPC_ED', 'TPC_ED')
    def __init__(self, row):
        '''Bind each cell of ``row`` to its named attribute.

        Raises IndexError when ``row`` has fewer than 36 columns, exactly
        as the explicit per-index assignments did.
        '''
        for position, attribute in enumerate(self._FIELDS):
            setattr(self, attribute, row[position])
class DomainDataColumns(object):
    '''Column information of domain annotation file'''
    # Attribute names in the exact column order of the annotation table.
    _FIELDS = (
        'uid', 'entry_name', 'prot_name', 'species', 'length',
        'gene_name', 'locus_tag', 'existance', 'go', 'embl_id',
        'pdb_id', 'kegg_id', 'interpro_id', 'pfam_id', 'pubmed_id',
        'resource', 'resource_id', 'domain_id', 'short_name', 'full_name',
        'dom_kw', 'dom_go', 'members', 'dom_len', 'start', 'stop',
        'evalue', 'bitscore', 'bits', 'cover_len', 'cov_prcnt',
        'identity', 'iden_prcnt', 'similarity', 'sim_prcnt',
        'gaps', 'gap_prcnt', 'filter_tag')
    def __init__(self, row):
        '''Bind each cell of ``row`` to its named attribute.

        Raises IndexError when ``row`` has fewer than 38 columns, exactly
        as the explicit per-index assignments did.
        '''
        for position, attribute in enumerate(self._FIELDS):
            setattr(self, attribute, row[position])
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/django/contrib/gis/db/models/sql/query.py | 209 | 5406 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
# All spatial lookup terms accepted on geographic fields; the standard ORM
# query terms are merged in immediately below via ALL_TERMS.update().
ALL_TERMS = set([
    'bbcontains', 'bboverlaps', 'contained', 'contains',
    'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
    'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
    'dwithin', 'equals', 'exact',
    'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
    'left', 'right', 'overlaps_left', 'overlaps_right',
    'overlaps_above', 'overlaps_below',
    'strictly_above', 'strictly_below'
])
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
"""
A single spatial SQL query.
"""
# Overridding the valid query terms.
query_terms = ALL_TERMS
aggregates_module = gis_aggregates
compiler = 'GeoSQLCompiler'
#### Methods overridden from the base Query class ####
    def __init__(self, model, where=GeoWhereNode):
        """Initialise the query, defaulting to a GeoWhereNode so spatial
        lookups are resolved by the GIS-aware where clause."""
        super(GeoQuery, self).__init__(model, where)
        # The following attributes are customized for the GeoQuerySet.
        # The GeoWhereNode and SpatialBackend classes contain backend-specific
        # routines and functions.
        self.custom_select = {}
        self.transformed_srid = None
        self.extra_select_fields = {}
def clone(self, *args, **kwargs):
obj = super(GeoQuery, self).clone(*args, **kwargs)
# Customized selection dictionary and transformed srid flag have
# to also be added to obj.
obj.custom_select = self.custom_select.copy()
obj.transformed_srid = self.transformed_srid
obj.extra_select_fields = self.extra_select_fields.copy()
return obj
def convert_values(self, value, field, connection):
"""
Using the same routines that Oracle does we can convert our
extra selection objects into Geometry and Distance objects.
TODO: Make converted objects 'lazy' for less overhead.
"""
if connection.ops.oracle:
# Running through Oracle's first.
value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
if value is None:
# Output from spatial function is NULL (e.g., called
# function on a geometry field with NULL value).
pass
elif isinstance(field, DistanceField):
# Using the field's distance attribute, can instantiate
# `Distance` with the right context.
value = Distance(**{field.distance_att : value})
elif isinstance(field, AreaField):
value = Area(**{field.area_att : value})
elif isinstance(field, (GeomField, GeometryField)) and value:
value = Geometry(value)
elif field is not None:
return super(GeoQuery, self).convert_values(value, field, connection)
return value
def get_aggregation(self, using):
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
connection = connections[using]
for alias, aggregate in self.aggregate_select.items():
if isinstance(aggregate, gis_aggregates.GeoAggregate):
if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
self.extra_select_fields[alias] = GeomField()
return super(GeoQuery, self).get_aggregation(using)
def resolve_aggregate(self, value, aggregate, connection):
"""
Overridden from GeoQuery's normalize to handle the conversion of
GeoAggregate objects.
"""
if isinstance(aggregate, self.aggregates_module.GeoAggregate):
if aggregate.is_extent:
if aggregate.is_extent == '3D':
return connection.ops.convert_extent3d(value)
else:
return connection.ops.convert_extent(value)
else:
return connection.ops.convert_geom(value, aggregate.source)
else:
return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered; or specified via the
`field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuery's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for fld in self.model._meta.fields:
if isinstance(fld, GeometryField): return fld
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GeoWhereNode._check_geo_field(self.model._meta, field_name)
| mit |
HuimingCheng/AutoGrading | learning/web_Haotian/venv/Lib/site-packages/flask/exthook.py | 128 | 5762 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import warnings
from ._compat import reraise
class ExtDeprecationWarning(DeprecationWarning):
    """Warning emitted when a module is imported through ``flask.ext``."""
    pass
# Always surface this warning, even if DeprecationWarnings are filtered out.
warnings.simplefilter('always', ExtDeprecationWarning)
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """
    def __init__(self, module_choices, wrapper_module):
        # module_choices: format strings (e.g. 'flask_%s', 'flaskext.%s')
        # tried in order; wrapper_module: the package (e.g. 'flask.ext')
        # whose submodule imports get redirected.
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1
    def __eq__(self, other):
        # Importers are equal when they redirect the same wrapper module to
        # the same choices; install() uses this to de-duplicate meta_path.
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices
    def __ne__(self, other):
        return not self.__eq__(other)
    def install(self):
        """Register this importer on sys.meta_path, replacing any equal one."""
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
    def find_module(self, fullname, path=None):
        """PEP 302 finder: claim any import under our prefix (except the
        deprecation-warning class name itself)."""
        if fullname.startswith(self.prefix) and \
           fullname != 'flask.ext.ExtDeprecationWarning':
            return self
    def load_module(self, fullname):
        """PEP 302 loader: import the first available real module from
        module_choices, alias it in sys.modules under `fullname`, and emit
        an ExtDeprecationWarning."""
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        warnings.warn(
            "Importing flask.ext.{x} is deprecated, use flask_{x} instead."
            .format(x=modname), ExtDeprecationWarning, stacklevel=2
        )
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very this seems to be redundant, but if recursive imports
                # happen we will call into the move import a second time.
                # On the second invocation we still don't have an entry for
                # fullname in sys.modules, but we will end up with the same
                # fake module name and that import will succeed since this
                # one already has a temporary entry in the modules dict.
                # Since this one "succeeded" temporarily that second
                # invocation now will have created a fullname entry in
                # sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice. The skipped frame
                # is the one from __import__ above which we don't care about
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                setattr(sys.modules[self.wrapper_module], modname, module)
            if realname.startswith('flaskext.'):
                warnings.warn(
                    "Detected extension named flaskext.{x}, please rename it "
                    "to flask_{x}. The old form is deprecated."
                    .format(x=modname), ExtDeprecationWarning
                )
            return module
        raise ImportError('No module named %s' % fullname)
    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module. If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported. (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False
    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 Behavior. Modules are cleaned up late so the
        # name shows up properly here. Success!
        if module_name == important_module:
            return True
        # Some python versions will clean up modules so early that the
        # module name at that point is no longer set. Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
           test_string + os.path.sep + '__init__.py' in filename
| mit |
jm-begon/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
    """Container object for datasets.

    A plain ``dict`` whose keys are also readable and writable as
    attributes, so ``bunch.data`` and ``bunch['data']`` are equivalent.

    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(kwargs)

    def __setattr__(self, key, value):
        # Attribute writes land in the dict itself, not in __dict__.
        self[key] = value

    def __getattr__(self, key):
        # Missing keys must surface as AttributeError to honour the
        # attribute-access protocol (e.g. for hasattr and pickling).
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __getstate__(self):
        # Pickle support: all real state lives in the dict payload.
        return self.__dict__
def get_data_home(data_home=None):
    """Return the path of the scikit-learn data dir.

    This folder is used by some large dataset loaders to avoid downloading
    the data several times.

    By default the data dir is a folder named 'scikit_learn_data' in the
    user home folder. Alternatively it can be set through the
    'SCIKIT_LEARN_DATA' environment variable, or programmatically by
    passing an explicit folder path. The '~' symbol is expanded to the
    user home folder. The folder is created if it does not exist yet.
    """
    if data_home is None:
        # Environment variable wins; otherwise fall back to ~/scikit_learn_data.
        data_home = environ.get('SCIKIT_LEARN_DATA',
                                join('~', 'scikit_learn_data'))
    path = expanduser(data_home)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    Parameters
    ----------
    data_home : str or None, optional (default=None)
        Path of the cache folder to delete; if None, the default location
        resolved by get_data_home is used.
    """
    # get_data_home creates the folder when missing, so rmtree always has
    # an existing target (the folder itself is removed as well).
    data_home = get_data_home(data_home)
    shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, encoding=None,
               decode_error='strict', random_state=0):
    """Load text files with categories as subfolder names.

    Individual samples are assumed to be files stored in a two-level folder
    structure, ``container_folder/category_folder/file.txt``; the subfolder
    names are used as the supervised labels. The individual file names are
    not important.

    This function does not try to extract features into a numpy array or
    scipy sparse matrix, and if load_content is False it does not load the
    files into memory either. To use the text in a scikit-learn estimator,
    build a transformer with `sklearn.feature_extraction.text`. When
    load_content is True, also pass the text `encoding` ('utf-8' for most
    modern files); with encoding=None the content stays as bytes.

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category.

    description : string or unicode, optional (default=None)
        A paragraph describing the characteristic of the dataset: its
        source, reference, etc.

    categories : A collection of strings or None, optional (default=None)
        If None (default), load all the categories. If not None, list of
        category names to load (other categories ignored).

    load_content : boolean, optional (default=True)
        Whether to load the content of the files in memory. If True a
        'data' attribute containing the text is present in the returned
        structure; otherwise a 'filenames' attribute gives the paths.

    encoding : string or None (default is None)
        If None, do not try to decode the content of the files (e.g. for
        images or other non-text content). If not None, encoding used to
        decode text files to Unicode when load_content is True.

    decode_error : {'strict', 'ignore', 'replace'}, optional
        What to do when a byte sequence cannot be decoded with the given
        `encoding`; passed as the 'errors' argument to bytes.decode.

    shuffle : bool, optional (default=True)
        Whether to shuffle the data, which can matter for models that
        assume i.i.d. samples (e.g. stochastic gradient descent).

    random_state : int, RandomState instance or None, optional (default=0)
        Seed or RandomState controlling the shuffling.

    Returns
    -------
    data : Bunch
        Dictionary-like object with either 'data' (the raw text) or
        'filenames' (the file paths), plus 'target' (integer labels),
        'target_names' (label meanings) and 'DESCR' (the description).
    """
    # One subfolder per category, in sorted (deterministic) order.
    folders = sorted(entry for entry in listdir(container_path)
                     if isdir(join(container_path, entry)))
    if categories is not None:
        folders = [entry for entry in folders if entry in categories]

    target = []
    target_names = []
    filenames = []
    for label, folder in enumerate(folders):
        target_names.append(folder)
        folder_path = join(container_path, folder)
        for doc in sorted(listdir(folder_path)):
            filenames.append(join(folder_path, doc))
            target.append(label)

    # convert to arrays for fancy indexing
    filenames = np.array(filenames)
    target = np.array(target)

    if shuffle:
        rng = check_random_state(random_state)
        order = rng.permutation(filenames.shape[0])
        filenames = filenames[order]
        target = target[order]

    if not load_content:
        return Bunch(filenames=filenames,
                     target_names=target_names,
                     target=target,
                     DESCR=description)

    data = []
    for path in filenames:
        with open(path, 'rb') as handle:
            data.append(handle.read())
    if encoding is not None:
        data = [raw.decode(encoding, decode_error) for raw in data]
    return Bunch(data=data,
                 filenames=filenames,
                 target_names=target_names,
                 target=target,
                 DESCR=description)
def load_iris():
    """Load and return the iris dataset (classification).

    The iris dataset is a classic and very easy multi-class classification
    dataset.

    ================= ==============
    Classes                        3
    Samples per class             50
    Samples total                150
    Dimensionality                 4
    Features          real, positive
    ================= ==============

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data' (the data to learn),
        'target' (the classification labels), 'target_names',
        'feature_names' and 'DESCR' (the full dataset description).

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    module_path = dirname(__file__)
    with open(join(module_path, 'data', 'iris.csv')) as csv_file:
        reader = csv.reader(csv_file)
        # The first row encodes the dataset shape and the class names.
        header = next(reader)
        n_samples, n_features = int(header[0]), int(header[1])
        target_names = np.array(header[2:])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=np.int)
        for row_idx, row in enumerate(reader):
            data[row_idx] = np.asarray(row[:-1], dtype=np.float)
            target[row_idx] = np.asarray(row[-1], dtype=np.int)

    with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
        fdescr = rst_file.read()

    return Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR=fdescr,
                 feature_names=['sepal length (cm)', 'sepal width (cm)',
                                'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
    """Load and return the digits dataset (classification).

    Each datapoint is a 8x8 image of a digit.

    ================= ==============
    Classes                       10
    Samples per class           ~180
    Samples total               1797
    Dimensionality                64
    Features           integers 0-16
    ================= ==============

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    n_class : integer, between 0 and 10, optional (default=10)
        The number of classes to return.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data' (the flattened
        samples), 'images' (the 8x8 image per sample), 'target',
        'target_names' and 'DESCR'.

    Examples
    --------
    To load the data and visualize the images::

        >>> from sklearn.datasets import load_digits
        >>> digits = load_digits()
        >>> print(digits.data.shape)
        (1797, 64)
    """
    module_path = dirname(__file__)
    raw = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
                     delimiter=',')
    with open(join(module_path, 'descr', 'digits.rst')) as f:
        descr = f.read()

    flat_data = raw[:, :-1]
    target = raw[:, -1]
    # Expose the same buffer as a stack of 8x8 images (no copy).
    images = flat_data.view()
    images.shape = (-1, 8, 8)

    if n_class < 10:
        keep = target < n_class
        flat_data, target = flat_data[keep], target[keep]
        images = images[keep]

    return Bunch(data=flat_data,
                 target=target.astype(np.int),
                 target_names=np.arange(10),
                 images=images,
                 DESCR=descr)
def load_diabetes():
    """Load and return the diabetes dataset (regression).

    ==============   ==================
    Samples total    442
    Dimensionality   10
    Features         real, -.2 < x < .2
    Targets          integer 25 - 346
    ==============   ==================

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data' (the features to
        learn) and 'target' (the regression target for each sample).
    """
    base_dir = join(dirname(__file__), 'data')
    features = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
    labels = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
    return Bunch(data=features, target=labels)
def load_linnerud():
    """Load and return the linnerud dataset (multivariate regression).

    Samples total: 20
    Dimensionality: 3 for both data and targets
    Features: integer
    Targets: integer

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are: 'data' and
        'targets', the two multivariate datasets, with 'data' corresponding to
        the exercise and 'targets' corresponding to the physiological
        measurements, as well as 'feature_names' and 'target_names'.
    """
    # Build all paths with os.path.join, consistent with the other loaders
    # (previously this used string concatenation with a trailing 'data/'
    # component, which is fragile and platform-dependent).
    module_path = dirname(__file__)
    base_dir = join(module_path, 'data')
    exercise_path = join(base_dir, 'linnerud_exercise.csv')
    physiological_path = join(base_dir, 'linnerud_physiological.csv')

    # Read data, skipping the header row of each file.
    data_exercise = np.loadtxt(exercise_path, skiprows=1)
    data_physiological = np.loadtxt(physiological_path, skiprows=1)

    # Read header (column names).
    with open(exercise_path) as f:
        header_exercise = f.readline().split()
    with open(physiological_path) as f:
        header_physiological = f.readline().split()
    with open(join(module_path, 'descr', 'linnerud.rst')) as f:
        descr = f.read()

    return Bunch(data=data_exercise, feature_names=header_exercise,
                 target=data_physiological,
                 target_names=header_physiological,
                 DESCR=descr)
def load_boston():
    """Load and return the boston house-prices dataset (regression).

    ==============  ==============
    Samples total              506
    Dimensionality              13
    Features        real, positive
    Targets          real 5. - 50.
    ==============  ==============

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data' (the data to learn),
        'target' (the regression targets) and 'DESCR' (the full dataset
        description).

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> boston = load_boston()
    >>> print(boston.data.shape)
    (506, 13)
    """
    module_path = dirname(__file__)

    with open(join(module_path, 'descr', 'boston_house_prices.rst')) as f:
        descr_text = f.read()

    with open(join(module_path, 'data', 'boston_house_prices.csv')) as f:
        reader = csv.reader(f)
        # First header row holds the dataset dimensions.
        counts = next(reader)
        n_samples, n_features = int(counts[0]), int(counts[1])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        # Second header row holds the column names.
        feature_names = np.array(next(reader))
        for row_idx, row in enumerate(reader):
            data[row_idx] = np.asarray(row[:-1], dtype=np.float)
            target[row_idx] = np.asarray(row[-1], dtype=np.float)

    return Bunch(data=data,
                 target=target,
                 # last column is target value
                 feature_names=feature_names[:-1],
                 DESCR=descr_text)
def load_sample_images():
    """Load sample images for image manipulation.

    Loads both, ``china`` and ``flower``.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'images' (the two sample
        images), 'filenames' (the image file paths) and 'DESCR' (the full
        description of the dataset).

    Examples
    --------
    To load the data and visualize the images:

    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    """
    # imread lives in scipy.misc but requires PIL; import lazily so this
    # module does not hard-depend on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    module_path = join(dirname(__file__), "images")
    with open(join(module_path, 'README.txt')) as f:
        descr = f.read()
    jpeg_paths = [join(module_path, name)
                  for name in os.listdir(module_path)
                  if name.endswith(".jpg")]
    # Decode every jpeg found in the images folder.
    loaded = [imread(path) for path in jpeg_paths]
    return Bunch(images=loaded,
                 filenames=jpeg_paths,
                 DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image

    Parameters
    -----------
    image_name: {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded

    Returns
    -------
    img: 3D array
        The image as a numpy array: height x width x color

    Raises
    ------
    AttributeError
        If ``image_name`` matches none of the bundled sample images.
        (AttributeError is kept for backward compatibility.)

    Examples
    ---------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    """
    images = load_sample_images()
    # Pick the first bundled file whose path ends with the requested name.
    index = next((i for i, filename in enumerate(images.filenames)
                  if filename.endswith(image_name)), None)
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]
| bsd-3-clause |
Shrhawk/edx-platform | common/djangoapps/external_auth/tests/test_shib.py | 38 | 30207 | # -*- coding: utf-8 -*-
"""
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from ddt import ddt, data
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User
from django.utils.importlib import import_module
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from external_auth.views import (
shib_login, course_specific_login, course_specific_register, _flatten_to_ascii
)
from mock import patch
from urllib import urlencode
from student.views import create_account, change_enrollment
from student.models import UserProfile, CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
# Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
# attributes via request.META. We can count on 'Shib-Identity-Provider', and 'REMOTE_USER' being present
# b/c of how mod_shib works but should test the behavior with the rest of the attributes present/missing
# For the sake of python convention we'll make all of these variable names ALL_CAPS
# These values would all returned from request.META, so they need to be str, not unicode
# Fixture values for the attributes a Shibboleth IdP can set in request.META.
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = 'test_user@stanford.edu'
MAILS = [None, '', 'test_user@stanford.edu'] # unicode shouldn't be in emails, would fail django's email validator
DISPLAYNAMES = [None, '', 'Jason 包']
GIVENNAMES = [None, '', 'jasön; John; bob'] # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', '包; smith'] # At Stanford, the sns can be a list delimited by ';'
def gen_all_identities():
    """
    A generator for all combinations of test inputs.

    Each generated item is a dict representing what a shib IdP could
    potentially pass to django via request.META, i.e. setting (or not)
    request.META['givenName'], etc.
    """
    def _make_meta(mail, display_name, given_name, surname):
        """Build one request.META-style dict for the given attributes."""
        meta = {'Shib-Identity-Provider': IDP,
                'REMOTE_USER': REMOTE_USER}
        # Optional attributes are only present when the IdP supplied them.
        for key, value in (('displayName', display_name),
                           ('mail', mail),
                           ('givenName', given_name),
                           ('sn', surname)):
            if value is not None:
                meta[key] = value
        return meta

    for mail in MAILS:
        for given_name in GIVENNAMES:
            for surname in SNS:
                for display_name in DISPLAYNAMES:
                    yield _make_meta(mail, display_name, given_name, surname)
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(ModuleStoreTestCase):
"""
Tests for the Shibboleth SP, which communicates via request.META
(Apache environment variables set by mod_shib)
"""
request_factory = RequestFactory()
    def setUp(self):
        """Set up the modulestore test case without a default user."""
        # Individual tests create exactly the users they need.
        super(ShibSPTest, self).setUp(create_user=False)
        self.test_user_id = ModuleStoreEnum.UserID.test
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_exception_shib_login(self):
        """
        Tests that we get the error page when there is no REMOTE_USER
        or Shib-Identity-Provider in request.META
        """
        # Case 1: IdP header present but no REMOTE_USER -> 403 error page.
        no_remote_user_request = self.request_factory.get('/shib-login')
        no_remote_user_request.META.update({'Shib-Identity-Provider': IDP})
        no_remote_user_request.user = AnonymousUser()
        mako_middleware_process_request(no_remote_user_request)
        no_remote_user_response = shib_login(no_remote_user_request)
        self.assertEqual(no_remote_user_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_remote_user_response.content)
        # Case 2: REMOTE_USER present but no Shib-Identity-Provider -> 403.
        no_idp_request = self.request_factory.get('/shib-login')
        no_idp_request.META.update({'REMOTE_USER': REMOTE_USER})
        no_idp_response = shib_login(no_idp_request)
        self.assertEqual(no_idp_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_idp_response.content)
    def _assert_shib_login_is_logged(self, audit_log_call, remote_user):
        """Asserts that a shibboleth login attempt was written to the audit log."""
        remote_user = _flatten_to_ascii(remote_user) # django usernames have to be ascii
        # audit_log_call is a mock call tuple: (method name, args, kwargs).
        method_name, args, _kwargs = audit_log_call
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'logged in via Shibboleth', args[0])
        self.assertIn(remote_user, args[0])
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_shib_login(self):
        """
        Tests that:
        * shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
        * shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
        * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
          of an existing user without an existing ExternalAuthMap links the two and log the user in
        * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
          of an existing user that already has an ExternalAuthMap causes an error (403)
        * shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
        """
        # Fixtures: an active user linked to an ExternalAuthMap, a user with
        # no map, and an inactive user linked to a map.
        user_w_map = UserFactory.create(email='withmap@stanford.edu')
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=user_w_map)
        user_wo_map = UserFactory.create(email='womap@stanford.edu')
        user_w_map.save()
        user_wo_map.save()
        extauth.save()
        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        inactive_user.is_active = False
        inactive_extauth = ExternalAuthMap(external_id='inactive@stanford.edu',
                                           external_email='',
                                           external_domain='shib:https://idp.stanford.edu/',
                                           external_credentials="",
                                           user=inactive_user)
        inactive_user.save()
        inactive_extauth.save()
        idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
        remote_users = ['withmap@stanford.edu', 'womap@stanford.edu',
                        'testuser2@someother_idp.com', 'inactive@stanford.edu']
        # Exercise every (idp, remote_user) combination and verify which of
        # the docstring scenarios it triggers.
        for idp in idps:
            for remote_user in remote_users:
                request = self.request_factory.get('/shib-login')
                request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
                request.META.update({'Shib-Identity-Provider': idp,
                                     'REMOTE_USER': remote_user,
                                     'mail': remote_user})
                request.user = AnonymousUser()
                mako_middleware_process_request(request)
                with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
                    response = shib_login(request)
                audit_log_calls = mock_audit_log.method_calls
                if idp == "https://idp.stanford.edu/" and remote_user == 'withmap@stanford.edu':
                    # Active linked user: logged in and redirected to dashboard.
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_w_map)
                    self.assertEqual(response['Location'], '/dashboard')
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'inactive@stanford.edu':
                    # Inactive linked user: 403 with activation message.
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("Account not yet activated: please look for link in your email", response.content)
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'warning')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'is not active after external login', args[0])
                    # self.assertEquals(remote_user, args[1])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'womap@stanford.edu':
                    # Matching email, no map yet: map is created and user logged in.
                    self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_wo_map)
                    self.assertEqual(response['Location'], '/dashboard')
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://someother.idp.com/" and remote_user in \
                            ['withmap@stanford.edu', 'womap@stanford.edu', 'inactive@stanford.edu']:
                    # Email already tied to a different IdP: conflict error.
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("You have already created an account using an external login", response.content)
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)
                else:
                    # Unknown credentials: registration form is rendered.
                    self.assertEqual(response.status_code, 200)
                    self.assertContains(response,
                                        ("Preferences for {platform_name}"
                                         .format(platform_name=settings.PLATFORM_NAME)))
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)
def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string="inactive@stanford.edu"):
    """
    Tests that FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] means extauth automatically
    linked users, activates them, and logs them in

    ``log_user_string`` is the token expected in the audit log entries:
    the raw e-mail address normally, or an opaque ``user.id: N`` string
    when FEATURES['SQUELCH_PII_IN_LOGS'] is on.
    """
    inactive_user = UserFactory.create(email='inactive@stanford.edu')
    inactive_user.is_active = False
    inactive_user.save()
    request = self.request_factory.get('/shib-login')
    request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
    # Headers the Shibboleth SP would set for an authenticated IdP user.
    request.META.update({
        'Shib-Identity-Provider': 'https://idp.stanford.edu/',
        'REMOTE_USER': 'inactive@stanford.edu',
        'mail': 'inactive@stanford.edu'
    })
    request.user = AnonymousUser()
    with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
        response = shib_login(request)
    audit_log_calls = mock_audit_log.method_calls

    # reload user from db, since the view function works via db side-effects
    inactive_user = User.objects.get(id=inactive_user.id)
    self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))
    self.assertTrue(inactive_user.is_active)
    self.assertIsInstance(response, HttpResponseRedirect)
    self.assertEqual(request.user, inactive_user)
    self.assertEqual(response['Location'], '/dashboard')

    # verify logging: shib login record first, then (index 2) the final
    # login-success entry; index 1 is the activation record in between.
    self.assertEquals(len(audit_log_calls), 3)
    self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)
    method_name, args, _kwargs = audit_log_calls[2]
    self.assertEquals(method_name, 'info')
    self.assertEquals(len(args), 1)
    self.assertIn(u'Login success', args[0])
    self.assertIn(log_user_string, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': False})
def test_extauth_auto_activate_user_with_flag_no_squelch(self):
    """
    Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': False}
    """
    # PII allowed in logs: expect the raw e-mail address in the audit trail.
    self._base_test_extauth_auto_activate_user_with_flag(log_user_string="inactive@stanford.edu")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': True})
def test_extauth_auto_activate_user_with_flag_squelch(self):
    """
    Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': True}
    """
    # PII squelched: the audit log must identify the user by id, never e-mail.
    self._base_test_extauth_auto_activate_user_with_flag(log_user_string="user.id: 1")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form(self, identity):
    """
    Tests the registration form showing up with the proper parameters.

    Uses django test client for its session support
    """
    client = DjangoTestClient()
    # identity k/v pairs will show up in request.META
    response = client.get(path='/shib-login/', data={}, follow=False, **identity)
    self.assertEquals(response.status_code, 200)
    # The e-mail input is only rendered when shib did not supply a 'mail'
    # attribute; otherwise the shib-provided address is used.
    mail_input_HTML = '<input class="" id="email" type="email" name="email"'
    if not identity.get('mail'):
        self.assertContains(response, mail_input_HTML)
    else:
        self.assertNotContains(response, mail_input_HTML)
    # Likewise the full-name input only appears when none of the name-ish
    # shib attributes (sn / givenName / displayName) came through.
    sn_empty = not identity.get('sn')
    given_name_empty = not identity.get('givenName')
    displayname_empty = not identity.get('displayName')
    fullname_input_html = '<input id="name" type="text" name="name"'
    if sn_empty and given_name_empty and displayname_empty:
        self.assertContains(response, fullname_input_html)
    else:
        self.assertNotContains(response, fullname_input_html)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form_submit(self, identity):
    """
    Tests user creation after the registration form that pops is submitted.  If there is no shib
    ExternalAuthMap in the session, then the created user should take the username and email from the
    request.

    Uses django test client for its session support
    """
    # First we pop the registration form
    client = DjangoTestClient()
    response1 = client.get(path='/shib-login/', data={}, follow=False, **identity)
    # Then we have the user answer the registration form
    # These are unicode because request.POST returns unicode
    postvars = {'email': u'post_email@stanford.edu',
                'username': u'post_username',  # django usernames can't be unicode
                'password': u'post_pássword',
                'name': u'post_náme',
                'terms_of_service': u'true',
                'honor_code': u'true'}
    # use RequestFactory instead of TestClient here because we want access to request.user
    request2 = self.request_factory.post('/create_account', data=postvars)
    request2.session = client.session
    request2.user = AnonymousUser()

    mako_middleware_process_request(request2)
    with patch('student.views.AUDIT_LOG') as mock_audit_log:
        _response2 = create_account(request2)

    user = request2.user
    mail = identity.get('mail')

    # verify logging of login happening during account creation.  Expected
    # call order: login-success, external-auth registration, ExternalAuthMap
    # update.
    audit_log_calls = mock_audit_log.method_calls
    self.assertEquals(len(audit_log_calls), 3)
    method_name, args, _kwargs = audit_log_calls[0]
    self.assertEquals(method_name, 'info')
    self.assertEquals(len(args), 1)
    self.assertIn(u'Login success on new account creation', args[0])
    self.assertIn(u'post_username', args[0])
    method_name, args, _kwargs = audit_log_calls[1]
    self.assertEquals(method_name, 'info')
    self.assertEquals(len(args), 2)
    self.assertIn(u'User registered with external_auth', args[0])
    self.assertEquals(u'post_username', args[1])
    method_name, args, _kwargs = audit_log_calls[2]
    self.assertEquals(method_name, 'info')
    self.assertEquals(len(args), 3)
    self.assertIn(u'Updated ExternalAuthMap for ', args[0])
    self.assertEquals(u'post_username', args[1])
    self.assertEquals(u'test_user@stanford.edu', args[2].external_id)

    # check that the created user has the right email, either taken from shib or user input
    if mail:
        self.assertEqual(user.email, mail)
        self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
        self.assertIsNotNone(User.objects.get(email=mail))  # get enforces only 1 such user
    else:
        self.assertEqual(user.email, postvars['email'])
        self.assertEqual(list(User.objects.filter(email=mail)), [])
        self.assertIsNotNone(User.objects.get(email=postvars['email']))  # get enforces only 1 such user

    # check that the created user profile has the right name, either taken from shib or user input
    profile = UserProfile.objects.get(user=user)
    sn_empty = not identity.get('sn')
    given_name_empty = not identity.get('givenName')
    displayname_empty = not identity.get('displayName')
    if displayname_empty:
        if sn_empty and given_name_empty:
            # nothing usable from shib, so the form-supplied name wins
            self.assertEqual(profile.name, postvars['name'])
        else:
            # name assembled from sn/givenName; the ';' join separator
            # used internally must not leak into the final name
            self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
            self.assertNotIn(u';', profile.name)
    else:
        # displayName always takes precedence over sn/givenName
        self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
        self.assertEqual(profile.name, identity.get('displayName').decode('utf-8'))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(None, "", "shib:https://idp.stanford.edu/")
def test_course_specific_login_and_reg(self, domain):
    """
    Tests that the correct course specific login and registration urls work for shib

    For a shib enrollment_domain both views must redirect to shib-login;
    otherwise (or for an unknown course) they redirect to the standard
    signin/register pages.  GET params must be passed through untouched.
    """
    course = CourseFactory.create(
        org='MITx',
        number='999',
        display_name='Robot Super Course',
        user_id=self.test_user_id,
    )

    # Test for cases where course is found
    # set domains
    # temporarily set the branch to draft-preferred so we can update the course
    with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
        course.enrollment_domain = domain
        self.store.update_item(course, self.test_user_id)

    # setting location to test that GET params get passed through
    login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
                                             '?course_id=MITx/999/Robot_Super_Course' +
                                             '&enrollment_action=enroll')
    _reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
                                            '?course_id=MITx/999/course/Robot_Super_Course' +
                                            '&enrollment_action=enroll')

    login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
    reg_response = course_specific_register(login_request, 'MITx/999/Robot_Super_Course')

    if domain and "shib" in domain:
        self.assertIsInstance(login_response, HttpResponseRedirect)
        self.assertEqual(login_response['Location'],
                         reverse('shib-login') +
                         '?course_id=MITx/999/Robot_Super_Course' +
                         '&enrollment_action=enroll')
        # BUGFIX: previously re-asserted login_response here; reg_response is
        # the redirect under test.
        self.assertIsInstance(reg_response, HttpResponseRedirect)
        self.assertEqual(reg_response['Location'],
                         reverse('shib-login') +
                         '?course_id=MITx/999/Robot_Super_Course' +
                         '&enrollment_action=enroll')
    else:
        self.assertIsInstance(login_response, HttpResponseRedirect)
        self.assertEqual(login_response['Location'],
                         reverse('signin_user') +
                         '?course_id=MITx/999/Robot_Super_Course' +
                         '&enrollment_action=enroll')
        # BUGFIX: same copy-paste error fixed for the non-shib branch.
        self.assertIsInstance(reg_response, HttpResponseRedirect)
        self.assertEqual(reg_response['Location'],
                         reverse('register_user') +
                         '?course_id=MITx/999/Robot_Super_Course' +
                         '&enrollment_action=enroll')

    # Now test for non-existent course
    # setting location to test that GET params get passed through
    login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
                                             '?course_id=DNE/DNE/DNE' +
                                             '&enrollment_action=enroll')
    _reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
                                            '?course_id=DNE/DNE/DNE/Robot_Super_Course' +
                                            '&enrollment_action=enroll')

    login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
    reg_response = course_specific_register(login_request, 'DNE/DNE/DNE')

    self.assertIsInstance(login_response, HttpResponseRedirect)
    self.assertEqual(login_response['Location'],
                     reverse('signin_user') +
                     '?course_id=DNE/DNE/DNE' +
                     '&enrollment_action=enroll')
    # BUGFIX: assert the registration redirect, not the login one again.
    self.assertIsInstance(reg_response, HttpResponseRedirect)
    self.assertEqual(reg_response['Location'],
                     reverse('register_user') +
                     '?course_id=DNE/DNE/DNE' +
                     '&enrollment_action=enroll')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_enrollment_limit_by_domain(self):
    """
    Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
    the proper external auth
    """
    # create 2 course, one with limited enrollment one without
    shib_course = CourseFactory.create(
        org='Stanford',
        number='123',
        display_name='Shib Only',
        enrollment_domain='shib:https://idp.stanford.edu/',
        user_id=self.test_user_id,
    )
    open_enroll_course = CourseFactory.create(
        org='MITx',
        number='999',
        display_name='Robot Super Course',
        enrollment_domain='',
        user_id=self.test_user_id,
    )

    # create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
    shib_student = UserFactory.create()
    shib_student.save()
    extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                              external_email='',
                              external_domain='shib:https://idp.stanford.edu/',
                              external_credentials="",
                              user=shib_student)
    extauth.save()

    other_ext_student = UserFactory.create()
    other_ext_student.username = "teststudent2"
    other_ext_student.email = "teststudent2@other.edu"
    other_ext_student.save()
    extauth = ExternalAuthMap(external_id='testuser1@other.edu',
                              external_email='',
                              external_domain='shib:https://other.edu/',
                              external_credentials="",
                              user=other_ext_student)
    extauth.save()

    int_student = UserFactory.create()
    int_student.username = "teststudent3"
    int_student.email = "teststudent3@gmail.com"
    int_student.save()

    # Tests the two case for courses, limited and not
    for course in [shib_course, open_enroll_course]:
        for student in [shib_student, other_ext_student, int_student]:
            request = self.request_factory.post('/change_enrollment')
            request.POST.update({'enrollment_action': 'enroll',
                                 'course_id': course.id.to_deprecated_string()})
            request.user = student
            response = change_enrollment(request)
            # If course is not limited or student has correct shib extauth then enrollment should be allowed
            if course is open_enroll_course or student is shib_student:
                self.assertEqual(response.status_code, 200)
                self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
            else:
                # wrong-IdP or purely internal accounts are rejected
                self.assertEqual(response.status_code, 400)
                self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_shib_login_enrollment(self):
    """
    A functionality test that a student with an existing shib login
    can auto-enroll in a class with GET or POST params.  Also tests the direction functionality of
    the 'next' GET/POST param
    """
    student = UserFactory.create()
    extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                              external_email='',
                              external_domain='shib:https://idp.stanford.edu/',
                              external_credentials="",
                              internal_password="password",
                              user=student)
    student.set_password("password")
    student.save()
    extauth.save()

    course = CourseFactory.create(
        org='Stanford',
        number='123',
        display_name='Shib Only',
        enrollment_domain='shib:https://idp.stanford.edu/',
        user_id=self.test_user_id,
    )

    # use django test client for sessions and url processing
    # no enrollment before trying
    self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
    self.client.logout()
    # GET params request enrollment plus a post-login redirect target
    params = [
        ('course_id', course.id.to_deprecated_string()),
        ('enrollment_action', 'enroll'),
        ('next', '/testredirect')
    ]
    request_kwargs = {'path': '/shib-login/',
                      'data': dict(params),
                      'follow': False,
                      'REMOTE_USER': 'testuser@stanford.edu',
                      'Shib-Identity-Provider': 'https://idp.stanford.edu/'}
    response = self.client.get(**request_kwargs)
    # successful login is a redirect to the URL that handles auto-enrollment
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response['location'], 'http://testserver/account/finish_auth?{}'.format(urlencode(params)))
class ShibUtilFnTest(TestCase):
    """
    Tests util functions in shib module
    """
    def test__flatten_to_ascii(self):
        # Python 2 semantics under test: unicode input stays unicode and
        # byte-str input stays str, with diacritics reduced to their base
        # ASCII letter and unmappable characters (e.g. CJK) dropped.
        DIACRITIC = u"àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ"  # pylint: disable=invalid-name
        STR_DIACRI = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ"  # pylint: disable=invalid-name
        FLATTENED = u"aeiouAEIOUaeiouyAEIOUYaeiouAEIOUanoANOaeiouyAEIOUYaAcC"  # pylint: disable=invalid-name
        self.assertEqual(_flatten_to_ascii('jasön'), 'jason')  # umlaut
        self.assertEqual(_flatten_to_ascii('Jason包'), 'Jason')  # mandarin, so it just gets dropped
        self.assertEqual(_flatten_to_ascii('abc'), 'abc')  # pass through

        unicode_test = _flatten_to_ascii(DIACRITIC)
        self.assertEqual(unicode_test, FLATTENED)
        self.assertIsInstance(unicode_test, unicode)

        str_test = _flatten_to_ascii(STR_DIACRI)
        self.assertEqual(str_test, FLATTENED)
        self.assertIsInstance(str_test, str)
| agpl-3.0 |
TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestUnit_newSetters.py | 1 | 4337 | #
# @file TestUnit_newSetters.py
# @brief Unit unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestUnit_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestUnit_newSetters(unittest.TestCase):
    """Auto-generated unit tests for the libsbml ``Unit`` setter API.

    Translated mechanically from src/sbml/test/TestUnit_newSetters.c --
    per the file header, do NOT edit this by hand; regenerate instead.
    Each test checks both the returned libsbml status code and the
    resulting attribute value.
    """

    global U
    U = None

    def setUp(self):
        # Fresh SBML Level 1 Version 2 Unit for each test.
        self.U = libsbml.Unit(1,2)
        if (self.U == None):
            pass
        pass

    def tearDown(self):
        # Mirrors the generated C teardown: drop the SWIG wrapper so the
        # underlying C++ object can be released.
        _dummyList = [ self.U ]; _dummyList[:] = []; del _dummyList
        pass

    def test_Unit_removeScale(self):
        i = self.U.setScale(2)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( self.U.getScale() == 2 )
        # removeScale folds the scale factor into the multiplier (10^2).
        i = libsbml.Unit.removeScale(self.U)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( self.U.getScale() == 0 )
        self.assert_( self.U.getMultiplier() == 100 )
        pass

    def test_Unit_setExponent1(self):
        i = self.U.setExponent(2)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( self.U.getExponent() == 2 )
        pass

    def test_Unit_setExponent2(self):
        # integral float exponents are accepted in L1V2
        i = self.U.setExponent(2.0)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( self.U.getExponent() == 2 )
        pass

    def test_Unit_setExponent3(self):
        # non-integral exponents are invalid before SBML L3
        i = self.U.setExponent(2.2)
        self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
        self.assert_( self.U.getExponent() == 1 )
        pass

    def test_Unit_setKind1(self):
        # "cell" is not a valid UnitKind name
        i = self.U.setKind(libsbml.UnitKind_forName("cell"))
        self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
        self.assertEqual( False, self.U.isSetKind() )
        pass

    def test_Unit_setKind2(self):
        i = self.U.setKind(libsbml.UnitKind_forName("litre"))
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assertEqual( True, self.U.isSetKind() )
        pass

    def test_Unit_setMultiplier1(self):
        # multiplier does not exist in SBML Level 1, hence "unexpected"
        i = self.U.setMultiplier(2)
        self.assert_( i == libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE )
        self.assert_( self.U.getMultiplier() == 2 )
        pass

    def test_Unit_setMultiplier2(self):
        # a Level 2 Unit accepts multiplier normally
        c = libsbml.Unit(2,2)
        i = c.setMultiplier(4)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( c.getMultiplier() == 4 )
        _dummyList = [ c ]; _dummyList[:] = []; del _dummyList
        pass

    def test_Unit_setOffset1(self):
        # offset only exists in SBML Level 2 Version 1
        i = self.U.setOffset(2.0)
        self.assert_( i == libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE )
        self.assert_( self.U.getOffset() == 0 )
        pass

    def test_Unit_setOffset2(self):
        U1 = libsbml.Unit(2,1)
        i = U1.setOffset(2.0)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( U1.getOffset() == 2 )
        pass

    def test_Unit_setScale1(self):
        i = self.U.setScale(2)
        self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assert_( self.U.getScale() == 2 )
        pass
def suite():
    """Return a TestSuite containing all TestUnit_newSetters tests.

    ``unittest.makeSuite`` has been deprecated since Python 3.2 and was
    removed in Python 3.13; ``TestLoader.loadTestsFromTestCase`` is the
    supported equivalent and yields the same suite of test methods.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUnit_newSetters))
    return suite
# Run the suite directly: exit status 0 on success, 1 on any failure, so
# shell-level harnesses can detect the result.
if __name__ == "__main__":
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
anderson7ru/bienestarues | citasmedicasapp/models.py | 1 | 2389 | # -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils import timezone
# apps internas y externas
from datospersonalesapp.models import Paciente
from empleadosapp.models import Empleado, Doctor
# Appointment status choices: stored one-char code -> human-readable label
# ('Normal'; 'Cancelado' = cancelled; 'Reprogramar' = to be rescheduled).
ESTADO_ELECCIONES = (
    ('N','Normal'),
    ('C','Cancelado'),
    ('R','Reprogramar'),
)
class diasSemana(models.Model):
    """Lookup table of weekday names a doctor can attend."""

    # Day-of-week label, e.g. Monday-Friday ("Lunes-Viernes") or
    # every day ("Todos los dias").
    dias = models.CharField(max_length=15) #Lunes-Viernes y Todos los dias

    # Display the weekday name itself.
    def __str__(self):
        return '%s' % (self.dias)
class HorarioAtencion(models.Model):
    """A doctor's attention schedule: days, time window and patient quota."""

    codigoHorario = models.AutoField(primary_key=True, null=False)
    codigoDoctor = models.ForeignKey(Doctor, on_delete=models.CASCADE, verbose_name="Doctor",)
    # Days of the week this schedule applies to.
    dia = models.ManyToManyField(diasSemana)
    horaInicio = models.TimeField("Hora inicio")
    horaFinal = models.TimeField("Hora final")
    # Maximum number of patient consultations per schedule slot.
    pacienteConsulta = models.PositiveIntegerField()

    # Display the associated doctor.
    def __str__(self):
        return '%s' % (self.codigoDoctor)
class Cita(models.Model):
    """A medical appointment linking a doctor and an active patient."""

    codigoCita = models.AutoField(primary_key=True, null=False)
    codigoDoctor = models.ForeignKey(Doctor, on_delete=models.CASCADE, verbose_name="Doctor",)
    # Only patients with an active record ('A') can be booked.
    paciente = models.ForeignKey(Paciente, null=False, on_delete=models.CASCADE,limit_choices_to={'estadoExpediente':'A'},verbose_name="No expediente")
    fechaConsulta = models.DateField("Fecha de la Consulta",help_text="DD/MM/YYYY")
    horaConsulta = models.TimeField("Hora de la consulta")
    # One-char status code; see ESTADO_ELECCIONES (defaults to 'N' = Normal).
    estadoConsulta = models.CharField(max_length=1,choices=ESTADO_ELECCIONES, default='N')

    # Display: doctor, patient and appointment status.
    def __str__(self):
        return '%s %s %s' % (self.codigoDoctor, self.paciente, self.estadoConsulta)
class Cancelacion(models.Model):
    """A period during which a doctor is unavailable (cancelled slots)."""

    codigoCancelacion = models.AutoField(primary_key=True, null=False)
    codigoDoctor = models.ForeignKey(Doctor, on_delete=models.CASCADE, verbose_name="Doctor",)
    fechaInicio = models.DateField("fecha inicio",help_text="DD/MM/YYYY")
    # End date may be omitted for single-day cancellations.
    fechaFinal = models.DateField("fecha final",help_text="DD/MM/YYYY", null=True, blank=True)
    horaInicio = models.TimeField("Hora inicio")
    horaFinal = models.TimeField("Hora final")

    # Display: doctor plus the start date and time of the cancellation.
    def __str__(self):
        return '%s %s %s' % (self.codigoDoctor, self.fechaInicio, self.horaInicio)
HSAnet/glimpse_client | 3rdparty/breakpad/src/tools/gyp/test/defines/gyptest-defines-env.py | 501 | 1874 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define.
"""
import os
import TestGyp
test = TestGyp.TestGyp()
# With the value only given in environment, it should be used.
try:
os.environ['GYP_DEFINES'] = 'value=10'
test.run_gyp('defines-env.gyp')
finally:
del os.environ['GYP_DEFINES']
test.build('defines-env.gyp')
expect = """\
VALUE is 10
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment,
# command line should take precedence.
try:
os.environ['GYP_DEFINES'] = 'value=20'
test.run_gyp('defines-env.gyp', '-Dvalue=25')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 25
"""
test.run_built_executable('defines', stdout=expect)
# With the value only given in environment, it should be ignored if
# --ignore-environment is specified.
try:
os.environ['GYP_DEFINES'] = 'value=30'
test.run_gyp('defines-env.gyp', '--ignore-environment')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 5
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment, and
# --ignore-environment also specified, command line should still be used.
try:
os.environ['GYP_DEFINES'] = 'value=40'
test.run_gyp('defines-env.gyp', '--ignore-environment', '-Dvalue=45')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 45
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
| bsd-3-clause |
neurord/pysb | pysb/examples/bax_pore.py | 4 | 2561 | """A version of BAX pore assembly where the subunits dimerize and then
tetramerize instead of assembling sequentially (contrast with
bax_pore_sequential.py). Inhibition of pore formation by Mcl-1 is also
implemented.
"""
from pysb import *
Model()
# Each BAX-BAX bond must always involve a t1 site on one monomer and a
# t2 site on the other.
Monomer('BAX', ['t1', 't2', 'inh'])
Annotation(BAX, 'http://identifiers.org/uniprot/Q07812')
Monomer('MCL1', ['b'])
Annotation(MCL1, 'http://identifiers.org/uniprot/Q07820')
# Two lone monomers form a dimer.
Parameter('kdimf', 1e-6)
Parameter('kdimr', 1e-7)
Rule('bax_dim',
BAX(t1=None, t2=None) + BAX(t1=None, t2=None) <>
BAX(t1=1, t2=None) % BAX(t1=None, t2=1),
kdimf, kdimr)
# Two lone dimers form a tetramer, with a higher rate than the dimerization.
Parameter('ktetf', 1e-3)
Parameter('ktetr', 1e-4)
Rule('bax_tet',
BAX(t1=1, t2=None) % BAX(t1=None, t2=1) + BAX(t1=2, t2=None) % BAX(t1=None, t2=2) <>
BAX(t1=1, t2=3) % BAX(t1=4, t2=1) % BAX(t1=2, t2=4) % BAX(t1=3, t2=2),
ktetf, ktetr)
# An inhibitory protein can bind to a BAX subunit at any time.
Parameter('kbaxmcl1f', 1e-5)
Parameter('kbaxmcl1r', 1e-6)
Rule('bax_inh_mcl1',
BAX(inh=None) + MCL1(b=None) <>
BAX(inh=1) % MCL1(b=1),
kbaxmcl1f, kbaxmcl1r)
# Initial conditions
Parameter('BAX_0', 8e4)
Initial(BAX(t1=None, t2=None, inh=None), BAX_0)
Parameter('MCL1_0', 2e4)
Initial(MCL1(b=None), MCL1_0)
for p in BAX_0, MCL1_0:
Annotation(p, 'http://identifiers.org/doi/10.1371/journal.pcbi.1002482',
'isDescribedBy')
# We must fully specify all four BAX-BAX bonds, otherwise the pattern
# is too loose, match a given species multiple times (beyond the
# factor of four expected due to the rotational symmetry of the
# tetramer), resulting in erroneously high values.
Observable('BAX4', BAX(t1=1, t2=3) % BAX(t1=4, t2=1) % BAX(t1=2, t2=4) % BAX(t1=3, t2=2))
# Same all-bonds requirement here. However since the BAX tetramer is
# considered inhibited when even one subunit has an inhibitor bound,
# we only need to explicitly write inh=ANY on one of the monomer
# patterns.
Observable('BAX4_inh', BAX(inh=ANY, t1=1, t2=3) % BAX(t1=4, t2=1) % BAX(t1=2, t2=4) % BAX(t1=3, t2=2))
if __name__ == '__main__':
print __doc__, "\n", model
print "\nNOTE: This model code is designed to be imported and programatically " \
"manipulated,\nnot executed directly. The above output is merely a " \
"diagnostic aid. Please see\n" \
"run_bax_pore.py for example usage."
| bsd-2-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/IPython/core/magics/code.py | 8 | 27573 | """Implementation of code management magic functions.
"""
from __future__ import print_function
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import inspect
import io
import os
import re
import sys
import ast
from itertools import chain
# Our own packages
from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
from IPython.core.macro import Macro
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core.oinspect import find_file, find_source_lines
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import py3compat
from IPython.utils.py3compat import string_types
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import get_py_filename
from warnings import warn
from logging import error
from IPython.utils.text import get_text_list
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
# Used for exception handling in magic_edit: raised when the %edit target
# turns out to be a Macro object rather than a file, so the caller can fall
# back to editing the macro's source text.
class MacroToEdit(ValueError): pass

# Matches the synthetic filenames IPython assigns to interactive input
# cells, e.g. "<ipython-input-3-1a2b3c>"; group 1 is the input number.
ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
# To match, e.g. 8-10 1:5 :10 3-
range_re = re.compile(r"""
(?P<start>\d+)?
((?P<sep>[\-:])
(?P<end>\d+)?)?
$""", re.VERBOSE)


def extract_code_ranges(ranges_str):
    """Parse a %load-style range string into (start, stop) slice tuples.

    Each whitespace-separated token may be ``a-b`` (inclusive), ``a:b``
    (python-style half-open) or a single line number; either bound of a
    range may be omitted.  Tokens that do not look like a range are
    silently skipped.  Line numbers are 1-based on input; the yielded
    tuples are ready to slice a list of lines.

    Examples
    --------
    list(extract_code_ranges("5-10 2"))
    [(4, 10), (1, 2)]
    """
    for token in ranges_str.split():
        match = range_re.match(token)
        if match is None:
            continue
        start, sep, end = match.group("start", "sep", "end")
        if sep == '-':
            # inclusive range: keep the upper bound as-is for slicing
            lo = int(start) - 1 if start else None
            hi = int(end) if end else None
        elif sep == ':':
            # python-style range: both bounds shift down by one
            lo = int(start) - 1 if start else None
            hi = int(end) - 1 if end else None
        else:
            # bare number selects exactly one line
            lo = int(start) - 1
            hi = int(start)
        yield (lo, hi)
@skip_doctest
def extract_symbols(code, symbols):
    """
    Return a tuple (blocks, not_found)
    where ``blocks`` is a list of code fragments
    for each symbol parsed from code, and ``not_found`` are
    symbols not found in the code.

    ``symbols`` is a comma-separated string of names; ``code`` must be
    valid Python source (ast.parse raises SyntaxError otherwise).

    For example::

        >>> code = '''a = 10
        def b(): return 42
        class A: pass'''

        >>> extract_symbols(code, 'A,b,z')
        (["class A: pass", "def b(): return 42"], ['z'])
    """
    symbols = symbols.split(',')

    # this will raise SyntaxError if code isn't valid Python
    py_code = ast.parse(code)

    # (name, start line) for every top-level statement; plain statements
    # have no .name attribute and record None so they only act as boundaries
    marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
    code = code.split('\n')

    symbols_lines = {}

    # we already know the start_lineno of each symbol (marks).
    # To find each end_lineno, we traverse in reverse order until each
    # non-blank line
    end = len(code)
    for name, start in reversed(marks):
        while not code[end - 1].strip():
            end -= 1
        if name:
            symbols_lines[name] = (start - 1, end)
        # the previous (earlier) statement ends where this one started
        end = start - 1

    # Now symbols_lines is a map
    # {'symbol_name': (start_lineno, end_lineno), ...}

    # fill a list with chunks of codes for each requested symbol
    blocks = []
    not_found = []
    for symbol in symbols:
        if symbol in symbols_lines:
            start, end = symbols_lines[symbol]
            blocks.append('\n'.join(code[start:end]) + '\n')
        else:
            not_found.append(symbol)

    return blocks, not_found
def strip_initial_indent(lines):
    """For %load, strip indent from lines until finding an unindented line.

    If the first line is indented, that indent prefix is removed from every
    subsequent line that shares it; dedenting stops at the first line that
    does not.  Unindented input (or empty input) passes through unchanged.

    https://github.com/ipython/ipython/issues/9775
    """
    indent_re = re.compile(r'\s+')

    it = iter(lines)
    # Use a sentinel rather than a bare next(): under PEP 479 (Python 3.7+)
    # a StopIteration escaping a generator becomes a RuntimeError, so empty
    # input would crash instead of simply yielding nothing.
    first_line = next(it, None)
    if first_line is None:
        return
    indent_match = indent_re.match(first_line)

    if indent_match:
        # First line was indented
        indent = indent_match.group()
        yield first_line[len(indent):]

        for line in it:
            if line.startswith(indent):
                yield line[len(indent):]
            else:
                # Less indented than the first line - stop dedenting
                yield line
                break
    else:
        yield first_line

    # Pass the remaining lines through without dedenting
    for line in it:
        yield line
class InteractivelyDefined(Exception):
    """Exception for interactively defined variable in magic_edit"""
    def __init__(self, index):
        # Input-history index where the object was defined, so the caller
        # can recover its source from the history rather than a file.
        self.index = index
@magics_class
class CodeMagics(Magics):
"""Magics related to code management (loading, saving, editing, ...)."""
def __init__(self, *args, **kwargs):
    """Initialize the magics and the set of known temporary edit files."""
    # NOTE(review): presumably tracks temp-file names handed out by %edit
    # so repeated edits reuse one file -- confirm against the %edit code.
    self._knowntemps = set()
    super(CodeMagics, self).__init__(*args, **kwargs)
@line_magic
def save(self, parameter_s=''):
    """Save a set of lines or a macro to a given filename.

    Usage:\\
      %save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...

    Options:

      -r: use 'raw' input.  By default, the 'processed' history is used,
      so that magics are loaded in their transformed version to valid
      Python.  If this option is given, the raw input as typed as the
      command line is used instead.

      -f: force overwrite.  If file exists, %save will prompt for overwrite
      unless -f is given.

      -a: append to the file instead of overwriting it.

    This function uses the same syntax as %history for input ranges,
    then saves the lines to the filename you specify.

    It adds a '.py' extension to the file if you don't do so yourself, and
    it asks for confirmation before overwriting existing files.

    If `-r` option is used, the default extension is `.ipy`.
    """

    opts,args = self.parse_options(parameter_s,'fra',mode='list')
    if not args:
        raise UsageError('Missing filename.')
    raw = 'r' in opts
    force = 'f' in opts
    append = 'a' in opts
    mode = 'a' if append else 'w'
    # raw history may contain magics, so it gets the IPython extension
    ext = u'.ipy' if raw else u'.py'
    fname, codefrom = args[0], " ".join(args[1:])
    if not fname.endswith((u'.py',u'.ipy')):
        fname += ext
    file_exists = os.path.isfile(fname)
    if file_exists and not force and not append:
        try:
            overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
        except StdinNotImplementedError:
            # frontends without stdin (e.g. a kernel) cannot prompt;
            # refuse and tell the user how to force the overwrite
            print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
            return
        if not overwrite :
            print('Operation cancelled.')
            return
    try:
        cmds = self.shell.find_user_code(codefrom,raw)
    except (TypeError, ValueError) as e:
        print(e.args[0])
        return
    out = py3compat.cast_unicode(cmds)
    with io.open(fname, mode, encoding="utf-8") as f:
        # only write the coding cookie when starting a fresh file
        if not file_exists or not append:
            f.write(u"# coding: utf-8\n")
        f.write(out)
        # make sure we end on a newline
        if not out.endswith(u'\n'):
            f.write(u'\n')
    print('The following commands were written to file `%s`:' % fname)
    print(cmds)
@line_magic
def pastebin(self, parameter_s=''):
    """Upload code to Github's Gist paste bin, returning the URL.

    Usage:\\
      %pastebin [-d "Custom description"] 1-7

    The argument can be an input history range, a filename, or the name of a
    string or macro.

    Options:

      -d: Pass a custom description for the gist. The default will say
          "Pasted from IPython".
    """
    opts, args = self.parse_options(parameter_s, 'd:')

    try:
        code = self.shell.find_user_code(args)
    except (ValueError, TypeError) as e:
        print(e.args[0])
        return

    # Deferred imports: only pay for these when the magic is actually used.
    from contextlib import closing
    try:
        from urllib.request import urlopen  # Py 3
    except ImportError:
        from urllib2 import urlopen
    import json
    post_data = json.dumps({
        "description": opts.get('d', "Pasted from IPython"),
        "public": True,
        "files": {
            "file1.py": {
                "content": code
            }
        }
    }).encode('utf-8')

    # Close the HTTP response explicitly instead of leaking the socket
    # until garbage collection (urllib2 responses are not context
    # managers, so use contextlib.closing for Py2/Py3 compatibility).
    with closing(urlopen("https://api.github.com/gists", post_data)) as response:
        response_data = json.loads(response.read().decode('utf-8'))
    return response_data['html_url']
@line_magic
def loadpy(self, arg_s):
    """Alias of `%load`

    `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
    extension. So it has been renamed simply into %load. You can look at
    `%load`'s docstring for more info.
    """
    # Thin backwards-compatibility shim: all of the work happens in %load.
    self.load(arg_s)
@line_magic
def load(self, arg_s):
    """Load code into the current frontend.

    Usage:\\
      %load [options] source

    where source can be a filename, URL, input history range, macro, or
    element in the user namespace

    Options:

      -r <lines>: Specify lines or ranges of lines to load from the source.
      Ranges could be specified as x-y (x..y) or in python-style x:y
      (x..(y-1)). Both limits x and y can be left blank (meaning the
      beginning and end of the file, respectively).

      -s <symbols>: Specify function or classes to load from python source.

      -y : Don't ask confirmation for loading source above 200 000 characters.

      -n : Include the user's namespace when searching for source code.

    This magic command can either take a local filename, a URL, an history
    range (see %history) or a macro as argument, it will prompt for
    confirmation before loading source with more than 200 000 characters, unless
    -y flag is passed or if the frontend does not support raw_input::

        %load myscript.py
        %load 7-27
        %load myMacro
        %load http://www.example.com/myscript.py
        %load -r 5-10 myscript.py
        %load -r 10-20,30,40: foo.py
        %load -s MyClass,wonder_function myscript.py
        %load -n MyClass
        %load -n my_module.wonder_function
    """
    opts, args = self.parse_options(arg_s, 'yns:r:')
    if not args:
        raise UsageError('Missing filename, URL, input history range, '
                         'macro, or element in the user namespace.')

    # -n: also search the interactive user namespace for the named object.
    search_ns = 'n' in opts

    contents = self.shell.find_user_code(args, search_ns=search_ns)

    if 's' in opts:
        # -s: keep only the requested functions/classes from the source.
        try:
            blocks, not_found = extract_symbols(contents, opts['s'])
        except SyntaxError:
            # non python code
            error("Unable to parse the input as valid Python code")
            return

        if len(not_found) == 1:
            warn('The symbol `%s` was not found' % not_found[0])
        elif len(not_found) > 1:
            warn('The symbols %s were not found' % get_text_list(not_found,
                                                                 wrap_item_with='`')
            )

        contents = '\n'.join(blocks)

    if 'r' in opts:
        # -r: keep only the requested line ranges (comma- or space-separated).
        ranges = opts['r'].replace(',', ' ')
        lines = contents.split('\n')
        slices = extract_code_ranges(ranges)
        contents = [lines[slice(*slc)] for slc in slices]
        contents = '\n'.join(strip_initial_indent(chain.from_iterable(contents)))

    l = len(contents)

    # 200 000 is ~ 2500 full 80 caracter lines
    # so in average, more than 5000 lines
    if l > 200000 and 'y' not in opts:
        try:
            ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
                                         " (%d characters). Continue (y/[N]) ?" % l), default='n' )
        except StdinNotImplementedError:
            #asume yes if raw input not implemented
            ans = True

        if ans is False :
            print('Operation cancelled.')
            return

    # Prefix with a comment recording how the load was invoked, then hand the
    # text to the frontend as the next input cell.
    contents = "# %load {}\n".format(arg_s) + contents
    self.shell.set_next_input(contents, replace=True)
@staticmethod
def _find_edit_target(shell, args, opts, last_call):
    """Utility method used by magic_edit to find what to edit.

    Returns a ``(filename, lineno, use_temp)`` tuple, or
    ``(None, None, None)`` when nothing editable could be located.
    """

    def make_filename(arg):
        "Make a filename from the given args"
        try:
            filename = get_py_filename(arg)
        except IOError:
            # If it ends with .py but doesn't already exist, assume we want
            # a new file.
            if arg.endswith('.py'):
                filename = arg
            else:
                filename = None
        return filename

    # Set a few locals from the options for convenience:
    opts_prev = 'p' in opts
    opts_raw = 'r' in opts

    # custom exceptions
    class DataIsObject(Exception): pass

    # Default line number value
    lineno = opts.get('n', None)

    if opts_prev:
        # -p: re-edit whatever the previous %edit call produced.
        args = '_%s' % last_call[0]
        if args not in shell.user_ns:
            args = last_call[1]

    # by default this is done with temp files, except when the given
    # arg is a filename
    use_temp = True

    data = ''

    # First, see if the arguments should be a filename.
    filename = make_filename(args)
    if filename:
        use_temp = False
    elif args:
        # Mode where user specifies ranges of lines, like in %macro.
        data = shell.extract_input_lines(args, opts_raw)
        if not data:
            try:
                # Load the parameter given as a variable. If not a string,
                # process it as an object instead (below)
                #print '*** args',args,'type',type(args) # dbg
                data = eval(args, shell.user_ns)
                if not isinstance(data, string_types):
                    raise DataIsObject
            except (NameError, SyntaxError):
                # given argument is not a variable, try as a filename
                filename = make_filename(args)
                if filename is None:
                    warn("Argument given (%s) can't be found as a variable "
                         "or as a filename." % args)
                    return (None, None, None)
                use_temp = False
            except DataIsObject:
                # macros have a special edit function
                if isinstance(data, Macro):
                    raise MacroToEdit(data)
                # For objects, try to edit the file where they are defined
                filename = find_file(data)
                if filename:
                    if 'fakemodule' in filename.lower() and \
                        inspect.isclass(data):
                        # class created by %edit? Try to find source
                        # by looking for method definitions instead, the
                        # __module__ in those classes is FakeModule.
                        attrs = [getattr(data, aname) for aname in dir(data)]
                        for attr in attrs:
                            if not inspect.ismethod(attr):
                                continue
                            filename = find_file(attr)
                            if filename and \
                               'fakemodule' not in filename.lower():
                                # change the attribute to be the edit
                                # target instead
                                data = attr
                                break

                    m = ipython_input_pat.match(os.path.basename(filename))
                    if m:
                        # Source lives in an interactive input cell, not a
                        # real file: let the caller handle it specially.
                        raise InteractivelyDefined(int(m.groups()[0]))

                    datafile = 1
                if filename is None:
                    filename = make_filename(args)
                    datafile = 1
                    if filename is not None:
                        # only warn about this if we get a real name
                        warn('Could not find file where `%s` is defined.\n'
                             'Opening a file named `%s`' % (args, filename))
                # Now, make sure we can actually read the source (if it was
                # in a temp file it's gone by now).
                if datafile:
                    if lineno is None:
                        lineno = find_source_lines(data)
                    if lineno is None:
                        filename = make_filename(args)
                        if filename is None:
                            warn('The file where `%s` was defined '
                                 'cannot be read or found.' % data)
                            return (None, None, None)
                use_temp = False

    if use_temp:
        filename = shell.mktempfile(data)
        print('IPython will make a temporary file named:', filename)

    # use last_call to remember the state of the previous call, but don't
    # let it be clobbered by successive '-p' calls.
    try:
        last_call[0] = shell.displayhook.prompt_count
        if not opts_prev:
            last_call[1] = args
    except:
        pass

    return filename, lineno, use_temp
def _edit_macro(self, mname, macro):
    """Open an editor on a temp file holding the macro's source, then
    rebuild the macro from whatever the user saved."""
    tmp_path = self.shell.mktempfile(macro.value)
    self.shell.hooks.editor(tmp_path)
    # Replace the old macro object with one built from the edited source.
    with open(tmp_path) as edited:
        self.shell.user_ns[mname] = Macro(edited.read())
@skip_doctest
@line_magic
def edit(self, parameter_s='', last_call=['','']):
    """Bring up an editor and execute the resulting code.

    Usage:
      %edit [options] [args]

    %edit runs IPython's editor hook. The default version of this hook is
    set to call the editor specified by your $EDITOR environment variable.
    If this isn't found, it will default to vi under Linux/Unix and to
    notepad under Windows. See the end of this docstring for how to change
    the editor hook.

    You can also set the value of this editor via the
    ``TerminalInteractiveShell.editor`` option in your configuration file.
    This is useful if you wish to use a different editor from your typical
    default with IPython (and for Windows users who typically don't set
    environment variables).

    This command allows you to conveniently edit multi-line code right in
    your IPython session.

    If called without arguments, %edit opens up an empty editor with a
    temporary file and will execute the contents of this file when you
    close it (don't forget to save it!).

    Options:

    -n <number>: open the editor at a specified line number. By default,
    the IPython editor hook uses the unix syntax 'editor +N filename', but
    you can configure this by providing your own modified hook if your
    favorite editor supports line-number specifications with a different
    syntax.

    -p: this will call the editor with the same data as the previous time
    it was used, regardless of how long ago (in your current session) it
    was.

    -r: use 'raw' input. This option only applies to input taken from the
    user's history. By default, the 'processed' history is used, so that
    magics are loaded in their transformed version to valid Python. If
    this option is given, the raw input as typed as the command line is
    used instead. When you exit the editor, it will be executed by
    IPython's own processor.

    -x: do not execute the edited code immediately upon exit. This is
    mainly useful if you are editing programs which need to be called with
    command line arguments, which you can then do using %run.

    Arguments:

    If arguments are given, the following possibilities exist:

    - If the argument is a filename, IPython will load that into the
      editor. It will execute its contents with execfile() when you exit,
      loading any code in the file into your interactive namespace.

    - The arguments are ranges of input history, e.g. "7 ~1/4-6".
      The syntax is the same as in the %history magic.

    - If the argument is a string variable, its contents are loaded
      into the editor. You can thus edit any string which contains
      python code (including the result of previous edits).

    - If the argument is the name of an object (other than a string),
      IPython will try to locate the file where it was defined and open the
      editor at the point where it is defined. You can use `%edit function`
      to load an editor exactly at the point where 'function' is defined,
      edit it and have the file be executed automatically.

    - If the object is a macro (see %macro for details), this opens up your
      specified editor with a temporary file containing the macro's data.
      Upon exit, the macro is reloaded with the contents of the file.

    Note: opening at an exact line is only supported under Unix, and some
    editors (like kedit and gedit up to Gnome 2.8) do not understand the
    '+NUMBER' parameter necessary for this feature. Good editors like
    (X)Emacs, vi, jed, pico and joe all do.

    After executing your code, %edit will return as output the code you
    typed in the editor (except when it was an existing file). This way
    you can reload the code in further invocations of %edit as a variable,
    via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
    the output.

    Note that %edit is also available through the alias %ed.

    This is an example of creating a simple function inside the editor and
    then modifying it. First, start up the editor::

      In [1]: edit
      Editing... done. Executing edited code...
      Out[1]: 'def foo():\\n    print "foo() was defined in an editing
      session"\\n'

    We can then call the function foo()::

      In [2]: foo()
      foo() was defined in an editing session

    Now we edit foo.  IPython automatically loads the editor with the
    (temporary) file where foo() was previously defined::

      In [3]: edit foo
      Editing... done. Executing edited code...

    And if we call foo() again we get the modified version::

      In [4]: foo()
      foo() has now been changed!

    Here is an example of how to edit a code snippet successive
    times. First we call the editor::

      In [5]: edit
      Editing... done. Executing edited code...
      hello
      Out[5]: "print 'hello'\\n"

    Now we call it again with the previous output (stored in _)::

      In [6]: edit _
      Editing... done. Executing edited code...
      hello world
      Out[6]: "print 'hello world'\\n"

    Now we call it with the output #8 (stored in _8, also as Out[8])::

      In [7]: edit _8
      Editing... done. Executing edited code...
      hello again
      Out[7]: "print 'hello again'\\n"

    Changing the default editor hook:

    If you wish to write your own editor hook, you can put it in a
    configuration file which you load at startup time.  The default hook
    is defined in the IPython.core.hooks module, and you can use that as a
    starting example for further modifications.  That file also has
    general instructions on how to set a new hook for use once you've
    defined it."""
    opts, args = self.parse_options(parameter_s, 'prxn:')

    try:
        filename, lineno, is_temp = self._find_edit_target(self.shell,
                                                           args, opts, last_call)
    except MacroToEdit as e:
        # Macros are edited through a dedicated temp-file round trip.
        self._edit_macro(args, e.args[0])
        return
    except InteractivelyDefined as e:
        # The target was defined in an input cell: edit that cell instead.
        print("Editing In[%i]" % e.index)
        args = str(e.index)
        filename, lineno, is_temp = self._find_edit_target(self.shell,
                                                           args, opts, last_call)
    if filename is None:
        # nothing was found, warnings have already been issued,
        # just give up.
        return

    # Track temp files so re-editing one is still treated as temporary.
    if is_temp:
        self._knowntemps.add(filename)
    elif (filename in self._knowntemps):
        is_temp = True

    # do actual editing here
    print('Editing...', end=' ')
    sys.stdout.flush()
    try:
        # Quote filenames that may have spaces in them
        if ' ' in filename:
            filename = "'%s'" % filename
        self.shell.hooks.editor(filename, lineno)
    except TryNext:
        warn('Could not open editor')
        return

    # XXX TODO: should this be generalized for all string vars?
    # For now, this is special-cased to blocks created by cpaste
    if args.strip() == 'pasted_block':
        with open(filename, 'r') as f:
            self.shell.user_ns['pasted_block'] = f.read()

    if 'x' in opts:  # -x prevents actual execution
        print()
    else:
        print('done. Executing edited code...')
        with preserve_keys(self.shell.user_ns, '__file__'):
            if not is_temp:
                self.shell.user_ns['__file__'] = filename
            if 'r' in opts:  # Untranslated IPython code
                with open(filename, 'r') as f:
                    source = f.read()
                self.shell.run_cell(source, store_history=False)
            else:
                self.shell.safe_execfile(filename, self.shell.user_ns,
                                         self.shell.user_ns)

    if is_temp:
        try:
            # Return the edited source so the user can reuse it via _<N>.
            return open(filename).read()
        except IOError as msg:
            if msg.filename == filename:
                warn('File not found. Did you forget to save?')
                return
            else:
                self.shell.showtraceback()
| gpl-3.0 |
android-ia/platform_external_chromium_org | build/android/pylib/utils/timeout_retry.py | 47 | 1566 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility to run functions with timeouts and retries."""
# pylint: disable=W0702
import threading
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
def Run(func, timeout, retries, args=None, kwargs=None):
  """Runs the passed function in a separate thread with timeouts and retries.

  Args:
    func: the function to be wrapped.
    timeout: the timeout in seconds for each try.
    retries: the number of retries.
    args: list of positional args to pass to |func|.
    kwargs: dictionary of keyword args to pass to |func|.

  Returns:
    The return value of func(*args, **kwargs).
  """
  args = args or []
  kwargs = kwargs or {}

  # A one-element list is used so the worker closure can store its result:
  # rebinding a plain local inside _RunFunc would not be visible out here.
  result = [None]

  def _RunFunc():
    result[0] = func(*args, **kwargs)

  attempts_left = retries
  while True:
    try:
      thread_name = 'TimeoutThread-for-%s' % threading.current_thread().name
      group = reraiser_thread.ReraiserThreadGroup(
          [reraiser_thread.ReraiserThread(_RunFunc, name=thread_name)])
      group.StartAll()
      group.JoinAll(watchdog_timer.WatchdogTimer(timeout))
      return result[0]
    except:
      # Any failure (including timeouts re-raised by JoinAll) consumes one
      # retry; once exhausted, propagate the original exception.
      if attempts_left <= 0:
        raise
      attempts_left -= 1
| bsd-3-clause |
cvvnx1/nagweb | www/static/metronic1.4/admin/template_content/assets/plugins/jquery-file-upload/server/gae-python/main.py | 223 | 5173 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json, re, urllib, webapp2
WEBSITE = 'http://blueimp.github.com/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
    """Delete the given uploaded blobs (run deferred after EXPIRATION_TIME)."""
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """Accepts jQuery File Upload POSTs: validates each uploaded file,
    stores it in the blobstore, and responds with JSON metadata.

    CORS headers are added to every response so the upload API can be
    used cross-origin.
    """

    def initialize(self, request, response):
        super(UploadHandler, self).initialize(request, response)
        # Allow any origin and the full set of methods this handler supports.
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'

    def validate(self, file):
        """Check size and content-type limits; on failure, record a
        human-readable message under file['error'] and return False."""
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def get_file_size(self, file):
        """Return the size of a file object without consuming it."""
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        """Write raw bytes into a new blobstore file and return its key."""
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)

    def handle_upload(self):
        """Process every file field in the POST; return a list of result
        dicts (url, thumbnail_url, delete_url, or error per file)."""
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Plain (non-file) form fields arrive as unicode; skip them.
            if type(fieldStorage) is unicode:
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(r'^.*\\', '',
                                    fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['delete_type'] = 'DELETE'
                result['delete_url'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        # Use the image service for scaled serving URLs.
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url\
                                .startswith('https')
                        )
                        result['thumbnail_url'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    except:  # Could not get an image serving url
                        pass
                if not 'url' in result:
                    # Fall back to our own download handler route.
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Schedule deletion of all stored blobs once the demo window passes.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results

    def options(self):
        # CORS preflight: headers were already set in initialize().
        pass

    def head(self):
        pass

    def get(self):
        self.redirect(WEBSITE)

    def post(self):
        """Handle an upload (or a tunnelled DELETE) and reply with JSON,
        honoring the optional iframe-transport 'redirect' parameter."""
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)

    def delete(self):
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves a stored blob back to the client by key, or 404s."""

    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
        else:
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] =\
                'public,max-age=%d' % EXPIRATION_TIME
            self.send_blob(key, save_as=filename)
# WSGI routing: uploads at the root, downloads at /<blob_key>/<filename>.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
ehashman/oh-mainline | vendor/packages/celery/celery/events/__init__.py | 18 | 8485 | # -*- coding: utf-8 -*-
"""
celery.events
~~~~~~~~~~~~~
Events are messages sent for actions happening
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
is enabled), used for monitoring purposes.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import time
import socket
import threading
from collections import deque
from contextlib import contextmanager
from itertools import count
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from ..app import app_or_default
from ..utils import uuid
event_exchange = Exchange("celeryev", type="topic")
def Event(type, _fields=None, **fields):
    """Create an event.

    An event is a dictionary, the only required field is ``type``;
    a ``timestamp`` is filled in automatically when not supplied.
    """
    body = dict(_fields or {}, type=type, **fields)
    # Stamp with the current time unless the caller provided one.
    body.setdefault("timestamp", time.time())
    return body
class EventDispatcher(object):
    """Send events as messages.

    :param connection: Connection to the broker.

    :keyword hostname: Hostname to identify ourselves as,
        by default uses the hostname returned by :func:`socket.gethostname`.

    :keyword enabled: Set to :const:`False` to not actually publish any events,
        making :meth:`send` a noop operation.

    :keyword channel: Can be used instead of `connection` to specify
        an exact channel to use when sending events.

    :keyword buffer_while_offline: If enabled events will be buffered
        while the connection is down. :meth:`flush` must be called
        as soon as the connection is re-established.

    You need to :meth:`close` this after use.
    """

    def __init__(self, connection=None, hostname=None, enabled=True,
                 channel=None, buffer_while_offline=True, app=None,
                 serializer=None):
        self.app = app_or_default(app)
        self.connection = connection
        self.channel = channel
        self.hostname = hostname or socket.gethostname()
        self.buffer_while_offline = buffer_while_offline
        # Serializes concurrent send() calls.
        self.mutex = threading.Lock()
        self.publisher = None
        # Holds (type, fields, exc) tuples for events that failed to publish.
        self._outbound_buffer = deque()
        self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
        self.enabled = enabled
        if self.enabled:
            self.enable()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def enable(self):
        # Opens a new channel when one was not supplied explicitly.
        self.publisher = Producer(self.channel or self.connection.channel(),
                                  exchange=event_exchange,
                                  serializer=self.serializer)
        self.enabled = True

    def disable(self):
        if self.enabled:
            self.enabled = False
            self.close()

    def send(self, type, **fields):
        """Send event.

        :param type: Kind of event.
        :keyword \*\*fields: Event arguments.
        """
        if self.enabled:
            with self.mutex:
                event = Event(type, hostname=self.hostname,
                              clock=self.app.clock.forward(), **fields)
                try:
                    # Routing key mirrors the event type, dots for dashes.
                    self.publisher.publish(event,
                                           routing_key=type.replace("-", "."))
                except Exception, exc:
                    if not self.buffer_while_offline:
                        raise
                    # Keep the event for a later flush() once reconnected.
                    self._outbound_buffer.append((type, fields, exc))

    def flush(self):
        # Re-send everything buffered while the connection was down.
        while self._outbound_buffer:
            try:
                type, fields, _ = self._outbound_buffer.popleft()
            except IndexError:
                return
            self.send(type, **fields)

    def copy_buffer(self, other):
        # Adopt another dispatcher's pending-event buffer.
        self._outbound_buffer = other._outbound_buffer

    def close(self):
        """Close the event dispatcher."""
        # Best-effort release if the lock is currently held.
        self.mutex.locked() and self.mutex.release()
        if self.publisher is not None:
            if not self.channel:  # close auto channel.
                self.publisher.channel.close()
            self.publisher = None
class EventReceiver(object):
    """Capture events.

    :param connection: Connection to the broker.
    :keyword handlers: Event handlers.

    :attr:`handlers` is a dict of event types and their handlers,
    the special handler `"*"` captures all events that doesn't have a
    handler.
    """
    handlers = {}

    def __init__(self, connection, handlers=None, routing_key="#",
                 node_id=None, app=None):
        self.app = app_or_default(app)
        self.connection = connection
        if handlers is not None:
            self.handlers = handlers
        self.routing_key = routing_key
        self.node_id = node_id or uuid()
        # Per-node auto-delete queue bound to the shared event exchange.
        self.queue = Queue("%s.%s" % ("celeryev", self.node_id),
                           exchange=event_exchange,
                           routing_key=self.routing_key,
                           auto_delete=True,
                           durable=False)

    def process(self, type, event):
        """Process the received event by dispatching it to the appropriate
        handler."""
        # Fall back to the catch-all "*" handler when no exact match exists.
        handler = self.handlers.get(type) or self.handlers.get("*")
        handler and handler(event)

    @contextmanager
    def consumer(self):
        """Create event consumer.

        .. warning::

            This creates a new channel that needs to be closed
            by calling `consumer.channel.close()`.
        """
        consumer = Consumer(self.connection.channel(),
                            queues=[self.queue], no_ack=True)
        consumer.register_callback(self._receive)
        with consumer:
            yield consumer
        consumer.channel.close()

    def itercapture(self, limit=None, timeout=None, wakeup=True):
        with self.consumer() as consumer:
            if wakeup:
                self.wakeup_workers(channel=consumer.channel)
            yield consumer
            self.drain_events(limit=limit, timeout=timeout)

    def capture(self, limit=None, timeout=None, wakeup=True):
        """Open up a consumer capturing events.

        This has to run in the main process, and it will never
        stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
        """
        # Draining the generator performs the actual capture loop.
        list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup))

    def wakeup_workers(self, channel=None):
        # Ask all workers to announce themselves via a heartbeat broadcast.
        self.app.control.broadcast("heartbeat",
                                   connection=self.connection,
                                   channel=channel)

    def drain_events(self, limit=None, timeout=None):
        for iteration in count(0):
            if limit and iteration >= limit:
                break
            try:
                self.connection.drain_events(timeout=timeout)
            except socket.timeout:
                # Only propagate the timeout if the caller asked for one.
                if timeout:
                    raise
            except socket.error:
                pass

    def _receive(self, body, message):
        type = body.pop("type").lower()
        clock = body.get("clock")
        if clock:
            # Keep our logical clock in sync with the sender's.
            self.app.clock.adjust(clock)
        self.process(type, Event(type, body))
class Events(object):
    """Factory for event dispatcher/receiver/state objects bound to an app."""

    def __init__(self, app=None):
        self.app = app

    def Receiver(self, connection, handlers=None, routing_key="#",
                 node_id=None):
        return EventReceiver(connection,
                             handlers=handlers,
                             routing_key=routing_key,
                             node_id=node_id,
                             app=self.app)

    def Dispatcher(self, connection=None, hostname=None, enabled=True,
                   channel=None, buffer_while_offline=True):
        return EventDispatcher(connection,
                               hostname=hostname,
                               enabled=enabled,
                               channel=channel,
                               app=self.app)

    def State(self):
        # Deferred import: avoids importing .state at module load time.
        from .state import State as _State
        return _State()

    @contextmanager
    def default_dispatcher(self, hostname=None, enabled=True,
                           buffer_while_offline=False):
        # Borrow a publisher connection from the app's pool for the
        # lifetime of the context, then hand out a dispatcher on it.
        with self.app.amqp.publisher_pool.acquire(block=True) as pub:
            with self.Dispatcher(pub.connection, hostname, enabled,
                                 pub.channel, buffer_while_offline) as d:
                yield d
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/310_seq_tests.py | 7 | 13603 | """
Tests common to tuple, list and UserList.UserList
"""
import unittest
import sys
# Various iterables
# This is used for checking the constructor (here and in test_deque.py)
def iterfunc(seqn):
    """Regular generator: yields each element of *seqn* unchanged."""
    for element in seqn:
        yield element
class Sequence:
    """Sequence exposing only __getitem__ (the legacy iteration protocol)."""

    def __init__(self, seqn):
        self.seqn = seqn

    def __getitem__(self, index):
        return self.seqn[index]
class IterFunc:
    """Sequence implementing the iterator protocol (__iter__/__next__)."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
class IterGen:
    """Iterable whose __iter__ is implemented with a generator."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        for item in self.seqn:
            yield item
class IterNextOnly:
    """Has __next__ but neither __getitem__ nor __iter__, so it is not
    a valid iterable on its own."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __next__(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
class IterNoNext:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class IterGenExc:
    """Iterator whose __next__ raises, to test exception propagation."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Deliberately raises ZeroDivisionError (do not replace with an
        # explicit raise: the division keeps the original message intact).
        3 // 0
class IterFuncStop:
    """Iterator that stops immediately, ignoring its input entirely."""

    def __init__(self, seqn):
        pass

    def __iter__(self):
        return self

    def __next__(self):
        raise StopIteration
from itertools import chain
def itermulti(seqn):
    """Test multiple tiers of iterators."""
    # Wrap the input in successive iteration layers, then feed the stack
    # through an identity map and chain.
    layered = Sequence(seqn)
    layered = IterGen(layered)
    layered = iterfunc(layered)
    return chain(map(lambda x: x, layered))
class CommonTest(unittest.TestCase):
# The type to be tested
type2test = None
def test_constructors(self):
l0 = []
l1 = [0]
l2 = [0, 1]
u = self.type2test()
u0 = self.type2test(l0)
u1 = self.type2test(l1)
u2 = self.type2test(l2)
uu = self.type2test(u)
uu0 = self.type2test(u0)
uu1 = self.type2test(u1)
uu2 = self.type2test(u2)
v = self.type2test(tuple(u))
class OtherSeq:
def __init__(self, initseq):
self.__data = initseq
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
s = OtherSeq(u0)
v0 = self.type2test(s)
self.assertEqual(len(v0), len(s))
s = "this is also a sequence"
vv = self.type2test(s)
self.assertEqual(len(vv), len(s))
# Create from various iteratables
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (Sequence, IterFunc, IterGen,
itermulti, iterfunc):
self.assertEqual(self.type2test(g(s)), self.type2test(s))
self.assertEqual(self.type2test(IterFuncStop(s)), self.type2test())
self.assertEqual(self.type2test(c for c in "123"), self.type2test("123"))
self.assertRaises(TypeError, self.type2test, IterNextOnly(s))
self.assertRaises(TypeError, self.type2test, IterNoNext(s))
self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s))
def test_truth(self):
self.assert_(not self.type2test())
self.assert_(self.type2test([42]))
def test_getitem(self):
u = self.type2test([0, 1, 2, 3, 4])
for i in range(len(u)):
self.assertEqual(u[i], i)
self.assertEqual(u[int(i)], i)
for i in range(-len(u), -1):
self.assertEqual(u[i], len(u)+i)
self.assertEqual(u[int(i)], len(u)+i)
self.assertRaises(IndexError, u.__getitem__, -len(u)-1)
self.assertRaises(IndexError, u.__getitem__, len(u))
self.assertRaises(ValueError, u.__getitem__, slice(0,10,0))
u = self.type2test()
self.assertRaises(IndexError, u.__getitem__, 0)
self.assertRaises(IndexError, u.__getitem__, -1)
self.assertRaises(TypeError, u.__getitem__)
a = self.type2test([10, 11])
self.assertEqual(a[0], 10)
self.assertEqual(a[1], 11)
self.assertEqual(a[-2], 10)
self.assertEqual(a[-1], 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
def test_getslice(self):
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
self.assertEqual(u[0:0], self.type2test())
self.assertEqual(u[1:2], self.type2test([1]))
self.assertEqual(u[-2:-1], self.type2test([3]))
self.assertEqual(u[-1000:1000], u)
self.assertEqual(u[1000:-1000], self.type2test([]))
self.assertEqual(u[:], u)
self.assertEqual(u[1:None], self.type2test([1, 2, 3, 4]))
self.assertEqual(u[None:3], self.type2test([0, 1, 2]))
# Extended slices
self.assertEqual(u[::], u)
self.assertEqual(u[::2], self.type2test([0, 2, 4]))
self.assertEqual(u[1::2], self.type2test([1, 3]))
self.assertEqual(u[::-1], self.type2test([4, 3, 2, 1, 0]))
self.assertEqual(u[::-2], self.type2test([4, 2, 0]))
self.assertEqual(u[3::-2], self.type2test([3, 1]))
self.assertEqual(u[3:3:-2], self.type2test([]))
self.assertEqual(u[3:2:-2], self.type2test([3]))
self.assertEqual(u[3:1:-2], self.type2test([3]))
self.assertEqual(u[3:0:-2], self.type2test([3, 1]))
self.assertEqual(u[::-100], self.type2test([4]))
self.assertEqual(u[100:-100:], self.type2test([]))
self.assertEqual(u[-100:100:], u)
self.assertEqual(u[100:-100:-1], u[::-1])
self.assertEqual(u[-100:100:-1], self.type2test([]))
self.assertEqual(u[-100:100:2], self.type2test([0, 2, 4]))
# Test extreme cases with long ints
a = self.type2test([0,1,2,3,4])
self.assertEqual(a[ -pow(2,128): 3 ], self.type2test([0,1,2]))
self.assertEqual(a[ 3: pow(2,145) ], self.type2test([3,4]))
def test_contains(self):
u = self.type2test([0, 1, 2])
for i in u:
self.assert_(i in u)
for i in min(u)-1, max(u)+1:
self.assert_(i not in u)
self.assertRaises(TypeError, u.__contains__)
def test_contains_fake(self):
class AllEq:
# Sequences must use rich comparison against each item
# (unless "is" is true, or an earlier item answered)
# So instances of AllEq must be found in all non-empty sequences.
def __eq__(self, other):
return True
__hash__ = None # Can't meet hash invariant requirements
self.assert_(AllEq() not in self.type2test([]))
self.assert_(AllEq() in self.type2test([1]))
def test_contains_order(self):
# Sequences must test in-order. If a rich comparison has side
# effects, these will be visible to tests against later members.
# In this test, the "side effect" is a short-circuiting raise.
class DoNotTestEq(Exception):
pass
class StopCompares:
def __eq__(self, other):
raise DoNotTestEq
checkfirst = self.type2test([1, StopCompares()])
self.assert_(1 in checkfirst)
checklast = self.type2test([StopCompares(), 1])
self.assertRaises(DoNotTestEq, checklast.__contains__, 1)
def test_len(self):
self.assertEqual(len(self.type2test()), 0)
self.assertEqual(len(self.type2test([])), 0)
self.assertEqual(len(self.type2test([0])), 1)
self.assertEqual(len(self.type2test([0, 1, 2])), 3)
def test_minmax(self):
u = self.type2test([0, 1, 2])
self.assertEqual(min(u), 0)
self.assertEqual(max(u), 2)
def test_addmul(self):
u1 = self.type2test([0])
u2 = self.type2test([0, 1])
self.assertEqual(u1, u1 + self.type2test())
self.assertEqual(u1, self.type2test() + u1)
self.assertEqual(u1 + self.type2test([1]), u2)
self.assertEqual(self.type2test([-1]) + u1, self.type2test([-1, 0]))
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2+u2, u2*3)
self.assertEqual(u2+u2+u2, 3*u2)
class subclass(self.type2test):
pass
u3 = subclass([0, 1])
self.assertEqual(u3, u3*1)
self.assert_(u3 is not u3*1)
def test_iadd(self):
u = self.type2test([0, 1])
u += self.type2test()
self.assertEqual(u, self.type2test([0, 1]))
u += self.type2test([2, 3])
self.assertEqual(u, self.type2test([0, 1, 2, 3]))
u += self.type2test([4, 5])
self.assertEqual(u, self.type2test([0, 1, 2, 3, 4, 5]))
u = self.type2test("spam")
u += self.type2test("eggs")
self.assertEqual(u, self.type2test("spameggs"))
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides are not recognized by __iter__
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(next(iter(T((1,2)))), 1)
def test_repeat(self):
for m in range(4):
s = tuple(range(m))
for n in range(-3, 5):
self.assertEqual(self.type2test(s*n), self.type2test(s)*n)
self.assertEqual(self.type2test(s)*(-4), self.type2test([]))
self.assertEqual(id(s), id(s*1))
def test_bigrepeat(self):
import sys
if sys.maxsize <= 2147483647:
x = self.type2test([0])
x *= 2**16
self.assertRaises(MemoryError, x.__mul__, 2**16)
if hasattr(x, '__imul__'):
self.assertRaises(MemoryError, x.__imul__, 2**16)
def test_subscript(self):
    """__getitem__ with ints (incl. negative), slice objects, a
    zero-step slice (ValueError) and a non-index key (TypeError)."""
    a = self.type2test([10, 11])
    self.assertEqual(a.__getitem__(0), 10)
    self.assertEqual(a.__getitem__(1), 11)
    self.assertEqual(a.__getitem__(-2), 10)
    self.assertEqual(a.__getitem__(-1), 11)
    self.assertRaises(IndexError, a.__getitem__, -3)
    self.assertRaises(IndexError, a.__getitem__, 3)
    self.assertEqual(a.__getitem__(slice(0,1)), self.type2test([10]))
    self.assertEqual(a.__getitem__(slice(1,2)), self.type2test([11]))
    self.assertEqual(a.__getitem__(slice(0,2)), self.type2test([10, 11]))
    # Slice bounds past the end are clamped, not errors.
    self.assertEqual(a.__getitem__(slice(0,3)), self.type2test([10, 11]))
    self.assertEqual(a.__getitem__(slice(3,5)), self.type2test([]))
    self.assertRaises(ValueError, a.__getitem__, slice(0, 10, 0))
    self.assertRaises(TypeError, a.__getitem__, 'x')

def test_count(self):
    """count(): multiplicity, absent values, missing argument, and a
    raising __eq__ must propagate out of the scan."""
    a = self.type2test([0, 1, 2])*3
    self.assertEqual(a.count(0), 3)
    self.assertEqual(a.count(1), 3)
    self.assertEqual(a.count(3), 0)

    self.assertRaises(TypeError, a.count)

    class BadExc(Exception):
        pass

    class BadCmp:
        def __eq__(self, other):
            if other == 2:
                raise BadExc()
            return False

    self.assertRaises(BadExc, a.count, BadCmp())

def test_index(self):
    """index(): first occurrence, start/stop windows (incl. negative and
    huge bounds), empty windows, and a raising __eq__ propagating."""
    u = self.type2test([0, 1])
    self.assertEqual(u.index(0), 0)
    self.assertEqual(u.index(1), 1)
    self.assertRaises(ValueError, u.index, 2)

    u = self.type2test([-2, -1, 0, 0, 1, 2])
    self.assertEqual(u.count(0), 2)
    self.assertEqual(u.index(0), 2)
    self.assertEqual(u.index(0, 2), 2)
    self.assertEqual(u.index(-2, -10), 0)
    self.assertEqual(u.index(0, 3), 3)
    self.assertEqual(u.index(0, 3, 4), 3)
    # start past stop yields an empty search window.
    self.assertRaises(ValueError, u.index, 2, 0, -10)

    self.assertRaises(TypeError, u.index)

    class BadExc(Exception):
        pass

    class BadCmp:
        def __eq__(self, other):
            if other == 2:
                raise BadExc()
            return False

    a = self.type2test([0, 1, 2, 3])
    self.assertRaises(BadExc, a.index, BadCmp())

    a = self.type2test([-2, -1, 0, 0, 1, 2])
    self.assertEqual(a.index(0), 2)
    self.assertEqual(a.index(0, 2), 2)
    self.assertEqual(a.index(0, -4), 2)
    self.assertEqual(a.index(-2, -10), 0)
    self.assertEqual(a.index(0, 3), 3)
    self.assertEqual(a.index(0, -3), 3)
    self.assertEqual(a.index(0, 3, 4), 3)
    self.assertEqual(a.index(0, -3, -2), 3)
    # Bounds far beyond the sequence length must be clamped, not error.
    self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
    self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
    self.assertRaises(ValueError, a.index, 2, 0, -10)
| gpl-3.0 |
molden/hanythingondemand | hod/node/node.py | 1 | 8124 | # #
# Copyright 2009-2015 Ghent University
#
# This file is part of hanythingondemand
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/hanythingondemand
#
# hanythingondemand is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# hanythingondemand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hanythingondemand. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Network utilities
@author: Stijn De Weirdt (Ghent University)
@author: Ewan Higgs (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import re
import os
import socket
from collections import namedtuple
from vsc.utils import fancylogger
from vsc.utils.affinity import sched_getaffinity
from hod.commands.command import ULimit
from hod.utils import only_if_module_is_available
# optional packages, not always required
try:
import netifaces
import netaddr
except ImportError:
pass
# Lightweight record for one NIC: reverse-resolved hostname, IPv4 address,
# kernel device name (e.g. 'eth0', 'ib0'), and netmask length in bits.
NetworkInterface = namedtuple('NetworkInterface', 'hostname,addr,device,mask_bits')

# Module-level logger (fname=False: don't include the caller function name).
_log = fancylogger.getLogger(fname=False)
@only_if_module_is_available('netaddr')
def netmask2maskbits(netmask):
    """Find the number of bits in a netmask.

    E.g. '255.255.255.0' -> 24: counts the set bits of the mask's
    32-bit integer representation.
    """
    mask_as_int = netaddr.IPAddress(netmask).value
    return bin(mask_as_int).count('1')


@only_if_module_is_available('netifaces')
def get_networks():
    """
    Returns list of NetworkInterface tuples by interface.
    Of the form: [hostname, ipaddr, iface, subnetmask]

    Only interfaces that expose at least one IPv4 address are included.
    """
    devices = netifaces.interfaces()
    networks = []
    for device in devices:
        iface = netifaces.ifaddresses(device)
        if netifaces.AF_INET in iface:
            # Only the first IPv4 address of each interface is reported.
            iface = iface[netifaces.AF_INET][0]
            addr = iface['addr']
            mask_bits = netmask2maskbits(iface['netmask'])
            # Reverse-resolve; getfqdn falls back to the address string
            # itself when no hostname is assigned.
            hostname = socket.getfqdn(addr)
            networks.append(NetworkInterface(hostname, addr, device, mask_bits))
    return networks


@only_if_module_is_available('netaddr')
def address_in_network(ip, net):
    """
    Determine if an ip is in a network.
    e.g. 192.168.0.1 is in 192.168.0.0/24 but not 10.0.0.0/24.
    Params
    ------
    ip : `str`
        ipv4 ip address as string.
    net : `str`
        Network and mask bits as string (e.g. '192.168.0.0/16')
    """
    return netaddr.IPAddress(ip) in netaddr.IPNetwork(net)


def ip_interface_to(networks, ip):
    """Which of the detected network interfaces can reach ip

    Returns the first NetworkInterface whose subnet contains `ip`,
    or None when no interface matches.

    Params
    ------
    networks : `list of NetworkInterface`
    ip : `str`
        Destination ipv4 address as string.
    """
    for intf in networks:
        net = "%s/%s" % (intf.addr, intf.mask_bits)
        if address_in_network(ip, net):
            return intf
    return None
def sorted_network(network):
    """Try to find a preferred network (can be advanced like IPoIB of high-speed ethernet)

    Reorders (never drops) the interfaces: InfiniBand devices first, then
    plain non-vlan/non-loopback NICs, then remaining non-loopback, then
    everything else.  NOTE: sorts the caller's list in place.
    """
    nw = []
    _log.debug("Preferred network selection")
    # step 1 alphabetical ordering (who knows in what order ip returns the addresses) on hostname field
    network.sort()

    # filter for interfaces which have not been assigned hostnames
    # (hostname that is still a dotted quad == unresolved address)
    ip_hostname = re.compile(r"^\d+\.\d+\.\d+\.\d+$")

    # look for ib network
    ib_reg = re.compile(r"^(ib)\d+$")
    for intf in network:
        if ib_reg.search(intf.device) and not ip_hostname.search(intf.hostname):
            if not intf in nw:
                _log.debug("Added intf %s as ib interface", str(intf))
                nw.append(intf)

    # final selection prefer non-vlan (vlan devices look like 'eth0.123')
    vlan_reg = re.compile(r"^(.*)\.\d+$")
    loopback_reg = re.compile(r"^(lo)\d*$")
    for intf in network:
        if not (vlan_reg.search(intf.device) or
                loopback_reg.search(intf.device) or
                ip_hostname.search(intf.hostname)):
            if not intf in nw:
                _log.debug("Added intf %s as non-vlan or non-loopback interface",
                           str(intf))
                nw.append(intf)

    # add remainder non-loopback
    for intf in network:
        if not loopback_reg.search(intf.device):
            if not intf in nw:
                _log.debug("Added intf %s as remaining non-loopback interface",
                           str(intf))
                nw.append(intf)

    # add remainder (loopback and anything else not yet listed)
    for intf in network:
        if not intf in nw:
            _log.debug("Added intf %s as remaining interface",
                       str(intf))
            nw.append(intf)

    _log.debug("ordered network %s",
               nw)
    return nw
def _get_memory_proc_meminfo():
    """Parse /proc/meminfo and return {field_name: bytes}.

    Keys are the lower-cased field names (e.g. 'memtotal'); values are
    converted from the reported unit into bytes.
    """
    re_mem = re.compile(r"^\s*(?P<mem>\d+)(?P<unit>(?:k)B)?\s*$")
    proc_meminfo_filename = '/proc/meminfo'

    meminfo = {}
    # Read the file inside a context manager so the handle is always closed
    # (the original open(...).read() leaked the descriptor until GC).
    with open(proc_meminfo_filename) as meminfo_file:
        contents = meminfo_file.read()
    # Spaces are stripped first, so a value looks like '16384kB'.
    for line in contents.replace(' ', '').split('\n'):
        if not line.strip():
            continue
        key = line.split(':')[0].lower().strip()
        try:
            value = line.split(':')[1].strip()
        except IndexError:
            _log.error("No :-separated entry for line %s in %s",
                       line, proc_meminfo_filename)
            continue

        reg = re_mem.search(value)
        if reg:
            unit = reg.groupdict()['unit']
            mem = int(reg.groupdict()['mem'])
            multi = 1
            # NOTE(review): the regex only captures 'kB' or nothing, so the
            # 'B' branch looks unreachable -- kept for safety.
            if unit in (None, 'B',):
                multi = 1
            elif unit in ('kB',):
                multi = 2 ** 10
            else:
                _log.error("Unsupported memory unit %s in key %s value %s", unit, key, value)
            meminfo[key] = mem * multi
        else:
            _log.error("Unknown memory entry in key %s value %s", key, value)

    _log.debug("Collected meminfo %s", meminfo)
    return meminfo


def _get_memory_ulimit_v():
    '''
    Return the ulimit for virtual memory in bytes or "unlimited"
    '''
    stdout, _ = ULimit('-v').run()
    if stdout == 'unlimited':
        return stdout
    else:
        # ulimit -v reports kilobytes; callers expect bytes
        return int(stdout) * 1024


def get_memory():
    """Extract information about the available memory

    Returns a dict with 'meminfo' (parsed /proc/meminfo, bytes) and
    'ulimit' (virtual-memory limit in bytes, or the string 'unlimited').
    """
    memory = {}
    memory['meminfo'] = _get_memory_proc_meminfo()
    memory['ulimit'] = _get_memory_ulimit_v()
    return memory
class Node(object):
    """Detect localnode properties"""

    def __init__(self):
        self.log = fancylogger.getLogger(name=self.__class__.__name__, fname=False)
        self.fqdn = 'localhost'  # base fqdn hostname
        self.network = []  # all possible IPs
        self.pid = -1  # pid of this process
        self.cores = -1  # number of cores usable by this process
        self.usablecores = None  # core indices in this process's affinity mask
        self.totalcores = None  # total cores present on the host
        self.topology = [0]  # default topology plain set
        self.memory = {}  # result of get_memory()

    def __str__(self):
        return "FQDN %s PID %s" % (self.fqdn, self.pid)

    def go(self):
        """A wrapper around some common functions

        Populates every attribute from the running system and returns
        them as a plain dict.
        """
        self.fqdn = socket.getfqdn()
        self.network = sorted_network(get_networks())
        self.pid = os.getpid()
        # Indices of the cores this process is allowed to run on.
        self.usablecores = [idx for idx, used in enumerate(sched_getaffinity().cpus) if used]
        self.cores = len(self.usablecores)
        self.totalcores = os.sysconf('SC_NPROCESSORS_ONLN')
        self.memory = get_memory()
        descr = {
            'fqdn': self.fqdn,
            'network': self.network,
            'pid': self.pid,
            'cores': self.cores,
            'usablecores': self.usablecores,
            'totalcores': self.totalcores,
            'topology': self.topology,
            'memory': self.memory,
        }
        return descr
| gpl-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.schism.common/lib/requests/packages/urllib3/request.py | 290 | 5946 | from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages.six.moves.urllib.parse import urlencode
__all__ = ['RequestMethods']


class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.
    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.
    Specifically,
    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).
    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).
    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.
    Initializer parameters:
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP verbs whose fields belong in the query string, not the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bugfix: the original raised NotImplemented, which is a constant,
        # not an exception class -- "raising" it produced a confusing
        # TypeError instead of the intended NotImplementedError.
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.
        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.
        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.
        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.
        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::
            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }
        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.
        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
| gpl-2.0 |
alikins/ansible | lib/ansible/plugins/action/onyx_config.py | 16 | 4123 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
# Result keys wrapped in double underscores are internal and stripped
# before the result is returned to the user.
PRIVATE_KEYS_RE = re.compile('__.+__')


class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):
        """Template the 'src' argument, run the module, then post-process
        the result (write the backup file, strip private keys)."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # py3 compat: exception objects have no .message attribute.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters.  Iterate over a snapshot of the keys:
        # deleting from a dict while iterating its live view raises
        # RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Role path when running inside a role, else the playbook basedir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write `contents` to backup/<host>_config.<timestamp>, removing
        any previous backups for the same host; returns the file path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Context manager guarantees the handle is flushed and closed
        # (the original open(...).write(...) leaked the descriptor).
        with open(filename, 'w') as backup_file:
            backup_file.write(contents)
        return filename

    def _handle_template(self):
        """Resolve, load and render the 'src' template, storing the rendered
        text back into the task args.  Raises ValueError when not found."""
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # Bugfix: the original called urlsplit('src') on the literal string,
        # so URL sources were never detected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
        # Bugfix: the attribute name contained a stray colon ("_block:"),
        # so the dependent-role search path was never appended.
        if hasattr(self._task, "_block"):
            dep_chain = self._task._block.get_dep_chain()
            if dep_chain is not None:
                for role in dep_chain:
                    searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
zstyblik/infernal-twin | build/pip/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober covering the multi-byte character-set detectors."""

    # Order is significant: the more self-evident encodings (UTF-8) are
    # consulted before the looser East-Asian multi-byte probers.
    _PROBER_CLASSES = (
        UTF8Prober,
        SJISProber,
        EUCJPProber,
        GB2312Prober,
        EUCKRProber,
        CP949Prober,
        Big5Prober,
        EUCTWProber,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [prober_cls() for prober_cls in self._PROBER_CLASSES]
        self.reset()
| gpl-3.0 |
khkaminska/djangoproject.com | docs/search.py | 8 | 7056 | # -*- coding: utf-8 -*-
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from elasticsearch.helpers import streaming_bulk
from elasticsearch_dsl import DocType, Long, Nested, Object, String, analysis
from elasticsearch_dsl.connections import connections
from .models import Document, document_url
class SearchPaginator(Paginator):
    """
    A better paginator for search results
    The normal Paginator does a .count() query and then a slice. Since ES
    results contain the total number of results, we can take an optimistic
    slice and then adjust the count.
    """

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        This class overrides the default behavior and ignores the upper bound.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number

    def page(self, number):
        """
        Returns a page object.
        This class overrides the default behavior and ignores "orphans" and
        assigns the count from the ES result to the Paginator.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        # Force the search to evaluate and then attach the count. We want to
        # avoid an extra useless query even if there are no results, so we
        # directly fetch the count from hits.
        result = self.object_list[bottom:top].execute()
        page = Page(result.hits, number, self)
        # Update the `_count`.
        self._count = page.object_list.total
        # Also store the aggregations, if any.
        if hasattr(result, 'aggregations'):
            page.aggregations = result.aggregations

        # Now that we have the count validate that the page number isn't higher
        # than the possible number of pages and adjust accordingly.
        # (num_pages is derived from the _count assigned above.)
        if number > self.num_pages:
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page
class ImprovedDocType(DocType):
    """DocType base adding Django-ORM-backed (re)indexing helpers.

    Subclasses set ``model`` and implement ``from_django`` to map ORM
    object fields onto ES fields.
    """

    @classmethod
    def index_all(cls, using=None, delete=False, **kwargs):
        """Bulk-index every object from index_queryset(), yielding the
        (ok, item) pairs produced by streaming_bulk.  With delete=True
        the index is dropped first (400/404 ignored if absent)."""
        def actions_generator():
            for obj in cls.index_queryset().iterator():
                doc_dict = cls.from_django(obj).to_dict()
                doc_dict['_id'] = obj.id
                yield doc_dict

        client = connections.get_connection(using or cls._doc_type.using)
        if delete:
            client.indices.delete(index=cls._doc_type.index, ignore=[400, 404])
        # Make sure the index and its mapping exist before bulk indexing.
        cls._doc_type.init()
        for ok, item in streaming_bulk(client, actions_generator(),
                                       index=cls._doc_type.index,
                                       doc_type=cls._doc_type.name,
                                       raise_on_error=True,
                                       refresh=True,
                                       **kwargs):
            yield ok, item

    @classmethod
    def index_queryset(cls):
        """Queryset of model objects to index; subclasses may narrow it."""
        return cls.model._default_manager.all()

    @classmethod
    def index_object(cls, obj):
        """Index (or re-index) a single model instance."""
        return cls.from_django(obj).save()

    @classmethod
    def unindex_object(cls, obj):
        """Remove a single model instance from the index."""
        return cls.get(id=obj.pk).delete()

    @classmethod
    def from_django(cls, obj):
        raise NotImplementedError('You must define a from_django classmethod '
                                  'to map ORM object fields to ES fields')
# Override elasticsearch-dsl's builtin-tokenizer registry so the
# 'path_hierarchy' and 'whitespace' names used below are accepted as
# builtins.  NOTE(review): presumably the installed elasticsearch-dsl
# release didn't register these names itself -- confirm before upgrading.
analysis.Tokenizer._builtins = analysis.TOKENIZERS = frozenset((
    'keyword', 'standard', 'path_hierarchy', 'whitespace'
))


class PathHierarchyTokenizer(analysis.Tokenizer):
    name = 'path_hierarchy'


class WhitespaceTokenizer(analysis.Tokenizer):
    name = 'whitespace'


# Analyzer for document paths: hierarchy tokens, lower-cased.
path_analyzer = analysis.CustomAnalyzer('path',
                                        tokenizer='path_hierarchy',
                                        filter=['lowercase'])

# Analyzer for titles/content: HTML stripped, whitespace tokens,
# lower-cased, stopwords removed.
lower_whitespace_analyzer = analysis.analyzer('lower_whitespace',
                                              tokenizer='whitespace',
                                              filter=['lowercase', 'stop'],
                                              char_filter=['html_strip'])
class DocumentDocType(ImprovedDocType):
    """
    The main documentation doc type to be used for searching.
    It stores a bit of meta data so we don't have to hit the db
    when rendering search results.
    The search view will be using the 'lang' and 'version' fields
    of the document's release to filter the search results, depending
    which was found in the URL.
    The breadcrumbs are shown under the search result title.
    """
    model = Document

    # ES mapping: title is boosted for relevance; path is stored but not
    # indexed for search; content_raw keeps offsets for highlighting.
    id = Long()
    title = String(analyzer=lower_whitespace_analyzer, boost=1.2)
    path = String(index='no', analyzer=path_analyzer)
    content = String(analyzer=lower_whitespace_analyzer)
    content_raw = String(index_options='offsets')
    release = Object(properties={
        'id': Long(),
        'version': String(index='not_analyzed'),
        'lang': String(index='not_analyzed'),
    })
    breadcrumbs = Nested(properties={
        'title': String(index='not_analyzed'),
        'path': String(index='not_analyzed'),
    })

    class Meta:
        index = 'docs'
        doc_type = 'document'

    @classmethod
    def index_queryset(cls):
        qs = super(DocumentDocType, cls).index_queryset()
        return (
            # don't index the module pages since source code is hard to
            # combine with full text search
            qs.exclude(path__startswith='_modules')
            # not the crazy big flattened index of the CBVs
            .exclude(path__startswith='ref/class-based-views/flattened-index')
            .select_related('release'))

    @classmethod
    def from_django(cls, obj):
        """Map a Document ORM object to an ES document."""
        # turns HTML entities into unicode characters again and removes
        # all HTML tags, aka "plain text" version of the document
        raw_body = strip_tags(unescape_entities(obj.body).replace(u'¶', ''))
        doc = cls(path=obj.path,
                  title=obj.title,
                  content=obj.body,
                  content_raw=raw_body,
                  meta={'id': obj.id})
        doc.release = {
            'id': obj.release.id,
            'lang': obj.release.lang,
            'version': obj.release.version,
        }
        breadcrumbs = []
        for breadcrumb in cls.model.objects.breadcrumbs(obj):
            breadcrumbs.append({
                'title': breadcrumb.title,
                'path': breadcrumb.path,
            })
        doc.breadcrumbs = breadcrumbs
        return doc

    def get_absolute_url(self):
        """Site URL for this document."""
        return document_url(self)
| bsd-3-clause |
google/gazoo-device | examples/device_tests/unittest_example_test.py | 1 | 3386 | """Example reboot device test with GDM + unittest.
Usage:
python3 unittest_example_test.py -d somedevice-1234
See README.md for more details.
"""
import argparse
import logging
import sys
from typing import List, Tuple
import unittest
import gazoo_device
# If using a device controller from an extension package:
# import my_extension_package
# gazoo_device.register(my_extension_package)
# Log line layout used by the stderr handler: "MM-DD HH:MM:SS.mmm LEVEL msg".
_LOG_LINE_FORMAT = "%(asctime)s.%(msecs).03d %(levelname)s %(message)s"
_LOG_LINE_TIME_FORMAT = "%m-%d %H:%M:%S"

_device_name = None  # Global variable set in __main__.
def _parse_cli_args() -> Tuple[str, List[str]]:
"""Parses CLI args to return device name and args for unittest runner."""
parser = argparse.ArgumentParser(
description="Runs a GDM + unittest reboot test on a device. All "
"arguments other than the device name are passed through to "
"the unittest runner.")
parser.add_argument(
"-d", "--device", required=True,
help="GDM device name to run the test on. For example, 'device-1234'. "
"The device must be shown as 'available' or 'connected' in the "
"output of 'gdm devices'.")
args, remaining_argv = parser.parse_known_args()
return args.device, [sys.argv[0]] + remaining_argv
def _set_up_stderr_logging() -> logging.StreamHandler:
"""Directs the output of the root logger to stderr.
Returns:
stderr log handler that was created.
"""
logging.getLogger().setLevel(logging.DEBUG)
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(logging.INFO)
formatter = logging.Formatter(_LOG_LINE_FORMAT, _LOG_LINE_TIME_FORMAT)
stderr_handler.setFormatter(formatter)
logging.getLogger().addHandler(stderr_handler)
return stderr_handler
class UnittestExampleRebootTest(unittest.TestCase):
    """Example reboot device test with GDM + unittest."""

    manager = None  # shared gazoo_device.Manager, created once per class
    _logger_handler = None  # stderr handler installed in setUpClass

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.device = None  # per-test device controller, created in setUp

    @classmethod
    def setUpClass(cls):
        """Creates a Manager instance and sets up stderr logging."""
        super().setUpClass()
        cls._logger_handler = _set_up_stderr_logging()
        cls.manager = gazoo_device.Manager(
            # Avoid log duplication with the test stderr log handler.
            stdout_logging=False)

    @classmethod
    def tearDownClass(cls):
        """Closes the Manager instance and removes the stderr log handler."""
        cls.manager.close()
        cls.manager = None
        logging.getLogger().removeHandler(cls._logger_handler)
        cls._logger_handler = None
        super().tearDownClass()

    def setUp(self):
        """Creates a device controller for the device."""
        super().setUp()
        # _device_name is the module-level global set under __main__.
        self.device = self.manager.create_device(_device_name)
        logging.info("Created device for test: %s", self.device.name)

    def tearDown(self):
        """Closes the device controller."""
        self.device.close()
        self.device = None
        super().tearDown()

    def test_reboot(self):
        """Reboots the device."""
        self.device.reboot()
        self.assertTrue(
            self.device.connected,
            f"Device {self.device.name} did not come back online after reboot")
if __name__ == "__main__":
    # Parse out the device name from CLI arguments and pass the rest to unittest.
    _device_name, unittest_argv = _parse_cli_args()
    unittest.main(argv=unittest_argv, verbosity=2)
| apache-2.0 |
sestrella/ansible | test/units/module_utils/network/ftd/test_common.py | 27 | 11620 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.network.ftd.common import equal_objects, delete_ref_duplicates, construct_ansible_facts
# simple objects
def test_equal_objects_return_false_with_different_length():
    """Objects with different numbers of keys are unequal."""
    assert not equal_objects({'foo': 1}, {'foo': 1, 'bar': 2})


def test_equal_objects_return_false_with_different_fields():
    """Same size but different key names -> unequal."""
    assert not equal_objects({'foo': 1}, {'bar': 1})


def test_equal_objects_return_false_with_different_value_types():
    """int 1 and str '1' do not compare equal."""
    assert not equal_objects({'foo': 1}, {'foo': '1'})


def test_equal_objects_return_false_with_different_values():
    assert not equal_objects({'foo': 1}, {'foo': 2})


def test_equal_objects_return_false_with_different_nested_values():
    """Inequality is detected recursively in nested dicts."""
    assert not equal_objects({'foo': {'bar': 1}}, {'foo': {'bar': 2}})


def test_equal_objects_return_false_with_different_list_length():
    assert not equal_objects({'foo': []}, {'foo': ['bar']})


def test_equal_objects_return_true_with_equal_objects():
    """Key order is irrelevant for equality."""
    assert equal_objects({'foo': 1, 'bar': 2}, {'bar': 2, 'foo': 1})


def test_equal_objects_return_true_with_equal_str_like_values():
    """bytes and unicode carrying the same text compare equal."""
    assert equal_objects({'foo': b'bar'}, {'foo': u'bar'})


def test_equal_objects_return_true_with_equal_nested_dicts():
    assert equal_objects({'foo': {'bar': 1, 'buz': 2}}, {'foo': {'buz': 2, 'bar': 1}})


def test_equal_objects_return_true_with_equal_lists():
    assert equal_objects({'foo': ['bar']}, {'foo': ['bar']})


def test_equal_objects_return_true_with_ignored_fields():
    """Service fields such as 'version' and 'id' are excluded from comparison."""
    assert equal_objects({'foo': 1, 'version': '123', 'id': '123123'}, {'foo': 1})
# objects with object references
def test_equal_objects_return_true_with_different_ref_ids():
    # NOTE(review): the name says "return_true" but the assertion (correctly)
    # expects refs with different ids to be unequal - the name is a misnomer.
    left = {'foo': {'id': '1', 'type': 'network', 'ignored_field': 'foo'}}
    right = {'foo': {'id': '2', 'type': 'network', 'ignored_field': 'bar'}}
    assert not equal_objects(left, right)


def test_equal_objects_return_true_with_different_ref_types():
    # NOTE(review): misnamed like the test above - it asserts inequality.
    left = {'foo': {'id': '1', 'type': 'network', 'ignored_field': 'foo'}}
    right = {'foo': {'id': '1', 'type': 'accessRule', 'ignored_field': 'bar'}}
    assert not equal_objects(left, right)


def test_equal_objects_return_true_with_same_object_refs():
    """Refs with matching (id, type) are equal; any other fields are ignored."""
    left = {'foo': {'id': '1', 'type': 'network', 'ignored_field': 'foo'}}
    right = {'foo': {'id': '1', 'type': 'network', 'ignored_field': 'bar'}}
    assert equal_objects(left, right)
# objects with array of object references
def test_equal_objects_return_false_with_different_array_length():
    """Reference lists of different lengths are unequal."""
    left = {'foo': [{'id': '1', 'type': 'network', 'ignored_field': 'foo'}]}
    right = {'foo': []}
    assert not equal_objects(left, right)


def test_equal_objects_return_false_with_different_array_order():
    """Reference lists are order-sensitive: same refs in swapped order differ."""
    left = {'foo': [{'id': '1', 'type': 'network', 'ignored_field': 'foo'},
                    {'id': '2', 'type': 'network', 'ignored_field': 'bar'}]}
    right = {'foo': [{'id': '2', 'type': 'network', 'ignored_field': 'foo'},
                     {'id': '1', 'type': 'network', 'ignored_field': 'bar'}]}
    assert not equal_objects(left, right)


def test_equal_objects_return_true_with_equal_ref_arrays():
    """List items compare by (id, type); other fields are ignored."""
    left = {'foo': [{'id': '1', 'type': 'network', 'ignored_field': 'foo'}]}
    right = {'foo': [{'id': '1', 'type': 'network', 'ignored_field': 'bar'}]}
    assert equal_objects(left, right)
# objects with nested structures and object references
def test_equal_objects_return_true_with_equal_nested_object_references():
    """Deeply nested refs compare by (id, type); a differing 'name' is ignored."""
    old = {'name': 'foo',
           'config': {'version': '1',
                      'port': {'name': 'oldPortName', 'type': 'port', 'id': '123'}}}
    new = {'name': 'foo',
           'config': {'version': '1',
                      'port': {'name': 'newPortName', 'type': 'port', 'id': '123'}}}
    assert equal_objects(old, new)


def test_equal_objects_return_false_with_different_nested_object_references():
    """A different ref id deep in the structure makes the objects unequal."""
    old = {'name': 'foo',
           'config': {'version': '1',
                      'port': {'name': 'oldPortName', 'type': 'port', 'id': '123'}}}
    new = {'name': 'foo',
           'config': {'version': '1',
                      'port': {'name': 'oldPortName', 'type': 'port', 'id': '234'}}}
    assert not equal_objects(old, new)


def test_equal_objects_return_true_with_equal_nested_list_of_object_references():
    """Nested ref lists match pairwise on (id, type), ignoring extra fields."""
    old = {'name': 'foo',
           'config': {'version': '1',
                      'ports': [{'name': 'oldPortName', 'type': 'port', 'id': '123'},
                                {'name': 'oldPortName2', 'type': 'port', 'id': '234'}]}}
    new = {'name': 'foo',
           'config': {'version': '1',
                      'ports': [{'name': 'newPortName', 'type': 'port', 'id': '123'},
                                {'name': 'newPortName2', 'type': 'port', 'id': '234',
                                 'extraField': 'foo'}]}}
    assert equal_objects(old, new)


def test_equal_objects_return_true_with_reference_list_containing_duplicates():
    """Duplicate refs on one side collapse before the lists are compared."""
    old = {'name': 'foo',
           'config': {'version': '1',
                      'ports': [{'name': 'oldPortName', 'type': 'port', 'id': '123'},
                                {'name': 'oldPortName', 'type': 'port', 'id': '123'},
                                {'name': 'oldPortName2', 'type': 'port', 'id': '234'}]}}
    new = {'name': 'foo',
           'config': {'version': '1',
                      'ports': [{'name': 'newPortName', 'type': 'port', 'id': '123'},
                                {'name': 'newPortName2', 'type': 'port', 'id': '234',
                                 'extraField': 'foo'}]}}
    assert equal_objects(old, new)
def test_delete_ref_duplicates_with_none():
    assert delete_ref_duplicates(None) is None


def test_delete_ref_duplicates_with_empty_dict():
    assert delete_ref_duplicates({}) == {}


def test_delete_ref_duplicates_with_simple_object():
    """Objects without reference lists pass through unchanged."""
    data = {'id': '123', 'name': 'foo', 'type': 'bar', 'values': ['a', 'b']}
    assert delete_ref_duplicates(data) == data


def test_delete_ref_duplicates_with_object_containing_refs():
    """A reference list without duplicates is left as-is."""
    data = {'id': '123', 'name': 'foo', 'type': 'bar',
            'refs': [{'id': '123', 'type': 'baz'},
                     {'id': '234', 'type': 'baz'},
                     {'id': '234', 'type': 'foo'}]}
    assert delete_ref_duplicates(data) == data


def test_delete_ref_duplicates_with_object_containing_duplicate_refs():
    """Repeated (id, type) pairs collapse to a single entry."""
    data = {'id': '123', 'name': 'foo', 'type': 'bar',
            'refs': [{'id': '123', 'type': 'baz'},
                     {'id': '123', 'type': 'baz'},
                     {'id': '234', 'type': 'baz'},
                     {'id': '234', 'type': 'baz'},
                     {'id': '234', 'type': 'foo'}]}
    deduplicated = {'id': '123', 'name': 'foo', 'type': 'bar',
                    'refs': [{'id': '123', 'type': 'baz'},
                             {'id': '234', 'type': 'baz'},
                             {'id': '234', 'type': 'foo'}]}
    assert delete_ref_duplicates(data) == deduplicated


def test_delete_ref_duplicates_with_object_containing_duplicate_refs_in_nested_object():
    """Deduplication also applies to reference lists inside nested objects."""
    data = {'id': '123', 'name': 'foo', 'type': 'bar',
            'children': {'refs': [{'id': '123', 'type': 'baz'},
                                  {'id': '123', 'type': 'baz'},
                                  {'id': '234', 'type': 'baz'},
                                  {'id': '234', 'type': 'baz'},
                                  {'id': '234', 'type': 'foo'}]}}
    deduplicated = {'id': '123', 'name': 'foo', 'type': 'bar',
                    'children': {'refs': [{'id': '123', 'type': 'baz'},
                                          {'id': '234', 'type': 'baz'},
                                          {'id': '234', 'type': 'foo'}]}}
    assert delete_ref_duplicates(data) == deduplicated
def test_construct_ansible_facts_should_make_default_fact_with_name_and_type():
    """With both name and type present, the default fact key is '<type>_<name>'."""
    response = {'id': '123', 'name': 'foo', 'type': 'bar'}
    assert construct_ansible_facts(response, {}) == {'bar_foo': response}


def test_construct_ansible_facts_should_not_make_default_fact_with_no_name():
    # NOTE(review): despite the name, this fixture has a 'name' but no 'type';
    # the sibling test below covers the opposite case - the names look swapped.
    response = {'id': '123', 'name': 'foo'}
    assert construct_ansible_facts(response, {}) == {}


def test_construct_ansible_facts_should_not_make_default_fact_with_no_type():
    response = {'id': '123', 'type': 'bar'}
    assert construct_ansible_facts(response, {}) == {}


def test_construct_ansible_facts_should_use_register_as_when_given():
    """An explicit 'register_as' param overrides the default fact key."""
    response = {'id': '123', 'name': 'foo', 'type': 'bar'}
    params = {'register_as': 'fact_name'}
    assert construct_ansible_facts(response, params) == {'fact_name': response}


def test_construct_ansible_facts_should_extract_items():
    """List responses are unwrapped: the fact value is response['items']."""
    items = [{'id': '123', 'name': 'foo', 'type': 'bar'},
             {'id': '123', 'name': 'foo', 'type': 'bar'}]
    facts = construct_ansible_facts({'items': items}, {'register_as': 'fact_name'})
    assert facts == {'fact_name': items}


def test_construct_ansible_facts_should_ignore_items_with_no_register_as():
    """Without 'register_as' an items response produces no facts."""
    items = [{'id': '123', 'name': 'foo', 'type': 'bar'},
             {'id': '123', 'name': 'foo', 'type': 'bar'}]
    assert construct_ansible_facts({'items': items}, {}) == {}
| gpl-3.0 |
ChengyuSong/xen-arm | tools/python/xen/xend/XendAPIStore.py | 44 | 2431 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 Tom Wilkie <tom.wilkie@gmail.com>
#============================================================================
"""
This is a place to put instances of XenAPI objects,
instead of just holding them in arbitrary places.
All objects which subclass XendBase should use this
mechanism.
You must register both the uuid and type, and get objects
by type, to ensure safety
"""
import threading
__classes = {}
__classes_lock = threading.RLock()


def register(uuid, type, inst):
    """Record inst under the (uuid, type) key and hand it back."""
    key = (uuid, type)
    __classes_lock.acquire()
    try:
        __classes[key] = inst
    finally:
        __classes_lock.release()
    return inst


def deregister(uuid, type):
    """Forget and return whatever was registered under (uuid, type), or None."""
    __classes_lock.acquire()
    try:
        old = get(uuid, type)  # RLock is reentrant, so the nested acquire is safe
        if old is not None:
            del __classes[(uuid, type)]
    finally:
        __classes_lock.release()
    return old


def get(uuid, type):
    """Return the instance registered under (uuid, type), or None."""
    key = (uuid, type)
    __classes_lock.acquire()
    try:
        return __classes.get(key, None)
    finally:
        __classes_lock.release()


def get_all(all_type):
    """Return every registered instance of the given type."""
    __classes_lock.acquire()
    try:
        matches = []
        for (uuid, t), inst in __classes.items():
            if t == all_type:
                matches.append(inst)
        return matches
    finally:
        __classes_lock.release()


def get_all_uuid(all_type):
    """Return the uuid of every registered instance of the given type."""
    __classes_lock.acquire()
    try:
        uuids = []
        for (uuid, t) in __classes.keys():
            if t == all_type:
                uuids.append(uuid)
        return uuids
    finally:
        __classes_lock.release()
| gpl-2.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/mass_mailing/wizard/test_mailing.py | 140 | 2095 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class TestMassMailing(osv.TransientModel):
    """Wizard to send a mass mailing to a comma-separated list of test addresses."""

    _name = 'mail.mass_mailing.test'
    _description = 'Sample Mail Wizard'

    _columns = {
        'email_to': fields.char('Recipients', required=True,
                                help='Comma-separated list of email addresses.'),
        'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
    }

    _defaults = {
        'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    }

    def send_mail_test(self, cr, uid, ids, context=None):
        """Create and send one test mail per recipient, then flag the mailing
        as tested.

        Each mail body is the mailing's body_html with the per-recipient
        unsubscribe link appended.
        """
        Mail = self.pool['mail.mail']
        for wizard in self.browse(cr, uid, ids, context=context):
            mailing = wizard.mass_mailing_id
            test_emails = tools.email_split(wizard.email_to)
            mail_ids = []
            for test_mail in test_emails:
                mail_values = {
                    'email_from': mailing.email_from,
                    'reply_to': mailing.reply_to,
                    'email_to': test_mail,
                    'subject': mailing.name,
                    # Filled in below, once the unsubscribe URL (which needs
                    # the created mail id) can be computed.
                    'body_html': '',
                    'notification': True,
                    'mailing_id': mailing.id,
                    'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
                }
                mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
                unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
                body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
                # Bug fix: write the body WITH the unsubscribe link appended.
                # Previously 'body' was computed but never used - the plain
                # mailing.body_html was written instead, so test mails carried
                # no unsubscribe link.
                Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
                mail_ids.append(mail_mail_obj.id)
            Mail.send(cr, uid, mail_ids, context=context)
        # NOTE(review): only the last browsed mailing is flagged as tested;
        # fine for the usual single-record wizard invocation.
        self.pool['mail.mass_mailing'].write(cr, uid, [mailing.id], {'state': 'test'}, context=context)
        return True
| agpl-3.0 |
aerickson/ansible | lib/ansible/plugins/action/nxos_template.py | 59 | 3804 | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import glob
import urlparse
from ansible.module_utils._text import to_text
from ansible.plugins.action.nxos import ActionModule as _ActionModule
class ActionModule(_ActionModule):
    """nxos_template action: renders the 'src' template locally, then runs the
    module; optionally writes a timestamped config backup per host."""

    def run(self, tmp=None, task_vars=None):
        """Render the template (if any), delegate to the base action, and
        handle the 'backup' option before returning the module result."""
        try:
            self._handle_template()
        except (ValueError, AttributeError) as exc:
            return dict(failed=True, msg=exc.message)

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, __backup__ key may not be in results.
            self._write_backup(task_vars['inventory_hostname'], result['__backup__'])

        # __backup__ is internal transport only - never expose it to the play.
        if '__backup__' in result:
            del result['__backup__']

        return result

    def _get_working_path(self):
        # The role path (when running inside a role) takes precedence over
        # the playbook base directory.
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write contents to backup/<host>_config.<timestamp>, replacing any
        older backups for the same host."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Use a context manager so the file handle is closed promptly
        # (the original leaked the handle via open(...).write(...)).
        with open(filename, 'w') as f:
            f.write(contents)

    def _handle_template(self):
        """Resolve self._task.args['src'] to template text and render it in place.

        Raises ValueError when 'src' is missing; returns silently when the
        resolved source file does not exist (preserving original behaviour).
        """
        src = self._task.args.get('src')
        if not src:
            raise ValueError('missing required arguments: src')

        working_path = self._get_working_path()

        if os.path.isabs(src) or urlparse.urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            return

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # Bug fix: the attribute is '_block'. The original tested
            # hasattr(self._task, "_block:") (stray colon), which is always
            # False, so dependent-role template paths were never searched.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
Lochlan/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/profiler.py | 182 | 9421 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import itertools
_log = logging.getLogger(__name__)
class ProfilerFactory(object):
    """Creates Profiler instances appropriate for the host platform."""

    @classmethod
    def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
        """Return a profiler instance for host, or None if none is available.

        profiler_name selects a specific profiler by its 'name' attribute;
        when omitted, the platform's default profiler is used.
        """
        profilers = cls.profilers_for_platform(host.platform)
        if not profilers:
            return None
        profiler_name = profiler_name or cls.default_profiler_name(host.platform)
        # Generator expression instead of itertools.ifilter (Python 2 only).
        profiler_class = next((profiler for profiler in profilers if profiler.name == profiler_name), None)
        if not profiler_class:
            return None
        # Bug fix: instantiate the class that matched profiler_name.
        # Previously this always instantiated profilers[0], silently ignoring
        # the --profiler=NAME selection.
        return profiler_class(host, executable_path, output_dir, identifier)

    @classmethod
    def default_profiler_name(cls, platform):
        """Name of the preferred (first) profiler for platform, or None."""
        profilers = cls.profilers_for_platform(platform)
        return profilers[0].name if profilers else None

    @classmethod
    def profilers_for_platform(cls, platform):
        """Ordered list of profiler classes usable on platform, preferred first."""
        # GooglePProf requires TCMalloc/google-perftools, but is available everywhere.
        profilers_by_os_name = {
            'mac': [IProfiler, Sample, GooglePProf],
            'linux': [Perf, GooglePProf],
            # Note: freebsd, win32 have no profilers defined yet, thus --profile will be ignored
            # by default, but a profiler can be selected with --profiler=PROFILER explicitly.
        }
        return profilers_by_os_name.get(platform.os_name, [])
class Profiler(object):
    """Base class for profilers: subclasses attach to a pid and report after exit."""

    # Used by ProfilerFactory to lookup a profiler from the --profiler=NAME option.
    name = None

    def __init__(self, host, executable_path, output_dir, identifier=None):
        self._host = host
        self._executable_path = executable_path
        self._output_dir = output_dir
        # NOTE(review): the identifier argument is currently ignored; output is
        # always named "test" (see the FIXME in SingleFileOutputProfiler).
        self._identifier = "test"
        self._host.filesystem.maybe_make_directory(self._output_dir)

    def adjusted_environment(self, env):
        # Hook: subclasses may add profiling-related environment variables.
        return env

    def attach_to_pid(self, pid):
        # Hook: subclasses start profiling the given process id.
        pass

    def profile_after_exit(self):
        # Hook: subclasses report/clean up once the profiled process has exited.
        pass
class SingleFileOutputProfiler(Profiler):
    """Profiler whose results land in a single file (with the given suffix)
    inside the output directory."""

    def __init__(self, host, executable_path, output_dir, output_suffix, identifier=None):
        super(SingleFileOutputProfiler, self).__init__(host, executable_path, output_dir, identifier)
        # FIXME: Currently all reports are kept as test.*, until we fix that, search up to 1000 names before giving up.
        self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix, search_limit=1000)
        # find_unused_filename presumably returns None when the search limit is
        # exhausted - TODO confirm; the assert guards against that case.
        assert(self._output_path)
class GooglePProf(SingleFileOutputProfiler):
    """CPU profiler backed by TCMalloc/google-perftools (google-pprof)."""

    name = 'pprof'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(GooglePProf, self).__init__(host, executable_path, output_dir, "pprof", identifier)

    def adjusted_environment(self, env):
        # TCMalloc starts CPU profiling when CPUPROFILE names an output file.
        env['CPUPROFILE'] = self._output_path
        return env

    def _first_ten_lines_of_profile(self, pprof_output):
        # Capture up to ten lines following the "Total:" header of the report.
        match = re.search("^Total:[^\n]*\n((?:[^\n]*\n){0,10})", pprof_output, re.MULTILINE)
        return match.group(1) if match else None

    def _pprof_path(self):
        # FIXME: We should have code to find the right google-pprof executable, some Googlers have
        # google-pprof installed as "pprof" on their machines for them.
        return '/usr/bin/google-pprof'

    def profile_after_exit(self):
        # google-pprof doesn't check its arguments, so we have to.
        if not (self._host.filesystem.exists(self._output_path)):
            print "Failed to gather profile, %s does not exist." % self._output_path
            return

        pprof_args = [self._pprof_path(), '--text', self._executable_path, self._output_path]
        profile_text = self._host.executive.run_command(pprof_args)
        print "First 10 lines of pprof --text:"
        print self._first_ten_lines_of_profile(profile_text)
        print "http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output."
        print
        print "To interact with the the full profile, including produce graphs:"
        print ' '.join([self._pprof_path(), self._executable_path, self._output_path])
class Perf(SingleFileOutputProfiler):
    """Linux 'perf record' based profiler: attaches to a pid, stops sampling
    when the process dies, and prints a summary of 'perf report'."""

    name = 'perf'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(Perf, self).__init__(host, executable_path, output_dir, "data", identifier)
        self._perf_process = None
        self._pid_being_profiled = None

    def _perf_path(self):
        # FIXME: We may need to support finding the perf binary in other locations.
        return 'perf'

    def attach_to_pid(self, pid):
        # Only one attach per Perf instance is supported.
        assert(not self._perf_process and not self._pid_being_profiled)
        self._pid_being_profiled = pid
        cmd = [self._perf_path(), "record", "--call-graph", "--pid", pid, "--output", self._output_path]
        self._perf_process = self._host.executive.popen(cmd)

    def _first_ten_lines_of_profile(self, perf_output):
        # Skip the '#' comment header, then take up to ten report lines.
        match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
        return match.group(1) if match else None

    def profile_after_exit(self):
        # Perf doesn't automatically watch the attached pid for death notifications,
        # so we have to do it for it, and then tell it its time to stop sampling. :(
        self._host.executive.wait_limited(self._pid_being_profiled, limit_in_seconds=10)
        perf_exitcode = self._perf_process.poll()
        if perf_exitcode is None: # This should always be the case, unless perf error'd out early.
            self._host.executive.interrupt(self._perf_process.pid)
            perf_exitcode = self._perf_process.wait()

        if perf_exitcode not in (0, -2): # The exit code should always be -2, as we're always interrupting perf.
            print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode
            return

        perf_args = [self._perf_path(), 'report', '--call-graph', 'none', '--input', self._output_path]
        print "First 10 lines of 'perf report --call-graph=none':"
        print " ".join(perf_args)
        perf_output = self._host.executive.run_command(perf_args)
        print self._first_ten_lines_of_profile(perf_output)

        print "To view the full profile, run:"
        print ' '.join([self._perf_path(), 'report', '-i', self._output_path])
        print # An extra line between tests looks nicer.
class Sample(SingleFileOutputProfiler):
    """Mac 'sample' based profiler: samples the pid until it exits."""

    name = 'sample'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(Sample, self).__init__(host, executable_path, output_dir, "txt", identifier)
        self._profiler_process = None

    def attach_to_pid(self, pid):
        # -mayDie lets sample finish its report even if the target exits.
        self._profiler_process = self._host.executive.popen(
            ["sample", pid, "-mayDie", "-file", self._output_path])

    def profile_after_exit(self):
        # Block until sample has finished writing its report.
        self._profiler_process.wait()
class IProfiler(SingleFileOutputProfiler):
    """Mac Instruments 'iprofiler' time-profiler wrapper."""

    name = 'iprofiler'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(IProfiler, self).__init__(host, executable_path, output_dir, "dtps", identifier)
        self._profiler_process = None

    def attach_to_pid(self, pid):
        # FIXME: iprofiler requires us to pass the directory separately
        # from the basename of the file, with no control over the extension.
        fs = self._host.filesystem
        report_dir = fs.dirname(self._output_path)
        report_basename = fs.splitext(fs.basename(self._output_path))[0]
        cmd = ["iprofiler", "-timeprofiler", "-a", pid,
               "-d", report_dir, "-o", report_basename]
        # FIXME: Consider capturing instead of letting instruments spam to stderr directly.
        self._profiler_process = self._host.executive.popen(cmd)

    def profile_after_exit(self):
        # Waiting on the profiler gives a nicer user experience than letting it
        # spew to stderr at odd times after the test has finished.
        self._profiler_process.wait()
| bsd-3-clause |
schlos/eden | modules/s3db/dvr.py | 2 | 11017 | # -*- coding: utf-8 -*-
""" Sahana Eden Disaster Victim Registration Model
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DVRModel",)
from gluon import *
from gluon.storage import Storage
from gluon.tools import callback
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3DVRModel(S3Model):
    """
        Allow an individual or household to register to receive compensation
        &/or Distributions of Relief Items
    """

    names = ("dvr_need",
             "dvr_case",
             "dvr_case_need",
             )

    def model(self):
        """ Defines the dvr_need, dvr_case and dvr_case_need tables.
            Returns an empty dict (no names exposed to the s3.* scope).
        """

        T = current.T
        db = current.db
        # Only referenced by the commented-out damage/status represents below.
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT

        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # Needs
        #
        tablename = "dvr_need"
        define_table(tablename,
                     Field("name",
                           # NOTE(review): label text is lowercase "name" -
                           # probably intended as T("Name")
                           label = T("name"),
                           ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD Strings
        ADD_NEED = T("Create Need")
        crud_strings[tablename] = Storage(
            label_create = ADD_NEED,
            title_display = T("Need Details"),
            title_list = T("Needs"),
            title_update = T("Edit Need"),
            label_list_button = T("List Needs"),
            label_delete_button = T("Delete Need"),
            msg_record_created = T("Need added"),
            msg_record_modified = T("Need updated"),
            msg_record_deleted = T("Need deleted"),
            msg_list_empty = T("No Needs found")
        )

        # Reusable foreign key to dvr_need (used below by dvr_case_need)
        represent = S3Represent(lookup=tablename, translate=True)
        need_id = S3ReusableField("need_id", "reference %s" % tablename,
                                  label = T("Need"),
                                  ondelete = "RESTRICT",
                                  represent = represent,
                                  requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "dvr_need.id",
                                                          represent)),
                                  comment=S3AddResourceLink(c="dvr",
                                                            f="need",
                                                            label=ADD_NEED),
                                  )

        # ---------------------------------------------------------------------
        # Case
        #
        #dvr_damage_opts = {
        #    1: T("Very High"),
        #    2: T("High"),
        #    3: T("Medium"),
        #    4: T("Low"),
        #}

        #dvr_status_opts = {
        #    1: T("Open"),
        #    2: T("Accepted"),
        #    3: T("Rejected"),
        #}

        tablename = "dvr_case"
        define_table(tablename,
                     # @ToDo: Option to autogenerate these, like Waybills, et al
                     Field("reference",
                           label = T("Case Number"),
                           ),
                     self.org_organisation_id(),
                     self.pr_person_id(
                        # @ToDo: Modify this to update location_id if the selected person has a Home Address already
                        comment = None,
                        represent = self.pr_PersonRepresent(show_link=True),
                        requires = IS_ADD_PERSON_WIDGET2(),
                        widget = S3AddPersonWidget2(controller="pr"),
                        ),
                     #Field("damage", "integer",
                     #      label= T("Damage Assessment"),
                     #      represent = lambda opt: \
                     #        dvr_damage_opts.get(opt, UNKNOWN_OPT),
                     #      requires = IS_EMPTY_OR(IS_IN_SET(dvr_damage_opts)),
                     #      ),
                     #Field("insurance", "boolean",
                     #      label = T("Insurance"),
                     #      represent = s3_yes_no_represent,
                     #      ),
                     #Field("status", "integer",
                     #      default = 1,
                     #      label = T("Status"),
                     #      represent = lambda opt: \
                     #        dvr_status_opts.get(opt, UNKNOWN_OPT),
                     #      requires = IS_EMPTY_OR(IS_IN_SET(dvr_status_opts)),
                     #      ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Create Case"),
            title_display = T("Case Details"),
            title_list = T("Cases"),
            title_update = T("Edit Case"),
            label_list_button = T("List Cases"),
            label_delete_button = T("Delete Case"),
            msg_record_created = T("Case added"),
            msg_record_modified = T("Case updated"),
            msg_record_deleted = T("Case deleted"),
            msg_list_empty = T("No Cases found")
        )

        # Reusable foreign key to dvr_case (used below by dvr_case_need)
        represent = S3Represent(lookup=tablename, fields=("reference",))
        case_id = S3ReusableField("case_id", "reference %s" % tablename,
                                  label = T("Case"),
                                  ondelete = "RESTRICT",
                                  represent = represent,
                                  requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "dvr_case.id",
                                                          represent)),
                                  )

        # Link cases to needs (via dvr_case_need) and to the person's
        # current/permanent home addresses (pr_address filtered by type)
        self.add_components(tablename,
                            dvr_need = {"link": "dvr_case_need",
                                        "joinby": "case_id",
                                        "key": "need_id",
                                        },
                            pr_address = ({"name": "current_address",
                                           "link": "pr_person",
                                           "joinby": "id",
                                           "key": "pe_id",
                                           "fkey": "pe_id",
                                           "pkey": "person_id",
                                           "filterby": "type",
                                           "filterfor": ("1",),
                                           },
                                          {"name": "permanent_address",
                                           "link": "pr_person",
                                           "joinby": "id",
                                           "key": "pe_id",
                                           "fkey": "pe_id",
                                           "pkey": "person_id",
                                           "filterby": "type",
                                           "filterfor": ("2",),
                                           },
                                          ),
                            )

        crud_form = S3SQLCustomForm("reference",
                                    "organisation_id",
                                    "person_id",
                                    S3SQLInlineComponent("current_address",
                                                         label = T("Current Address"),
                                                         fields = [("", "location_id"),
                                                                   ],
                                                         default = {"type": 1}, # Current Home Address
                                                         link = False,
                                                         multiple = False,
                                                         ),
                                    S3SQLInlineComponent("permanent_address",
                                                         comment = T("If Displaced"),
                                                         label = T("Normal Address"),
                                                         fields = [("", "location_id"),
                                                                   ],
                                                         default = {"type": 2}, # Permanent Home Address
                                                         link = False,
                                                         multiple = False,
                                                         ),
                                    S3SQLInlineLink("need",
                                                    field = "need_id",
                                                    ),
                                    "comments",
                                    )

        self.configure(tablename,
                       crud_form = crud_form,
                       )

        # ---------------------------------------------------------------------
        # Cases <> Needs (many-to-many link table)
        #
        tablename = "dvr_case_need"
        define_table(tablename,
                     case_id(empty = False,
                             ondelete = "CASCADE",
                             ),
                     need_id(empty = False,
                             ondelete = "CASCADE",
                             ),
                     s3_comments(),
                     *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {}
# END =========================================================================
| mit |
renanalencar/hermes | stops/migrations/0005_auto_20150726_1537.py | 1 | 19308 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('stops', '0004_auto_20150725_1628'),
]
operations = [
migrations.AlterField(
model_name='stop',
name='stop_lat',
field=models.DecimalField(verbose_name='Latitude', max_digits=10, decimal_places=6),
),
migrations.AlterField(
model_name='stop',
name='stop_lon',
field=models.DecimalField(verbose_name='Longitude', max_digits=10, decimal_places=6),
),
migrations.AlterField(
model_name='stop',
name='stop_timezone',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Timezone', choices=[(b'Africa/Abidjan', b'Africa/Abidjan'), (b'Africa/Accra', b'Africa/Accra'), (b'Africa/Addis_Ababa', b'Africa/Addis_Ababa'), (b'Africa/Algiers', b'Africa/Algiers'), (b'Africa/Asmara', b'Africa/Asmara'), (b'Africa/Bamako', b'Africa/Bamako'), (b'Africa/Bangui', b'Africa/Bangui'), (b'Africa/Banjul', b'Africa/Banjul'), (b'Africa/Bissau', b'Africa/Bissau'), (b'Africa/Blantyre', b'Africa/Blantyre'), (b'Africa/Brazzaville', b'Africa/Brazzaville'), (b'Africa/Bujumbura', b'Africa/Bujumbura'), (b'Africa/Cairo', b'Africa/Cairo'), (b'Africa/Casablanca', b'Africa/Casablanca'), (b'Africa/Ceuta', b'Africa/Ceuta'), (b'Africa/Conakry', b'Africa/Conakry'), (b'Africa/Dakar', b'Africa/Dakar'), (b'Africa/Dar_es_Salaam', b'Africa/Dar_es_Salaam'), (b'Africa/Djibouti', b'Africa/Djibouti'), (b'Africa/Douala', b'Africa/Douala'), (b'Africa/El_Aaiun', b'Africa/El_Aaiun'), (b'Africa/Freetown', b'Africa/Freetown'), (b'Africa/Gaborone', b'Africa/Gaborone'), (b'Africa/Harare', b'Africa/Harare'), (b'Africa/Johannesburg', b'Africa/Johannesburg'), (b'Africa/Juba', b'Africa/Juba'), (b'Africa/Kampala', b'Africa/Kampala'), (b'Africa/Khartoum', b'Africa/Khartoum'), (b'Africa/Kigali', b'Africa/Kigali'), (b'Africa/Kinshasa', b'Africa/Kinshasa'), (b'Africa/Lagos', b'Africa/Lagos'), (b'Africa/Libreville', b'Africa/Libreville'), (b'Africa/Lome', b'Africa/Lome'), (b'Africa/Luanda', b'Africa/Luanda'), (b'Africa/Lubumbashi', b'Africa/Lubumbashi'), (b'Africa/Lusaka', b'Africa/Lusaka'), (b'Africa/Malabo', b'Africa/Malabo'), (b'Africa/Maputo', b'Africa/Maputo'), (b'Africa/Maseru', b'Africa/Maseru'), (b'Africa/Mbabane', b'Africa/Mbabane'), (b'Africa/Mogadishu', b'Africa/Mogadishu'), (b'Africa/Monrovia', b'Africa/Monrovia'), (b'Africa/Nairobi', b'Africa/Nairobi'), (b'Africa/Ndjamena', b'Africa/Ndjamena'), (b'Africa/Niamey', b'Africa/Niamey'), (b'Africa/Nouakchott', b'Africa/Nouakchott'), (b'Africa/Ouagadougou', 
b'Africa/Ouagadougou'), (b'Africa/Porto-Novo', b'Africa/Porto-Novo'), (b'Africa/Sao_Tome', b'Africa/Sao_Tome'), (b'Africa/Tripoli', b'Africa/Tripoli'), (b'Africa/Tunis', b'Africa/Tunis'), (b'Africa/Windhoek', b'Africa/Windhoek'), (b'America/Adak', b'America/Adak'), (b'America/Anchorage', b'America/Anchorage'), (b'America/Anguilla', b'America/Anguilla'), (b'America/Antigua', b'America/Antigua'), (b'America/Araguaina', b'America/Araguaina'), (b'America/Argentina/Buenos_Aires', b'America/Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'America/Argentina/Catamarca'), (b'America/Argentina/Cordoba', b'America/Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'America/Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'America/Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'America/Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'America/Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'America/Argentina/Salta'), (b'America/Argentina/San_Juan', b'America/Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'America/Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'America/Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'America/Argentina/Ushuaia'), (b'America/Aruba', b'America/Aruba'), (b'America/Asuncion', b'America/Asuncion'), (b'America/Atikokan', b'America/Atikokan'), (b'America/Bahia', b'America/Bahia'), (b'America/Bahia_Banderas', b'America/Bahia_Banderas'), (b'America/Barbados', b'America/Barbados'), (b'America/Belem', b'America/Belem'), (b'America/Belize', b'America/Belize'), (b'America/Blanc-Sablon', b'America/Blanc-Sablon'), (b'America/Boa_Vista', b'America/Boa_Vista'), (b'America/Bogota', b'America/Bogota'), (b'America/Boise', b'America/Boise'), (b'America/Cambridge_Bay', b'America/Cambridge_Bay'), (b'America/Campo_Grande', b'America/Campo_Grande'), (b'America/Cancun', b'America/Cancun'), (b'America/Caracas', b'America/Caracas'), (b'America/Cayenne', b'America/Cayenne'), (b'America/Cayman', b'America/Cayman'), 
(b'America/Chicago', b'America/Chicago'), (b'America/Chihuahua', b'America/Chihuahua'), (b'America/Costa_Rica', b'America/Costa_Rica'), (b'America/Creston', b'America/Creston'), (b'America/Cuiaba', b'America/Cuiaba'), (b'America/Curacao', b'America/Curacao'), (b'America/Danmarkshavn', b'America/Danmarkshavn'), (b'America/Dawson', b'America/Dawson'), (b'America/Dawson_Creek', b'America/Dawson_Creek'), (b'America/Denver', b'America/Denver'), (b'America/Detroit', b'America/Detroit'), (b'America/Dominica', b'America/Dominica'), (b'America/Edmonton', b'America/Edmonton'), (b'America/Eirunepe', b'America/Eirunepe'), (b'America/El_Salvador', b'America/El_Salvador'), (b'America/Fortaleza', b'America/Fortaleza'), (b'America/Glace_Bay', b'America/Glace_Bay'), (b'America/Godthab', b'America/Godthab'), (b'America/Goose_Bay', b'America/Goose_Bay'), (b'America/Grand_Turk', b'America/Grand_Turk'), (b'America/Grenada', b'America/Grenada'), (b'America/Guadeloupe', b'America/Guadeloupe'), (b'America/Guatemala', b'America/Guatemala'), (b'America/Guayaquil', b'America/Guayaquil'), (b'America/Guyana', b'America/Guyana'), (b'America/Halifax', b'America/Halifax'), (b'America/Havana', b'America/Havana'), (b'America/Hermosillo', b'America/Hermosillo'), (b'America/Indiana/Indianapolis', b'America/Indiana/Indianapolis'), (b'America/Indiana/Knox', b'America/Indiana/Knox'), (b'America/Indiana/Marengo', b'America/Indiana/Marengo'), (b'America/Indiana/Petersburg', b'America/Indiana/Petersburg'), (b'America/Indiana/Tell_City', b'America/Indiana/Tell_City'), (b'America/Indiana/Vevay', b'America/Indiana/Vevay'), (b'America/Indiana/Vincennes', b'America/Indiana/Vincennes'), (b'America/Indiana/Winamac', b'America/Indiana/Winamac'), (b'America/Inuvik', b'America/Inuvik'), (b'America/Iqaluit', b'America/Iqaluit'), (b'America/Jamaica', b'America/Jamaica'), (b'America/Juneau', b'America/Juneau'), (b'America/Kentucky/Louisville', b'America/Kentucky/Louisville'), (b'America/Kentucky/Monticello', 
b'America/Kentucky/Monticello'), (b'America/Kralendijk', b'America/Kralendijk'), (b'America/La_Paz', b'America/La_Paz'), (b'America/Lima', b'America/Lima'), (b'America/Los_Angeles', b'America/Los_Angeles'), (b'America/Lower_Princes', b'America/Lower_Princes'), (b'America/Maceio', b'America/Maceio'), (b'America/Managua', b'America/Managua'), (b'America/Manaus', b'America/Manaus'), (b'America/Marigot', b'America/Marigot'), (b'America/Martinique', b'America/Martinique'), (b'America/Matamoros', b'America/Matamoros'), (b'America/Mazatlan', b'America/Mazatlan'), (b'America/Menominee', b'America/Menominee'), (b'America/Merida', b'America/Merida'), (b'America/Metlakatla', b'America/Metlakatla'), (b'America/Mexico_City', b'America/Mexico_City'), (b'America/Miquelon', b'America/Miquelon'), (b'America/Moncton', b'America/Moncton'), (b'America/Monterrey', b'America/Monterrey'), (b'America/Montevideo', b'America/Montevideo'), (b'America/Montserrat', b'America/Montserrat'), (b'America/Nassau', b'America/Nassau'), (b'America/New_York', b'America/New_York'), (b'America/Nipigon', b'America/Nipigon'), (b'America/Nome', b'America/Nome'), (b'America/Noronha', b'America/Noronha'), (b'America/North_Dakota/Beulah', b'America/North_Dakota/Beulah'), (b'America/North_Dakota/Center', b'America/North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'America/North_Dakota/New_Salem'), (b'America/Ojinaga', b'America/Ojinaga'), (b'America/Panama', b'America/Panama'), (b'America/Pangnirtung', b'America/Pangnirtung'), (b'America/Paramaribo', b'America/Paramaribo'), (b'America/Phoenix', b'America/Phoenix'), (b'America/Port-au-Prince', b'America/Port-au-Prince'), (b'America/Port_of_Spain', b'America/Port_of_Spain'), (b'America/Porto_Velho', b'America/Porto_Velho'), (b'America/Puerto_Rico', b'America/Puerto_Rico'), (b'America/Rainy_River', b'America/Rainy_River'), (b'America/Rankin_Inlet', b'America/Rankin_Inlet'), (b'America/Recife', b'America/Recife'), (b'America/Regina', b'America/Regina'), 
(b'America/Resolute', b'America/Resolute'), (b'America/Rio_Branco', b'America/Rio_Branco'), (b'America/Santa_Isabel', b'America/Santa_Isabel'), (b'America/Santarem', b'America/Santarem'), (b'America/Santiago', b'America/Santiago'), (b'America/Santo_Domingo', b'America/Santo_Domingo'), (b'America/Sao_Paulo', b'America/Sao_Paulo'), (b'America/Scoresbysund', b'America/Scoresbysund'), (b'America/Sitka', b'America/Sitka'), (b'America/St_Barthelemy', b'America/St_Barthelemy'), (b'America/St_Johns', b'America/St_Johns'), (b'America/St_Kitts', b'America/St_Kitts'), (b'America/St_Lucia', b'America/St_Lucia'), (b'America/St_Thomas', b'America/St_Thomas'), (b'America/St_Vincent', b'America/St_Vincent'), (b'America/Swift_Current', b'America/Swift_Current'), (b'America/Tegucigalpa', b'America/Tegucigalpa'), (b'America/Thule', b'America/Thule'), (b'America/Thunder_Bay', b'America/Thunder_Bay'), (b'America/Tijuana', b'America/Tijuana'), (b'America/Toronto', b'America/Toronto'), (b'America/Tortola', b'America/Tortola'), (b'America/Vancouver', b'America/Vancouver'), (b'America/Whitehorse', b'America/Whitehorse'), (b'America/Winnipeg', b'America/Winnipeg'), (b'America/Yakutat', b'America/Yakutat'), (b'America/Yellowknife', b'America/Yellowknife'), (b'Antarctica/Casey', b'Antarctica/Casey'), (b'Antarctica/Davis', b'Antarctica/Davis'), (b'Antarctica/DumontDUrville', b'Antarctica/DumontDUrville'), (b'Antarctica/Macquarie', b'Antarctica/Macquarie'), (b'Antarctica/Mawson', b'Antarctica/Mawson'), (b'Antarctica/McMurdo', b'Antarctica/McMurdo'), (b'Antarctica/Palmer', b'Antarctica/Palmer'), (b'Antarctica/Rothera', b'Antarctica/Rothera'), (b'Antarctica/Syowa', b'Antarctica/Syowa'), (b'Antarctica/Troll', b'Antarctica/Troll'), (b'Antarctica/Vostok', b'Antarctica/Vostok'), (b'Arctic/Longyearbyen', b'Arctic/Longyearbyen'), (b'Asia/Aden', b'Asia/Aden'), (b'Asia/Almaty', b'Asia/Almaty'), (b'Asia/Amman', b'Asia/Amman'), (b'Asia/Anadyr', b'Asia/Anadyr'), (b'Asia/Aqtau', b'Asia/Aqtau'), 
(b'Asia/Aqtobe', b'Asia/Aqtobe'), (b'Asia/Ashgabat', b'Asia/Ashgabat'), (b'Asia/Baghdad', b'Asia/Baghdad'), (b'Asia/Bahrain', b'Asia/Bahrain'), (b'Asia/Baku', b'Asia/Baku'), (b'Asia/Bangkok', b'Asia/Bangkok'), (b'Asia/Beirut', b'Asia/Beirut'), (b'Asia/Bishkek', b'Asia/Bishkek'), (b'Asia/Brunei', b'Asia/Brunei'), (b'Asia/Chita', b'Asia/Chita'), (b'Asia/Choibalsan', b'Asia/Choibalsan'), (b'Asia/Colombo', b'Asia/Colombo'), (b'Asia/Damascus', b'Asia/Damascus'), (b'Asia/Dhaka', b'Asia/Dhaka'), (b'Asia/Dili', b'Asia/Dili'), (b'Asia/Dubai', b'Asia/Dubai'), (b'Asia/Dushanbe', b'Asia/Dushanbe'), (b'Asia/Gaza', b'Asia/Gaza'), (b'Asia/Hebron', b'Asia/Hebron'), (b'Asia/Ho_Chi_Minh', b'Asia/Ho_Chi_Minh'), (b'Asia/Hong_Kong', b'Asia/Hong_Kong'), (b'Asia/Hovd', b'Asia/Hovd'), (b'Asia/Irkutsk', b'Asia/Irkutsk'), (b'Asia/Jakarta', b'Asia/Jakarta'), (b'Asia/Jayapura', b'Asia/Jayapura'), (b'Asia/Jerusalem', b'Asia/Jerusalem'), (b'Asia/Kabul', b'Asia/Kabul'), (b'Asia/Kamchatka', b'Asia/Kamchatka'), (b'Asia/Karachi', b'Asia/Karachi'), (b'Asia/Kathmandu', b'Asia/Kathmandu'), (b'Asia/Khandyga', b'Asia/Khandyga'), (b'Asia/Kolkata', b'Asia/Kolkata'), (b'Asia/Krasnoyarsk', b'Asia/Krasnoyarsk'), (b'Asia/Kuala_Lumpur', b'Asia/Kuala_Lumpur'), (b'Asia/Kuching', b'Asia/Kuching'), (b'Asia/Kuwait', b'Asia/Kuwait'), (b'Asia/Macau', b'Asia/Macau'), (b'Asia/Magadan', b'Asia/Magadan'), (b'Asia/Makassar', b'Asia/Makassar'), (b'Asia/Manila', b'Asia/Manila'), (b'Asia/Muscat', b'Asia/Muscat'), (b'Asia/Nicosia', b'Asia/Nicosia'), (b'Asia/Novokuznetsk', b'Asia/Novokuznetsk'), (b'Asia/Novosibirsk', b'Asia/Novosibirsk'), (b'Asia/Omsk', b'Asia/Omsk'), (b'Asia/Oral', b'Asia/Oral'), (b'Asia/Phnom_Penh', b'Asia/Phnom_Penh'), (b'Asia/Pontianak', b'Asia/Pontianak'), (b'Asia/Pyongyang', b'Asia/Pyongyang'), (b'Asia/Qatar', b'Asia/Qatar'), (b'Asia/Qyzylorda', b'Asia/Qyzylorda'), (b'Asia/Rangoon', b'Asia/Rangoon'), (b'Asia/Riyadh', b'Asia/Riyadh'), (b'Asia/Sakhalin', b'Asia/Sakhalin'), (b'Asia/Samarkand', 
b'Asia/Samarkand'), (b'Asia/Seoul', b'Asia/Seoul'), (b'Asia/Shanghai', b'Asia/Shanghai'), (b'Asia/Singapore', b'Asia/Singapore'), (b'Asia/Srednekolymsk', b'Asia/Srednekolymsk'), (b'Asia/Taipei', b'Asia/Taipei'), (b'Asia/Tashkent', b'Asia/Tashkent'), (b'Asia/Tbilisi', b'Asia/Tbilisi'), (b'Asia/Tehran', b'Asia/Tehran'), (b'Asia/Thimphu', b'Asia/Thimphu'), (b'Asia/Tokyo', b'Asia/Tokyo'), (b'Asia/Ulaanbaatar', b'Asia/Ulaanbaatar'), (b'Asia/Urumqi', b'Asia/Urumqi'), (b'Asia/Ust-Nera', b'Asia/Ust-Nera'), (b'Asia/Vientiane', b'Asia/Vientiane'), (b'Asia/Vladivostok', b'Asia/Vladivostok'), (b'Asia/Yakutsk', b'Asia/Yakutsk'), (b'Asia/Yekaterinburg', b'Asia/Yekaterinburg'), (b'Asia/Yerevan', b'Asia/Yerevan'), (b'Atlantic/Azores', b'Atlantic/Azores'), (b'Atlantic/Bermuda', b'Atlantic/Bermuda'), (b'Atlantic/Canary', b'Atlantic/Canary'), (b'Atlantic/Cape_Verde', b'Atlantic/Cape_Verde'), (b'Atlantic/Faroe', b'Atlantic/Faroe'), (b'Atlantic/Madeira', b'Atlantic/Madeira'), (b'Atlantic/Reykjavik', b'Atlantic/Reykjavik'), (b'Atlantic/South_Georgia', b'Atlantic/South_Georgia'), (b'Atlantic/St_Helena', b'Atlantic/St_Helena'), (b'Atlantic/Stanley', b'Atlantic/Stanley'), (b'Australia/Adelaide', b'Australia/Adelaide'), (b'Australia/Brisbane', b'Australia/Brisbane'), (b'Australia/Broken_Hill', b'Australia/Broken_Hill'), (b'Australia/Currie', b'Australia/Currie'), (b'Australia/Darwin', b'Australia/Darwin'), (b'Australia/Eucla', b'Australia/Eucla'), (b'Australia/Hobart', b'Australia/Hobart'), (b'Australia/Lindeman', b'Australia/Lindeman'), (b'Australia/Lord_Howe', b'Australia/Lord_Howe'), (b'Australia/Melbourne', b'Australia/Melbourne'), (b'Australia/Perth', b'Australia/Perth'), (b'Australia/Sydney', b'Australia/Sydney'), (b'Canada/Atlantic', b'Canada/Atlantic'), (b'Canada/Central', b'Canada/Central'), (b'Canada/Eastern', b'Canada/Eastern'), (b'Canada/Mountain', b'Canada/Mountain'), (b'Canada/Newfoundland', b'Canada/Newfoundland'), (b'Canada/Pacific', b'Canada/Pacific'), (b'Europe/Amsterdam', 
b'Europe/Amsterdam'), (b'Europe/Andorra', b'Europe/Andorra'), (b'Europe/Athens', b'Europe/Athens'), (b'Europe/Belgrade', b'Europe/Belgrade'), (b'Europe/Berlin', b'Europe/Berlin'), (b'Europe/Bratislava', b'Europe/Bratislava'), (b'Europe/Brussels', b'Europe/Brussels'), (b'Europe/Bucharest', b'Europe/Bucharest'), (b'Europe/Budapest', b'Europe/Budapest'), (b'Europe/Busingen', b'Europe/Busingen'), (b'Europe/Chisinau', b'Europe/Chisinau'), (b'Europe/Copenhagen', b'Europe/Copenhagen'), (b'Europe/Dublin', b'Europe/Dublin'), (b'Europe/Gibraltar', b'Europe/Gibraltar'), (b'Europe/Guernsey', b'Europe/Guernsey'), (b'Europe/Helsinki', b'Europe/Helsinki'), (b'Europe/Isle_of_Man', b'Europe/Isle_of_Man'), (b'Europe/Istanbul', b'Europe/Istanbul'), (b'Europe/Jersey', b'Europe/Jersey'), (b'Europe/Kaliningrad', b'Europe/Kaliningrad'), (b'Europe/Kiev', b'Europe/Kiev'), (b'Europe/Lisbon', b'Europe/Lisbon'), (b'Europe/Ljubljana', b'Europe/Ljubljana'), (b'Europe/London', b'Europe/London'), (b'Europe/Luxembourg', b'Europe/Luxembourg'), (b'Europe/Madrid', b'Europe/Madrid'), (b'Europe/Malta', b'Europe/Malta'), (b'Europe/Mariehamn', b'Europe/Mariehamn'), (b'Europe/Minsk', b'Europe/Minsk'), (b'Europe/Monaco', b'Europe/Monaco'), (b'Europe/Moscow', b'Europe/Moscow'), (b'Europe/Oslo', b'Europe/Oslo'), (b'Europe/Paris', b'Europe/Paris'), (b'Europe/Podgorica', b'Europe/Podgorica'), (b'Europe/Prague', b'Europe/Prague'), (b'Europe/Riga', b'Europe/Riga'), (b'Europe/Rome', b'Europe/Rome'), (b'Europe/Samara', b'Europe/Samara'), (b'Europe/San_Marino', b'Europe/San_Marino'), (b'Europe/Sarajevo', b'Europe/Sarajevo'), (b'Europe/Simferopol', b'Europe/Simferopol'), (b'Europe/Skopje', b'Europe/Skopje'), (b'Europe/Sofia', b'Europe/Sofia'), (b'Europe/Stockholm', b'Europe/Stockholm'), (b'Europe/Tallinn', b'Europe/Tallinn'), (b'Europe/Tirane', b'Europe/Tirane'), (b'Europe/Uzhgorod', b'Europe/Uzhgorod'), (b'Europe/Vaduz', b'Europe/Vaduz'), (b'Europe/Vatican', b'Europe/Vatican'), (b'Europe/Vienna', b'Europe/Vienna'), 
(b'Europe/Vilnius', b'Europe/Vilnius'), (b'Europe/Volgograd', b'Europe/Volgograd'), (b'Europe/Warsaw', b'Europe/Warsaw'), (b'Europe/Zagreb', b'Europe/Zagreb'), (b'Europe/Zaporozhye', b'Europe/Zaporozhye'), (b'Europe/Zurich', b'Europe/Zurich'), (b'GMT', b'GMT'), (b'Indian/Antananarivo', b'Indian/Antananarivo'), (b'Indian/Chagos', b'Indian/Chagos'), (b'Indian/Christmas', b'Indian/Christmas'), (b'Indian/Cocos', b'Indian/Cocos'), (b'Indian/Comoro', b'Indian/Comoro'), (b'Indian/Kerguelen', b'Indian/Kerguelen'), (b'Indian/Mahe', b'Indian/Mahe'), (b'Indian/Maldives', b'Indian/Maldives'), (b'Indian/Mauritius', b'Indian/Mauritius'), (b'Indian/Mayotte', b'Indian/Mayotte'), (b'Indian/Reunion', b'Indian/Reunion'), (b'Pacific/Apia', b'Pacific/Apia'), (b'Pacific/Auckland', b'Pacific/Auckland'), (b'Pacific/Bougainville', b'Pacific/Bougainville'), (b'Pacific/Chatham', b'Pacific/Chatham'), (b'Pacific/Chuuk', b'Pacific/Chuuk'), (b'Pacific/Easter', b'Pacific/Easter'), (b'Pacific/Efate', b'Pacific/Efate'), (b'Pacific/Enderbury', b'Pacific/Enderbury'), (b'Pacific/Fakaofo', b'Pacific/Fakaofo'), (b'Pacific/Fiji', b'Pacific/Fiji'), (b'Pacific/Funafuti', b'Pacific/Funafuti'), (b'Pacific/Galapagos', b'Pacific/Galapagos'), (b'Pacific/Gambier', b'Pacific/Gambier'), (b'Pacific/Guadalcanal', b'Pacific/Guadalcanal'), (b'Pacific/Guam', b'Pacific/Guam'), (b'Pacific/Honolulu', b'Pacific/Honolulu'), (b'Pacific/Johnston', b'Pacific/Johnston'), (b'Pacific/Kiritimati', b'Pacific/Kiritimati'), (b'Pacific/Kosrae', b'Pacific/Kosrae'), (b'Pacific/Kwajalein', b'Pacific/Kwajalein'), (b'Pacific/Majuro', b'Pacific/Majuro'), (b'Pacific/Marquesas', b'Pacific/Marquesas'), (b'Pacific/Midway', b'Pacific/Midway'), (b'Pacific/Nauru', b'Pacific/Nauru'), (b'Pacific/Niue', b'Pacific/Niue'), (b'Pacific/Norfolk', b'Pacific/Norfolk'), (b'Pacific/Noumea', b'Pacific/Noumea'), (b'Pacific/Pago_Pago', b'Pacific/Pago_Pago'), (b'Pacific/Palau', b'Pacific/Palau'), (b'Pacific/Pitcairn', b'Pacific/Pitcairn'), (b'Pacific/Pohnpei', 
b'Pacific/Pohnpei'), (b'Pacific/Port_Moresby', b'Pacific/Port_Moresby'), (b'Pacific/Rarotonga', b'Pacific/Rarotonga'), (b'Pacific/Saipan', b'Pacific/Saipan'), (b'Pacific/Tahiti', b'Pacific/Tahiti'), (b'Pacific/Tarawa', b'Pacific/Tarawa'), (b'Pacific/Tongatapu', b'Pacific/Tongatapu'), (b'Pacific/Wake', b'Pacific/Wake'), (b'Pacific/Wallis', b'Pacific/Wallis'), (b'US/Alaska', b'US/Alaska'), (b'US/Arizona', b'US/Arizona'), (b'US/Central', b'US/Central'), (b'US/Eastern', b'US/Eastern'), (b'US/Hawaii', b'US/Hawaii'), (b'US/Mountain', b'US/Mountain'), (b'US/Pacific', b'US/Pacific'), (b'UTC', b'UTC')]),
),
]
| mit |
lbybee/kaggle_contests | titanic/.ropeproject/config.py | 387 | 3461 | # The default ``config.py``
def set_prefs(prefs):
    """Hook called by rope before the project is opened.

    Installs the default project preferences into *prefs*, a dict-like
    preferences object supplied by rope.
    """
    # Files/folders matching these patterns are invisible to rope:
    # changes to them are not recorded in history and they are not
    # returned by `Project.get_files()`.  ``?`` and ``*`` match any
    # characters except slashes; ``//`` makes ``*`` match slashes too
    # (e.g. 'build//*.o' matches 'build/sub/lib.o').
    prefs['ignored_resources'] = [
        '*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git',
    ]

    # Optional settings, left at rope's defaults.  Uncomment to override:
    #prefs['python_files'] = ['*.py']      # which files count as python
    #prefs.add('source_folders', 'src')    # extra source folders (use '/')
    #prefs.add('python_path', '~/python/') # extend module lookup path

    # Remaining preferences are simple key/value pairs; apply them in one
    # pass (dict preserves insertion order, so assignment order is kept).
    defaults = {
        # Persist rope's object DB between sessions, uncompressed.
        'save_objectdb': True,
        'compress_objectdb': False,
        # Run static object analysis on every module save.
        'automatic_soa': True,
        # Call depth followed during static object analysis.
        'soa_followed_calls': 0,
        # Dynamic object analysis while running modules/tests (slower).
        'perform_doa': True,
        # Verify object DB consistency at runtime.
        'validate_objectdb': True,
        # Undo history: depth, persistence across sessions, compression.
        'max_history_items': 32,
        'save_history': True,
        'compress_history': False,
        # Spaces per indent level (PEP 8; rope's tests assume 4).
        'indent_size': 4,
        # Builtin / C-extension modules rope may import and inspect;
        # the next flag whitelists all standard c-extensions at once.
        'extension_modules': [],
        'import_dynload_stdmods': True,
        # If True, modules with syntax errors are treated as empty
        # instead of raising rope.base.exceptions.ModuleSyntaxError.
        'ignore_syntax_errors': False,
        # If True, unresolvable imports are silently dropped instead of
        # appearing in the importing namespace.
        'ignore_bad_imports': False,
    }
    for key, value in defaults.items():
        prefs[key] = value
def project_opened(project):
    """Hook called by rope right after the project has been opened.

    *project* is the opened rope project; this default implementation
    performs no project-specific startup actions.
    """
    # Add any post-open customization here.
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.