repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
ganzenmg/lammps_current | python/examples/pizza/pdbfile.py | 85 | 9342 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# pdb tool
# Module metadata consumed by the Pizza.py help system: "oneline" is the
# one-line summary shown in tool listings, "docstr" is the full help text.
oneline = "Read, write PDB files in combo with LAMMPS snapshots"

docstr = """
p = pdbfile("3CRO") create pdb object from PDB file or WWW
p = pdbfile("pep1 pep2") read in multiple PDB files
p = pdbfile("pep*") can use wildcards
p = pdbfile(d) read in snapshot data with no PDB file
p = pdbfile("3CRO",d) read in single PDB file with snapshot data
string arg contains one or more PDB files
don't need .pdb suffix except wildcard must expand to file.pdb
if only one 4-char file specified and it is not found,
it will be downloaded from http://www.rcsb.org as 3CRO.pdb
d arg is object with atom coordinates (dump, data)
p.one() write all output as one big PDB file to tmp.pdb
p.one("mine") write to mine.pdb
p.many() write one PDB file per snapshot: tmp0000.pdb, ...
p.many("mine") write as mine0000.pdb, mine0001.pdb, ...
p.single(N) write timestamp N as tmp.pdb
p.single(N,"new") write as new.pdb
how new PDB files are created depends on constructor inputs:
if no d: one new PDB file for each file in string arg (just a copy)
if only d specified: one new PDB file per snapshot in generic format
if one file in str arg and d: one new PDB file per snapshot
using input PDB file as template
multiple input PDB files with a d is not allowed
index,time,flag = p.iterator(0)
index,time,flag = p.iterator(1)
iterator = loop over number of PDB files
call first time with arg = 0, thereafter with arg = 1
N = length = # of snapshots or # of input PDB files
index = index of snapshot or input PDB file (0 to N-1)
time = timestep value (time stamp for snapshot, index for multiple PDB)
flag = -1 when iteration is done, 1 otherwise
typically call p.single(time) in iterated loop to write out one PDB file
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# ToDo list
# for generic PDB file (no template) from a LJ unit system,
# the atoms in PDB file are too close together
# Variables
# files = list of input PDB files
# data = data object (ccell,data,dump) to read snapshots from
# atomlines = dict of ATOM lines in original PDB file
# key = atom id, value = tuple of (beginning,end) of line
# Imports and external programs
import sys, types, glob, urllib
# Class definition
class pdbfile:
# --------------------------------------------------------------------
def __init__(self,*args):
if len(args) == 1:
if type(args[0]) is types.StringType:
filestr = args[0]
self.data = None
else:
filestr = None
self.data = args[0]
elif len(args) == 2:
filestr = args[0]
self.data = args[1]
else: raise StandardError, "invalid args for pdb()"
# flist = full list of all PDB input file names
# append .pdb if needed
if filestr:
list = filestr.split()
flist = []
for file in list:
if '*' in file: flist += glob.glob(file)
else: flist.append(file)
for i in xrange(len(flist)):
if flist[i][-4:] != ".pdb": flist[i] += ".pdb"
if len(flist) == 0:
raise StandardError,"no PDB file specified"
self.files = flist
else: self.files = []
if len(self.files) > 1 and self.data:
raise StandardError, "cannot use multiple PDB files with data object"
if len(self.files) == 0 and not self.data:
raise StandardError, "no input PDB file(s)"
# grab PDB file from http://rcsb.org if not a local file
if len(self.files) == 1 and len(self.files[0]) == 8:
try:
open(self.files[0],'r').close()
except:
print "downloading %s from http://rcsb.org" % self.files[0]
fetchstr = "http://www.rcsb.org/pdb/cgi/export.cgi/%s?format=PDB&pdbId=2cpk&compression=None" % self.files[0]
urllib.urlretrieve(fetchstr,self.files[0])
if self.data and len(self.files): self.read_template(self.files[0])
# --------------------------------------------------------------------
# write a single large PDB file for concatenating all input data or files
# if data exists:
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# concatenate all input files to one output file
def one(self,*args):
if len(args) == 0: file = "tmp.pdb"
elif args[0][-4:] == ".pdb": file = args[0]
else: file = args[0] + ".pdb"
f = open(file,'w')
# use template PDB file with each snapshot
if self.data:
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
self.convert(f,which)
print >>f,"END"
print time,
sys.stdout.flush()
n += 1
else:
for file in self.files:
f.write(open(file,'r').read())
print >>f,"END"
print file,
sys.stdout.flush()
f.close()
print "\nwrote %d datasets to %s in PDB format" % (n,file)
# --------------------------------------------------------------------
# write series of numbered PDB files
# if data exists:
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# just copy all input files to output files
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
if self.data:
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".pdb"
f = open(file,'w')
self.convert(f,which)
f.close()
print time,
sys.stdout.flush()
n += 1
else:
n = 0
for infile in self.files:
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".pdb"
f = open(file,'w')
f.write(open(infile,'r').read())
f.close()
print file,
sys.stdout.flush()
n += 1
print "\nwrote %d datasets to %s*.pdb in PDB format" % (n,root)
# --------------------------------------------------------------------
# write a single PDB file
# if data exists:
# time is timestamp in snapshot
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# time is index into list of input PDB files
# just copy one input file to output file
def single(self,time,*args):
if len(args) == 0: file = "tmp.pdb"
elif args[0][-4:] == ".pdb": file = args[0]
else: file = args[0] + ".pdb"
f = open(file,'w')
if self.data:
which = self.data.findtime(time)
self.convert(f,which)
else:
f.write(open(self.files[time],'r').read())
f.close()
# --------------------------------------------------------------------
# iterate over list of input files or selected snapshots
# latter is done via data objects iterator
def iterator(self,flag):
if not self.data:
if not flag: self.iterate = 0
else:
self.iterate += 1
if self.iterate > len(self.files): return 0,0,-1
return self.iterate,self.iterate,1
return self.data.iterator(flag)
# --------------------------------------------------------------------
# read a PDB file and store ATOM lines
def read_template(self,file):
lines = open(file,'r').readlines()
self.atomlines = {}
for line in lines:
if line.find("ATOM") == 0:
tag = int(line[4:11])
begin = line[:30]
end = line[54:]
self.atomlines[tag] = (begin,end)
# --------------------------------------------------------------------
# convert one set of atoms to PDB format and write to f
def convert(self,f,which):
time,box,atoms,bonds,tris,lines = self.data.viz(which)
if len(self.files):
for atom in atoms:
id = atom[0]
if self.atomlines.has_key(id):
(begin,end) = self.atomlines[id]
line = "%s%8.3f%8.3f%8.3f%s" % (begin,atom[2],atom[3],atom[4],end)
print >>f,line,
else:
for atom in atoms:
begin = "ATOM %6d %2d R00 1 " % (atom[0],atom[1])
middle = "%8.3f%8.3f%8.3f" % (atom[2],atom[3],atom[4])
end = " 1.00 0.00 NONE"
print >>f,begin+middle+end
| gpl-2.0 |
Lind-Project/native_client | tests/spec2k/extract_timings.py | 1 | 3119 | #!/usr/bin/python2
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script produces csv data from multiple benchmarking runs with the
# spec2k harness.
#
# A typical usage would be
#
# export SPEC_RUN_REPETITIONS=3
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting1
# [change the compiler settings]
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting2
#
# tests/spec2k/extract_timings.py time.inline time.noinline time.lowinline
#
# which produces output like:
# name , inline , noinline , lowinline
# ammp , 250.47 , 263.83 , 262.20
# art , 222.12 , 219.36 , 259.28
# bzip2 , 179.05 , 194.05 , missing
# crafty , 60.24 , 73.33 , missing
# ...
#
# Alternatively, if your data already has the form:
#
# <benchmark> <setting> <value>
#
# You can run the tool like so:
# tests/spec2k/extract_timings.py < <data-file>
import sys
# The names of the individual settings, derived from the filenames, in the
# order they were given on the command line.
SETTINGS = []

# Dictionary of dictionaries, accessed like so:
#   BENCHMARKS['benchmark']['setting']
BENCHMARKS = {}
def AddDataPoint(benchmark, setting, v):
  """Record value v for (benchmark, setting), tracking setting order."""
  if setting not in SETTINGS:
    # TODO: linear search is slightly inefficient
    SETTINGS.append(setting)
  BENCHMARKS.setdefault(benchmark, {})[setting] = v
def ExtractResults(name, inp):
  """Parse RESULT lines from inp, recording the minimum timing per benchmark.

  Lines look like:
    'RESULT runtime_equake: pnacl.opt.x8664= [107.36,116.28,116.4] secs'
  """
  for line in inp:
    if not line.startswith('RESULT'):
      continue
    tokens = line.split()
    assert tokens[0] == 'RESULT'
    assert tokens[1].endswith(':')
    assert tokens[2].endswith('=')
    assert tokens[3].startswith('[') and tokens[3].endswith(']')
    # benchmark name = suffix of e.g. 'runtime_equake'
    benchmark = tokens[1][:-1].split('_')[-1]
    values = [float(v) for v in tokens[3][1:-1].split(',')]
    AddDataPoint(benchmark, name, min(values))
# Note: we are intentionally not using the csv module
# as it does not provide nicely formatted output
def DumpRow(row):
  """Write one aligned CSV row: left-justified name, right-justified values."""
  pieces = ['%-20s' % row[0]]
  for val in row[1:]:
    fmt = ', %10s' if type(val) == str else ', %10.2f'
    pieces.append(fmt % val)
  sys.stdout.write(''.join(pieces) + '\n')
def DumpCsv():
  """Emit the collected benchmark table as aligned CSV on stdout."""
  DumpRow(['name'] + SETTINGS)
  for benchmark in sorted(BENCHMARKS.keys()):
    values = BENCHMARKS[benchmark]
    # missing (benchmark, setting) combinations are marked explicitly
    DumpRow([benchmark] + [values.get(s, 'missing') for s in SETTINGS])
# Entry point.  With file arguments, parse each file (the setting name is
# taken from the filename extension); otherwise read
# "<benchmark> <setting> <value>" triples from stdin.  Finally dump the
# collected table as CSV.
if len(sys.argv) > 1:
  for f in sys.argv[1:]:
    # setting name = last dot-separated component of the filename
    setting = f.split('.')[-1]
    fin = open(f)
    ExtractResults(setting, fin)
    fin.close()
else:
  for line in sys.stdin:
    tokens = line.split()
    if not tokens: continue
    assert len(tokens) == 3
    AddDataPoint(tokens[0], tokens[1], float(tokens[2]))

DumpCsv()
| bsd-3-clause |
ressu/SickGear | lib/html5lib/trie/py.py | 817 | 1763 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Linear (sorted-list) trie implementation backed by a plain dict.

    Prefix queries use binary search over the sorted key list; the most
    recent prefix lookup is cached to narrow subsequent searches.
    """

    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        # (lo, hi) bounds in self._keys for keys starting with _cachestr
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with *prefix* (all keys if None)."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        # Bug fix: bound the scan by len(self._keys).  The unguarded
        # startswith loop raised IndexError whenever every key from
        # `start` onwards matched the prefix.
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with *prefix*."""
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        return self._keys[i].startswith(prefix)
| gpl-3.0 |
leppa/home-assistant | homeassistant/components/mhz19/sensor.py | 3 | 4631 | """Support for CO2 sensor connected to a serial port."""
from datetime import timedelta
import logging
from pmsensor import co2sensor
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
_LOGGER = logging.getLogger(__name__)

# Configuration key for the serial port the MH-Z19 is attached to.
CONF_SERIAL_DEVICE = "serial_device"

# Minimum interval between sensor polls (enforced via Throttle in MHZClient).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)

DEFAULT_NAME = "CO2 Sensor"
ATTR_CO2_CONCENTRATION = "co2_concentration"

SENSOR_TEMPERATURE = "temperature"
SENSOR_CO2 = "co2"
# sensor type -> [friendly name, unit]; the temperature unit is filled in
# at setup time from the Home Assistant configured unit system.
SENSOR_TYPES = {SENSOR_TEMPERATURE: ["Temperature", None], SENSOR_CO2: ["CO2", "ppm"]}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Required(CONF_SERIAL_DEVICE): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=[SENSOR_CO2]): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available CO2 sensors."""
    serial_device = config.get(CONF_SERIAL_DEVICE)

    # Probe the sensor once; bail out if the serial port cannot be opened.
    try:
        co2sensor.read_mh_z19(serial_device)
    except OSError as err:
        _LOGGER.error(
            "Could not open serial connection to %s (%s)", serial_device, err
        )
        return False

    # Temperature unit comes from the Home Assistant unit system.
    SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit

    client = MHZClient(co2sensor, serial_device)
    sensor_name = config.get(CONF_NAME)
    entities = [
        MHZ19Sensor(client, condition, SENSOR_TYPES[condition][1], sensor_name)
        for condition in config[CONF_MONITORED_CONDITIONS]
    ]
    add_entities(entities, True)
    return True
class MHZ19Sensor(Entity):
    """Representation of an CO2 sensor."""

    def __init__(self, mhz_client, sensor_type, temp_unit, name):
        """Initialize a new PM sensor."""
        self._mhz_client = mhz_client
        self._sensor_type = sensor_type
        self._temp_unit = temp_unit
        self._name = name
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        self._ppm = None
        self._temperature = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name}: {SENSOR_TYPES[self._sensor_type][0]}"

    @property
    def state(self):
        """Return the state of the sensor."""
        if self._sensor_type == SENSOR_CO2:
            return self._ppm
        return self._temperature

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Read from sensor and update the state."""
        self._mhz_client.update()
        data = self._mhz_client.data
        temperature = data.get(SENSOR_TEMPERATURE)
        if temperature is not None and self._temp_unit == TEMP_FAHRENHEIT:
            temperature = round(celsius_to_fahrenheit(temperature), 1)
        self._temperature = temperature
        self._ppm = data.get(SENSOR_CO2)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attrs = {}
        # Each sensor exposes the other reading as an attribute.
        if self._sensor_type == SENSOR_TEMPERATURE and self._ppm is not None:
            attrs[ATTR_CO2_CONCENTRATION] = self._ppm
        if self._sensor_type == SENSOR_CO2 and self._temperature is not None:
            attrs[ATTR_TEMPERATURE] = self._temperature
        return attrs
class MHZClient:
    """Get the latest data from the MH-Z sensor."""

    def __init__(self, co2sens, serial):
        """Initialize the sensor."""
        self.co2sensor = co2sens
        self._serial = serial
        self.data = {}

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data the MH-Z19 sensor."""
        self.data = {}
        try:
            reading = self.co2sensor.read_mh_z19_with_temperature(self._serial)
        except OSError as err:
            _LOGGER.error(
                "Could not open serial connection to %s (%s)", self._serial, err
            )
            return
        if reading is None:
            return
        co2, temperature = reading
        if temperature is not None:
            self.data[SENSOR_TEMPERATURE] = temperature
        # Readings outside 0..5000 ppm are sensor glitches; drop them.
        if co2 is not None and 0 < co2 <= 5000:
            self.data[SENSOR_CO2] = co2
| apache-2.0 |
CXQERP/ODOOERP | addons/account_followup/wizard/account_followup_print.py | 217 | 16379 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_followup_stat_by_partner(osv.osv):
    # Read-only statistics model: one row per (partner, company) pair,
    # backed by the SQL view created in init() below (hence _auto = False).
    _name = "account_followup.stat.by.partner"
    _description = "Follow-up Statistics by Partner"
    _rec_name = 'partner_id'
    _auto = False

    def _get_invoice_partner_id(self, cr, uid, ids, field_name, arg, context=None):
        """Resolve each record's invoice address.

        Falls back to the partner itself when no dedicated invoice
        address exists.
        """
        result = {}
        for rec in self.browse(cr, uid, ids, context=context):
            result[rec.id] = rec.partner_id.address_get(adr_pref=['invoice']).get('invoice', rec.partner_id.id)
        return result

    _columns = {
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'date_move':fields.date('First move', readonly=True),
        'date_move_last':fields.date('Last move', readonly=True),
        'date_followup':fields.date('Latest follow-up', readonly=True),
        'max_followup_id': fields.many2one('account_followup.followup.line',
                                    'Max Follow Up Level', readonly=True, ondelete="cascade"),
        'balance':fields.float('Balance', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'invoice_partner_id': fields.function(_get_invoice_partner_id, type='many2one', relation='res.partner', string='Invoice Address')
    }

    # Fields of other models this view depends on (invalidation hints).
    _depends = {
        'account.move.line': [
            'account_id', 'company_id', 'credit', 'date', 'debit',
            'followup_date', 'followup_line_id', 'partner_id', 'reconcile_id',
        ],
        'account.account': ['active', 'type'],
    }

    def init(self, cr):
        """(Re)create the SQL view aggregating unreconciled receivables."""
        tools.drop_view_if_exists(cr, 'account_followup_stat_by_partner')
        # Here we don't have other choice but to create a virtual ID based on the concatenation
        # of the partner_id and the company_id, because if a partner is shared between 2 companies,
        # we want to see 2 lines for him in this table. It means that both company should be able
        # to send him follow-ups separately . An assumption that the number of companies will not
        # reach 10 000 records is made, what should be enough for a time.
        cr.execute("""
            create view account_followup_stat_by_partner as (
                SELECT
                    l.partner_id * 10000::bigint + l.company_id as id,
                    l.partner_id AS partner_id,
                    min(l.date) AS date_move,
                    max(l.date) AS date_move_last,
                    max(l.followup_date) AS date_followup,
                    max(l.followup_line_id) AS max_followup_id,
                    sum(l.debit - l.credit) AS balance,
                    l.company_id as company_id
                FROM
                    account_move_line l
                    LEFT JOIN account_account a ON (l.account_id = a.id)
                WHERE
                    a.active AND
                    a.type = 'receivable' AND
                    l.reconcile_id is NULL AND
                    l.partner_id IS NOT NULL
                GROUP BY
                    l.partner_id, l.company_id
            )""")
class account_followup_sending_results(osv.osv_memory):

    def do_report(self, cr, uid, ids, context=None):
        """Return the report action stashed in the context by the wizard."""
        context = context or {}
        return context.get('report_data')

    def do_done(self, cr, uid, ids, context=None):
        """Close the wizard without further action."""
        return {}

    def _get_description(self, cr, uid, context=None):
        """Default description text, taken from the launching context."""
        context = context or {}
        return context.get('description')

    def _get_need_printing(self, cr, uid, context=None):
        """Default "needs printing" flag, taken from the launching context."""
        context = context or {}
        return context.get('needprinting')

    _name = 'account_followup.sending.results'
    _description = 'Results from the sending of the different letters and emails'
    _columns = {
        'description': fields.text("Description", readonly=True),
        'needprinting': fields.boolean("Needs Printing")
    }
    _defaults = {
        'needprinting': _get_need_printing,
        'description': _get_description,
    }
class account_followup_print(osv.osv_memory):
    # Wizard model driving the "send follow-ups" flow: selects overdue
    # partners, bumps their follow-up level, then triggers emails, letters
    # and manual actions.
    _name = 'account_followup.print'
    _description = 'Print Follow-up & Send Mail to Customers'
    _columns = {
        'date': fields.date('Follow-up Sending Date', required=True,
                            help="This field allow you to select a forecast date to plan your follow-ups"),
        'followup_id': fields.many2one('account_followup.followup', 'Follow-Up', required=True, readonly = True),
        'partner_ids': fields.many2many('account_followup.stat.by.partner', 'partner_stat_rel',
                                        'osv_memory_id', 'partner_id', 'Partners', required=True),
        'company_id':fields.related('followup_id', 'company_id', type='many2one',
                                    relation='res.company', store=True, readonly=True),
        'email_conf': fields.boolean('Send Email Confirmation'),
        'email_subject': fields.char('Email Subject', size=64),
        'partner_lang': fields.boolean('Send Email in Partner Language',
                                       help='Do not change message text, if you want to send email in partner language, or configure from company'),
        'email_body': fields.text('Email Body'),
        'summary': fields.text('Summary', readonly=True),
        'test_print': fields.boolean('Test Print',
                                     help='Check if you want to print follow-ups without changing follow-up level.'),
    }

    def _get_followup(self, cr, uid, context=None):
        """Default follow-up: the active record when launched from the
        follow-up form, otherwise the one configured for the user's company."""
        if context is None:
            context = {}
        if context.get('active_model', 'ir.ui.menu') == 'account_followup.followup':
            return context.get('active_id', False)
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        followp_id = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
        return followp_id and followp_id[0] or False

    def process_partners(self, cr, uid, partner_ids, data, context=None):
        """Run the actions configured on each partner's max follow-up level.

        Triggers manual actions, emails and letter printing, and returns a
        dict with the HTML summary text, whether printing is needed, and
        the print report action.
        """
        partner_obj = self.pool.get('res.partner')
        partner_ids_to_print = []
        nbmanuals = 0
        manuals = {}
        nbmails = 0
        nbunknownmails = 0
        nbprints = 0
        resulttext = " "
        for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_ids, context=context):
            if partner.max_followup_id.manual_action:
                partner_obj.do_partner_manual_action(cr, uid, [partner.partner_id.id], context=context)
                nbmanuals = nbmanuals + 1
                # group manual-action counts by responsible user
                key = partner.partner_id.payment_responsible_id.name or _("Anybody")
                if not key in manuals.keys():
                    manuals[key]= 1
                else:
                    manuals[key] = manuals[key] + 1
            if partner.max_followup_id.send_email:
                # do_partner_mail returns the number of partners without a
                # usable email address
                nbunknownmails += partner_obj.do_partner_mail(cr, uid, [partner.partner_id.id], context=context)
                nbmails += 1
            if partner.max_followup_id.send_letter:
                partner_ids_to_print.append(partner.id)
                nbprints += 1
                message = "%s<I> %s </I>%s" % (_("Follow-up letter of "), partner.partner_id.latest_followup_level_id_without_lit.name, _(" will be sent"))
                partner_obj.message_post(cr, uid, [partner.partner_id.id], body=message, context=context)
        if nbunknownmails == 0:
            resulttext += str(nbmails) + _(" email(s) sent")
        else:
            resulttext += str(nbmails) + _(" email(s) should have been sent, but ") + str(nbunknownmails) + _(" had unknown email address(es)") + "\n <BR/> "
        resulttext += "<BR/>" + str(nbprints) + _(" letter(s) in report") + " \n <BR/>" + str(nbmanuals) + _(" manual action(s) assigned:")
        needprinting = False
        if nbprints > 0:
            needprinting = True
        resulttext += "<p align=\"center\">"
        for item in manuals:
            resulttext = resulttext + "<li>" + item + ":" + str(manuals[item]) + "\n </li>"
        resulttext += "</p>"
        result = {}
        action = partner_obj.do_partner_print(cr, uid, partner_ids_to_print, data, context=context)
        result['needprinting'] = needprinting
        result['resulttext'] = resulttext
        result['action'] = action or {}
        return result

    def do_update_followup_level(self, cr, uid, to_update, partner_list, date, context=None):
        """Stamp the computed follow-up level and date on account.move.line
        records whose partner is in partner_list."""
        #update the follow-up level on account.move.line
        for id in to_update.keys():
            if to_update[id]['partner_id'] in partner_list:
                self.pool.get('account.move.line').write(cr, uid, [int(id)], {'followup_line_id': to_update[id]['level'],
                                                                              'followup_date': date})

    def clear_manual_actions(self, cr, uid, partner_list, context=None):
        """Clear follow-up actions of partners with no due payments left.

        partner_list is the list of stat-by-partner ids to EXCLUDE (they
        are still being followed up).  Returns the number of partners
        cleared.
        """
        # Partnerlist is list to exclude
        # Will clear the actions of partners that have no due payments anymore
        partner_list_ids = [partner.partner_id.id for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_list, context=context)]
        ids = self.pool.get('res.partner').search(cr, uid, ['&', ('id', 'not in', partner_list_ids), '|',
                                                            ('payment_responsible_id', '!=', False),
                                                            ('payment_next_action_date', '!=', False)], context=context)
        partners_to_clear = []
        for part in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
            if not part.unreconciled_aml_ids:
                partners_to_clear.append(part.id)
        self.pool.get('res.partner').action_done(cr, uid, partners_to_clear, context=context)
        return len(partners_to_clear)

    def do_process(self, cr, uid, ids, context=None):
        """Main wizard button: compute, update and act on follow-ups, then
        open the results dialog."""
        context = dict(context or {})
        #Get partners
        tmp = self._get_partners_followp(cr, uid, ids, context=context)
        partner_list = tmp['partner_ids']
        to_update = tmp['to_update']
        date = self.browse(cr, uid, ids, context=context)[0].date
        data = self.read(cr, uid, ids, context=context)[0]
        data['followup_id'] = data['followup_id'][0]
        #Update partners
        self.do_update_followup_level(cr, uid, to_update, partner_list, date, context=context)
        #process the partners (send mails...)
        restot_context = context.copy()
        restot = self.process_partners(cr, uid, partner_list, data, context=restot_context)
        context.update(restot_context)
        #clear the manual actions if nothing is due anymore
        nbactionscleared = self.clear_manual_actions(cr, uid, partner_list, context=context)
        if nbactionscleared > 0:
            restot['resulttext'] = restot['resulttext'] + "<li>" + _("%s partners have no credits and as such the action is cleared") %(str(nbactionscleared)) + "</li>"
        #return the next action
        mod_obj = self.pool.get('ir.model.data')
        model_data_ids = mod_obj.search(cr, uid, [('model','=','ir.ui.view'),('name','=','view_account_followup_sending_results')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        context.update({'description': restot['resulttext'], 'needprinting': restot['needprinting'], 'report_data': restot['action']})
        return {
            'name': _('Send Letters and Emails: Actions Summary'),
            'view_type': 'form',
            'context': context,
            'view_mode': 'tree,form',
            'res_model': 'account_followup.sending.results',
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    def _get_msg(self, cr, uid, context=None):
        # Default email body: the company-level follow-up message.
        return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.follow_up_msg

    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d'),
        'followup_id': _get_followup,
        'email_body': "",
        'email_subject': _('Invoices Reminder'),
        'partner_lang': True,
    }

    def _get_partners_followp(self, cr, uid, ids, context=None):
        """Compute which partners need a follow-up and which move lines
        must have their follow-up level bumped.

        Returns {'partner_ids': [stat-by-partner ids],
                 'to_update': {move_line_id_str: {'level': ..., 'partner_id': ...}}}.
        """
        data = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        company_id = data.company_id.id

        # all open receivable move lines of the company, oldest first
        cr.execute(
            "SELECT l.partner_id, l.followup_line_id,l.date_maturity, l.date, l.id "\
            "FROM account_move_line AS l "\
                "LEFT JOIN account_account AS a "\
                "ON (l.account_id=a.id) "\
            "WHERE (l.reconcile_id IS NULL) "\
                "AND (a.type='receivable') "\
                "AND (l.state<>'draft') "\
                "AND (l.partner_id is NOT NULL) "\
                "AND (a.active) "\
                "AND (l.debit > 0) "\
                "AND (l.company_id = %s) " \
                "AND (l.blocked = False)" \
            "ORDER BY l.date", (company_id,)) #l.blocked added to take litigation into account and it is not necessary to change follow-up level of account move lines without debit
        move_lines = cr.fetchall()
        old = None
        fups = {}
        fup_id = 'followup_id' in context and context['followup_id'] or data.followup_id.id
        date = 'date' in context and context['date'] or data.date

        current_date = datetime.date(*time.strptime(date,
                                                    '%Y-%m-%d')[:3])
        cr.execute(
            "SELECT * "\
            "FROM account_followup_followup_line "\
            "WHERE followup_id=%s "\
            "ORDER BY delay", (fup_id,))

        #Create dictionary of tuples where first element is the date to compare with the due date and second element is the id of the next level
        for result in cr.dictfetchall():
            delay = datetime.timedelta(days=result['delay'])
            fups[old] = (current_date - delay, result['id'])
            old = result['id']

        partner_list = []
        to_update = {}

        #Fill dictionary of accountmovelines to_update with the partners that need to be updated
        for partner_id, followup_line_id, date_maturity,date, id in move_lines:
            if not partner_id:
                continue
            if followup_line_id not in fups:
                continue
            # virtual stat-by-partner id: partner * 10000 + company (must
            # match the SQL view in account_followup_stat_by_partner)
            stat_line_id = partner_id * 10000 + company_id
            if date_maturity:
                if date_maturity <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
                    if stat_line_id not in partner_list:
                        partner_list.append(stat_line_id)
                    to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
            elif date and date <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
                if stat_line_id not in partner_list:
                    partner_list.append(stat_line_id)
                to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
        return {'partner_ids': partner_list, 'to_update': to_update}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
plxaye/chromium | src/tools/code_coverage/croc_scan.py | 178 | 4383 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Crocodile source scanners."""
import re
class Scanner(object):
  """Generic source scanner.

  Scans source lines for executable statements by stripping comments and
  tracking string literals.  Subclasses configure the token regexp and
  comment delimiters for a specific language.
  """

  def __init__(self):
    """Constructor."""
    # The token pattern must contain exactly one capture group, because
    # ScanLines() retrieves each matched token via t.groups()[0].  The
    # previous pattern '#' had no group, so any match raised IndexError.
    self.re_token = re.compile('(#)')
    self.comment_to_eol = ['#']
    self.comment_start = None
    self.comment_end = None

  def ScanLines(self, lines):
    """Scans the lines for executable statements.

    Args:
      lines: Iterator returning source lines.

    Returns:
      An array of line numbers which are executable.
    """
    exe_lines = []
    lineno = 0

    in_string = None        # quote token of the string we're inside, if any
    in_comment = None       # end token of the multi-line comment, if any
    comment_index = None    # start column of the current comment

    for line in lines:
      lineno += 1
      in_string_at_start = in_string

      for t in self.re_token.finditer(line):
        tokenstr = t.groups()[0]
        if in_comment:
          # Inside a multi-line comment, so look for end token
          if tokenstr == in_comment:
            in_comment = None
            # Replace comment with spaces
            line = (line[:comment_index]
                    + ' ' * (t.end(0) - comment_index)
                    + line[t.end(0):])
        elif in_string:
          # Inside a string, so look for end token
          if tokenstr == in_string:
            in_string = None
        elif tokenstr in self.comment_to_eol:
          # Single-line comment, so truncate line at start of token
          line = line[:t.start(0)]
          break
        elif tokenstr == self.comment_start:
          # Multi-line comment start - end token is comment_end
          in_comment = self.comment_end
          comment_index = t.start(0)
        else:
          # Starting a string - end token is same as start
          in_string = tokenstr

      # If still in comment at end of line, remove comment
      if in_comment:
        line = line[:comment_index]
        # Next line, delete from the beginning
        comment_index = 0

      # If line-sans-comments is not empty, claim it may be executable
      if line.strip() or in_string_at_start:
        exe_lines.append(lineno)

    # Return executable lines
    return exe_lines

  def Scan(self, filename):
    """Reads the file and scans its lines.

    Args:
      filename: Path to file to scan.

    Returns:
      An array of line numbers which are executable.
    """
    # TODO: All manner of error checking
    f = None
    try:
      f = open(filename, 'rt')
      return self.ScanLines(f)
    finally:
      if f:
        f.close()
class PythonScanner(Scanner):
    """Source scanner configured for Python files."""

    def __init__(self):
        """Set up the token pattern and comment markers for Python."""
        Scanner.__init__(self)
        # Python has no block comments, only '#' to end of line.
        self.comment_to_eol = ['#']
        self.comment_start = None
        self.comment_end = None
        # Tokens of interest: '#', a triple quote, or a quote that is not
        # escaped by a (single) preceding backslash.
        # TODO: This breaks for strings ending in more than 2 backslashes.
        # Need a pattern which counts only an odd number of backslashes,
        # so the last one thus escapes the quote.
        self.re_token = re.compile(r'(#|\'\'\'|"""|(?<!(?<!\\)\\)["\'])')
class CppScanner(Scanner):
    """Source scanner configured for C / C++ / ObjC / ObjC++ files."""

    def __init__(self):
        """Set up the token pattern and comment markers for C-family code."""
        Scanner.__init__(self)
        # Note that we treat '#' at beginning of line as a comment, so that
        # we ignore preprocessor definitions.
        self.comment_to_eol = ['//', '#']
        self.comment_start = '/*'
        self.comment_end = '*/'
        # Tokens of interest: leading '#', '//', block comment delimiters,
        # or an unescaped quote.
        # TODO: This breaks for strings ending in more than 2 backslashes.
        # Need a pattern which counts only an odd number of backslashes,
        # so the last one thus escapes the quote.
        self.re_token = re.compile(r'(^\s*#|//|/\*|\*/|(?<!(?<!\\)\\)["\'])')
        # TODO: Treat '\' at EOL as a token, and handle it as continuing the
        # previous line.  That is, if in a comment-to-eol, this line is a
        # comment too.
def ScanFile(filename, language):
    """Scans a file for executable lines.

    Args:
      filename: Path to file to scan.
      language: Language for file ('C', 'C++', 'python', 'ObjC', 'ObjC++')

    Returns:
      A list of executable lines, or an empty list if the file was not a
      handled language.
    """
    if language == 'python':
        return PythonScanner().Scan(filename)
    if language in ('C', 'C++', 'ObjC', 'ObjC++'):
        return CppScanner().Scan(filename)
    # Unrecognized language - nothing we can do.
    return []
| apache-2.0 |
edmond-chhung/linkchecker | third_party/dnspython/dns/tsig.py | 66 | 7681 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import sys
import dns.exception
import dns.hash
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
    """Raised if the current time is not within the TSIG's validity time."""


class BadSignature(dns.exception.DNSException):
    """Raised if the TSIG signature fails to verify."""


class PeerError(dns.exception.DNSException):
    """Base class for all TSIG errors generated by the remote peer."""


class PeerBadKey(PeerError):
    """Raised if the peer didn't know the key we used."""


class PeerBadSignature(PeerError):
    """Raised if the peer didn't like the signature we sent."""


class PeerBadTime(PeerError):
    """Raised if the peer didn't like the time we sent."""


class PeerBadTruncation(PeerError):
    """Raised if the peer didn't like amount of truncation in the TSIG we sent."""
# TSIG Algorithms: algorithm identifiers as DNS names (RFC 2845 / RFC 4635).
HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
HMAC_SHA1 = dns.name.from_text("hmac-sha1")
HMAC_SHA224 = dns.name.from_text("hmac-sha224")
HMAC_SHA256 = dns.name.from_text("hmac-sha256")
HMAC_SHA384 = dns.name.from_text("hmac-sha384")
HMAC_SHA512 = dns.name.from_text("hmac-sha512")

# Algorithm used when the caller does not specify one.
default_algorithm = HMAC_MD5

# TSIG extended RCODE values returned by a peer (RFC 2845 / RFC 4635).
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
         other_data, request_mac, ctx=None, multi=False, first=True,
         algorithm=default_algorithm):
    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
    for the input parameters, the HMAC MAC calculated by applying the
    TSIG signature algorithm, and the TSIG digest context.
    @rtype: (string, string, hmac.HMAC object)
    @raises ValueError: I{other_data} is too long
    @raises NotImplementedError: I{algorithm} is not supported
    """

    (algorithm_name, digestmod) = get_algorithm(algorithm)
    if first:
        ctx = hmac.new(secret, digestmod=digestmod)
        # First message of an exchange: fold the request MAC (length-prefixed)
        # into the digest, if one was supplied.
        ml = len(request_mac)
        if ml > 0:
            ctx.update(struct.pack('!H', ml))
            ctx.update(request_mac)
    # The DNS message is digested with its original query id, not the id
    # currently in the wire header.
    id = struct.pack('!H', original_id)
    ctx.update(id)
    ctx.update(wire[2:])
    if first:
        # Key name, class (ANY) and TTL (0) are digested only for the first
        # message of a multi-message exchange.
        ctx.update(keyname.to_digestable())
        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
        ctx.update(struct.pack('!I', 0))
    # Split the 48-bit signing time into 16-bit upper and 32-bit lower parts.
    long_time = time + 0L
    upper_time = (long_time >> 32) & 0xffffL
    lower_time = long_time & 0xffffffffL
    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
    pre_mac = algorithm_name + time_mac
    ol = len(other_data)
    if ol > 65535:
        raise ValueError('TSIG Other Data is > 65535 bytes')
    post_mac = struct.pack('!HH', error, ol) + other_data
    if first:
        ctx.update(pre_mac)
        ctx.update(post_mac)
    else:
        # Subsequent messages digest only the time values.
        ctx.update(time_mac)
    mac = ctx.digest()
    mpack = struct.pack('!H', len(mac))
    tsig_rdata = pre_mac + mpack + mac + id + post_mac
    if multi:
        # Start the context for the next message in the exchange, seeded
        # with this message's (length-prefixed) MAC.
        ctx = hmac.new(secret)
        ml = len(mac)
        ctx.update(struct.pack('!H', ml))
        ctx.update(mac)
    else:
        ctx = None
    return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    """Backwards-compatible alias for sign(); forwards all arguments."""
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx=ctx, multi=multi, first=first,
                algorithm=algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
             tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""

    # The digest is computed over the message as it was before the TSIG RR
    # was added: decrement ADCOUNT and strip everything from tsig_start on.
    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        raise dns.exception.FormError
    adcount -= 1
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    # Walk the TSIG rdata: algorithm name, times, MAC, id, error, other data.
    current = tsig_rdata
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    # Reassemble the 48-bit signing time from its 16-bit and 32-bit halves.
    time = ((upper_time + 0L) << 32) + (lower_time + 0L)
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    if current != tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    # A non-zero TSIG error means the peer rejected our previous message.
    if error != 0:
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    # Reject if our clock is outside the signer's time +/- fudge window.
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # Recompute the MAC over the stripped message and compare.
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    if (our_mac != mac):
        raise BadSignature
    return ctx
# Lazily-built map from TSIG algorithm name (dns.name.Name) to a hash
# constructor; populated on first use by _setup_hashes().
_hashes = None


def _maybe_add_hash(tsig_alg, hash_alg):
    # Register the hash only if this Python build provides it;
    # dns.hash.get() raises KeyError for unavailable digests.
    try:
        _hashes[tsig_alg] = dns.hash.get(hash_alg)
    except KeyError:
        pass
def _setup_hashes():
    # Populate the module-level algorithm -> hash-constructor registry with
    # every digest this interpreter supports.
    global _hashes
    _hashes = {}
    _maybe_add_hash(HMAC_SHA224, 'SHA224')
    _maybe_add_hash(HMAC_SHA256, 'SHA256')
    _maybe_add_hash(HMAC_SHA384, 'SHA384')
    _maybe_add_hash(HMAC_SHA512, 'SHA512')
    _maybe_add_hash(HMAC_SHA1, 'SHA1')
    _maybe_add_hash(HMAC_MD5, 'MD5')
def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """

    global _hashes
    if _hashes is None:
        _setup_hashes()

    # Accept either a text name or a dns.name.Name.
    # (Python 2 only: 'unicode' does not exist in Python 3.)
    if isinstance(algorithm, (str, unicode)):
        algorithm = dns.name.from_text(algorithm)

    # hashlib gained SHA-384/512 fixes in 2.5.2; refuse them on older builds.
    if sys.hexversion < 0x02050200 and \
       (algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512):
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " requires Python 2.5.2 or later")

    try:
        return (algorithm.to_digestable(), _hashes[algorithm])
    except KeyError:
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " is not supported")
| gpl-2.0 |
biskett/mic | mic/utils/fs_related.py | 5 | 31322 | #!/usr/bin/python -tt
#
# Copyright (c) 2007, Red Hat, Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import errno
import stat
import random
import string
import time
import uuid
from mic import msger
from mic.utils import runner
from mic.utils.errors import *
def find_binary_inchroot(binary, chroot):
    """Locate *binary* in the standard bin directories inside *chroot*.

    Returns the chroot-relative path (e.g. "/usr/bin/foo") of the first
    match, or None if the binary is not present in any of them.
    """
    search_dirs = ("/usr/sbin", "/usr/bin", "/sbin", "/bin")
    for directory in search_dirs:
        candidate = "%s/%s" % (directory, binary)
        if os.path.exists("%s/%s" % (chroot, candidate)):
            return candidate
    return None
def find_binary_path(binary):
    """Return the absolute path of *binary* on the host.

    Searches $PATH, then $HOME/bin, then the standard system directories.

    Raises:
        CreatorError: if the binary cannot be found anywhere.
    """
    # BUGFIX: dict.has_key() was removed in Python 3; the 'in' operator is
    # equivalent and works on both Python 2 and 3.
    if "PATH" in os.environ:
        paths = os.environ["PATH"].split(":")
    else:
        paths = []
    if "HOME" in os.environ:
        paths += [os.environ["HOME"] + "/bin"]
    paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin",
              "/sbin", "/bin"]

    for path in paths:
        bin_path = "%s/%s" % (path, binary)
        if os.path.exists(bin_path):
            return bin_path
    raise CreatorError("Command '%s' is not available." % binary)
def makedirs(dirname):
    """A version of os.makedirs() that doesn't throw an
    exception if the leaf directory already exists.
    """
    try:
        os.makedirs(dirname)
    # BUGFIX: 'except OSError, err' is Python 2-only syntax; the 'as' form
    # is valid on Python 2.6+ and Python 3.
    except OSError as err:
        # Only swallow "already exists"; propagate real failures.
        if err.errno != errno.EEXIST:
            raise
def mksquashfs(in_img, out_img):
    """Pack *in_img* into the squashfs image *out_img* using mksquashfs(8).

    Raises SquashfsError if the tool exits non-zero.
    """
    args = [find_binary_path("mksquashfs"), in_img, out_img]

    # Suppress the interactive progress bar when output is not a terminal.
    if not sys.stdout.isatty():
        args.append("-no-progress")

    ret = runner.show(args)
    if ret != 0:
        raise SquashfsError("'%s' exited with error (%d)" % (' '.join(args), ret))
def resize2fs(fs, size):
    """Run resize2fs(8) on *fs*.

    A size of 0 shrinks the filesystem to its minimal size; otherwise the
    filesystem is resized to *size* bytes (passed to the tool in KiB).
    """
    # Bind to a local so we do not shadow this function's own name.
    cmd = find_binary_path("resize2fs")
    if size == 0:
        # it means to minimalize it
        return runner.show([cmd, '-M', fs])
    return runner.show([cmd, fs, "%sK" % (size / 1024,)])
class BindChrootMount:
    """Represents a bind mount of a directory into a chroot."""
    def __init__(self, src, chroot, dest = None, option = None):
        self.root = os.path.abspath(os.path.expanduser(chroot))
        self.mount_option = option

        # Resolve a symlinked source to its real path, keeping the original
        # so a matching symlink can be recreated inside the chroot.
        self.orig_src = self.src = src
        if os.path.islink(src):
            self.src = os.readlink(src)
            if not self.src.startswith('/'):
                # Relative link target: resolve against the link's directory.
                self.src = os.path.abspath(os.path.join(os.path.dirname(src),
                                                        self.src))

        if not dest:
            dest = self.src
        self.dest = os.path.join(self.root, dest.lstrip('/'))

        self.mounted = False
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def ismounted(self):
        # Check /proc/mounts so we also notice mounts made outside this
        # object's lifetime.
        with open('/proc/mounts') as f:
            for line in f:
                if line.split()[1] == os.path.abspath(self.dest):
                    return True
        return False

    def mount(self):
        if self.mounted or self.ismounted():
            return

        try:
            makedirs(self.dest)
        except OSError, err:
            if err.errno == errno.ENOSPC:
                # Best effort: warn and give up instead of crashing.
                msger.warning("No space left on device '%s'" % err.filename)
                return

        if self.mount_option:
            cmdline = [self.mountcmd, "--bind", "-o", "%s" % \
                       self.mount_option, self.src, self.dest]
        else:
            cmdline = [self.mountcmd, "--bind", self.src, self.dest]
        rc, errout = runner.runtool(cmdline, catch=2)
        if rc != 0:
            raise MountError("Bind-mounting '%s' to '%s' failed: %s" %
                             (self.src, self.dest, errout))

        self.mounted = True
        # Recreate the original symlink inside the chroot so the path used
        # by the caller resolves there too.
        if os.path.islink(self.orig_src):
            dest = os.path.join(self.root, self.orig_src.lstrip('/'))
            if not os.path.exists(dest):
                os.symlink(self.src, dest)

    def unmount(self):
        if self.mounted or self.ismounted():
            # Lazy unmount (-l) detaches even if the mount point is busy.
            runner.show([self.umountcmd, "-l", self.dest])
        self.mounted = False
class LoopbackMount:
    """LoopbackMount compatibility layer for old API.

    Delegates the real work to a DiskMount over a LoopbackDisk.
    """
    def __init__(self, lofile, mountdir, fstype = None):
        self.diskmount = DiskMount(LoopbackDisk(lofile, size = 0), mountdir, fstype, rmmountdir = True)
        # BUGFIX: loopsetup() reads self.lofile, but it was never stored,
        # so calling loopsetup() raised AttributeError.
        self.lofile = lofile
        self.losetup = False
        self.losetupcmd = find_binary_path("losetup")

    def cleanup(self):
        """Unmount and release the underlying disk."""
        self.diskmount.cleanup()

    def unmount(self):
        self.diskmount.unmount()

    def lounsetup(self):
        """Detach the loop device set up by loopsetup()."""
        if self.losetup:
            runner.show([self.losetupcmd, "-d", self.loopdev])
            self.losetup = False
            self.loopdev = None

    def loopsetup(self):
        """Attach the backing file to a free loop device."""
        if self.losetup:
            return

        self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
        self.losetup = True

    def mount(self):
        self.diskmount.mount()
class SparseLoopbackMount(LoopbackMount):
    """SparseLoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, size, fstype = None):
        # NOTE(review): does not chain to LoopbackMount.__init__, so the
        # inherited loopsetup()/lounsetup() would fail on the missing
        # 'losetup'/'losetupcmd'/'lofile' attributes -- confirm those legacy
        # code paths are unused before relying on them.
        self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)

    def expand(self, create = False, size = None):
        # Grow the backing sparse file (optionally creating it first).
        self.diskmount.disk.expand(create, size)

    def truncate(self, size = None):
        # Shrink the backing sparse file.
        self.diskmount.disk.truncate(size)

    def create(self):
        self.diskmount.disk.create()
class SparseExtLoopbackMount(SparseLoopbackMount):
    """SparseExtLoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
        self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)

    def __format_filesystem(self):
        # NOTE(review): name mangling turns this lookup into
        # diskmount._SparseExtLoopbackMount__format_filesystem, which
        # ExtDiskMount does not define, so this raises AttributeError if
        # ever called -- confirm this legacy path is dead.
        self.diskmount.__format_filesystem()

    def create(self):
        self.diskmount.disk.create()

    def resize(self, size = None):
        # NOTE(review): same name-mangling problem as __format_filesystem().
        return self.diskmount.__resize_filesystem(size)

    def mount(self):
        self.diskmount.mount()

    def __fsck(self):
        # NOTE(review): 'extdiskmount' is never assigned anywhere (looks
        # like a typo for 'diskmount'); also name-mangled as above.
        self.extdiskmount.__fsck()

    def __get_size_from_filesystem(self):
        return self.diskmount.__get_size_from_filesystem()

    def __resize_to_minimal(self):
        return self.diskmount.__resize_to_minimal()

    def resparse(self, size = None):
        return self.diskmount.resparse(size)
class Disk:
    """Generic base object for a disk.

    The 'create' method must make the disk visible as a block device - eg
    by calling losetup. For RawDisk, this is obviously a no-op. The
    'cleanup' method must undo the 'create' operation.
    """
    def __init__(self, size, device = None):
        # Backing state for the 'device' property and the read-only 'size'.
        self._device = device
        self._size = size

    def create(self):
        # Subclasses make the disk appear as a block device here.
        pass

    def cleanup(self):
        # Subclasses undo whatever create() did.
        pass

    def get_device(self):
        return self._device

    def set_device(self, path):
        self._device = path

    device = property(fget=get_device, fset=set_device)

    def get_size(self):
        return self._size

    size = property(fget=get_size)
class RawDisk(Disk):
    """A Disk backed by a real block device.

    Note that create() is a no-op.
    """
    def __init__(self, size, device):
        Disk.__init__(self, size, device)

    def exists(self):
        # The underlying device node is assumed to already exist.
        return True

    def fixed(self):
        # A real block device cannot be grown or shrunk by us.
        return True
class LoopbackDisk(Disk):
    """A Disk backed by a file via the loop module."""

    def __init__(self, lofile, size):
        Disk.__init__(self, size)
        self.lofile = lofile
        self.losetupcmd = find_binary_path("losetup")

    def fixed(self):
        return False

    def exists(self):
        return os.path.exists(self.lofile)

    def create(self):
        # Already attached to a loop device - nothing to do.
        if self.device is not None:
            return
        self.device = get_loop_device(self.losetupcmd, self.lofile)

    def cleanup(self):
        if self.device is None:
            return
        msger.debug("Losetup remove %s" % self.device)
        runner.show([self.losetupcmd, "-d", self.device])
        self.device = None
class SparseLoopbackDisk(LoopbackDisk):
    """A Disk backed by a sparse file via the loop module."""
    def __init__(self, lofile, size):
        LoopbackDisk.__init__(self, lofile, size)

    def expand(self, create = False, size = None):
        # Grow the backing sparse file to 'size' bytes (defaults to the
        # configured disk size), optionally creating it first.
        flags = os.O_WRONLY
        if create:
            flags |= os.O_CREAT
            if not os.path.exists(self.lofile):
                makedirs(os.path.dirname(self.lofile))

        if size is None:
            size = self.size

        msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
        if create:
            fd = os.open(self.lofile, flags, 0644)
        else:
            fd = os.open(self.lofile, flags)

        # ftruncate() requires a positive length.
        if size <= 0:
            size = 1
        try:
            os.ftruncate(fd, size)
        except:
            # may be limited by 2G in 32bit env
            os.ftruncate(fd, 2**31L)

        os.close(fd)

    def truncate(self, size = None):
        # Shrink the backing sparse file to 'size' bytes.
        if size is None:
            size = self.size
        msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
        fd = os.open(self.lofile, os.O_WRONLY)
        os.ftruncate(fd, size)
        os.close(fd)

    def create(self):
        # Ensure the backing file exists at full size, then attach the loop.
        self.expand(create = True)
        LoopbackDisk.create(self)
class Mount:
    """A generic base class to deal with mounting things."""

    def __init__(self, mountdir):
        self.mountdir = mountdir

    def mount(self, options = None):
        # Overridden by subclasses that actually mount something.
        pass

    def unmount(self):
        # Overridden by subclasses that actually mount something.
        pass

    def cleanup(self):
        # Default cleanup is simply to unmount.
        self.unmount()
class DiskMount(Mount):
    """A Mount object that handles mounting of a Disk."""
    def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
        Mount.__init__(self, mountdir)

        self.disk = disk
        self.fstype = fstype
        self.rmmountdir = rmmountdir

        self.mounted = False
        # True only when mount() itself created the mount point, so that
        # unmount() knows whether to remove it again.
        self.rmdir = False
        if fstype:
            self.mkfscmd = find_binary_path("mkfs." + self.fstype)
        else:
            self.mkfscmd = None
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def cleanup(self):
        Mount.cleanup(self)
        self.disk.cleanup()

    def unmount(self):
        if self.mounted:
            msger.debug("Unmounting directory %s" % self.mountdir)
            runner.quiet('sync') # sync the data on this mount point
            rc = runner.show([self.umountcmd, "-l", self.mountdir])
            if rc == 0:
                self.mounted = False
            else:
                raise MountError("Failed to umount %s" % self.mountdir)
        # Remove the mount point only if we created it ourselves.
        if self.rmdir and not self.mounted:
            try:
                os.rmdir(self.mountdir)
            except OSError, e:
                pass
            self.rmdir = False

    def __create(self):
        self.disk.create()

    def mount(self, options = None):
        if self.mounted:
            return

        if not os.path.isdir(self.mountdir):
            msger.debug("Creating mount point %s" % self.mountdir)
            os.makedirs(self.mountdir)
            self.rmdir = self.rmmountdir

        # Make the disk visible as a block device before mounting it.
        self.__create()

        msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
        if options:
            args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
        else:
            args = [ self.mountcmd, self.disk.device, self.mountdir ]
        if self.fstype:
            args.extend(["-t", self.fstype])

        rc = runner.show(args)
        if rc != 0:
            raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
                             (self.disk.device, self.mountdir, " ".join(args), rc))

        self.mounted = True
class ExtDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize ext[23] filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None, fsuuid=None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # '/' is not allowed in an ext label.
        self.fslabel = fslabel.replace("/", "")
        self.uuid = fsuuid or str(uuid.uuid4())
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.extopts = None
        self.dumpe2fs = find_binary_path("dumpe2fs")
        self.tune2fs = find_binary_path("tune2fs")

    def __parse_field(self, output, field):
        # Extract "field: value" from dumpe2fs-style output.
        for line in output.split("\n"):
            if line.startswith(field + ":"):
                return line[len(field) + 1:].strip()

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
                   str(self.blocksize), "-U", self.uuid]
        if self.extopts:
            cmdlist.extend(self.extopts.split())
        cmdlist.extend([self.disk.device])

        rc, errout = runner.runtool(cmdlist, catch=2)
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s:\n%s" %
                             (self.fstype, self.disk.device, errout))

        if not self.extopts:
            msger.debug("Tuning filesystem on %s" % self.disk.device)
            runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])

    def __resize_filesystem(self, size = None):
        msger.info("Resizing filesystem ...")
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # NOTE(review): expand()'s signature is expand(create, size), so
            # this passes 'size' as the 'create' flag -- looks like it should
            # be expand(size=size); confirm before changing.
            self.disk.expand(size)

        self.__fsck()
        resize2fs(self.disk.lofile, size)
        return size

    def __create(self):
        # Reuse and resize an existing backing file; format a fresh one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.info("Checking filesystem %s" % self.disk.lofile)
        runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        # Filesystem size in bytes = block count * block size.
        return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
                                      "Block count")) * self.blocksize

    def __resize_to_minimal(self):
        msger.info("Resizing filesystem to minimal ...")
        self.__fsck()

        #
        # Use a binary search to find the minimal size
        # we can resize the image to
        #
        bot = 0
        top = self.__get_size_from_filesystem()
        while top != (bot + 1):
            t = bot + ((top - bot) / 2)

            # resize2fs() returns 0 (falsy) on success.
            if not resize2fs(self.disk.lofile, t):
                top = t
            else:
                bot = t
        return top

    def resparse(self, size = None):
        self.cleanup()
        if size == 0:
            minsize = 0
        else:
            minsize = self.__resize_to_minimal()

        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
class VfatDiskMount(DiskMount):
    """A DiskMount object that is able to format vfat/msdos filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None, fsuuid = None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # '/' is not allowed in a FAT label.
        self.fslabel = fslabel.replace("/", "")
        # FAT volume ids are 32-bit; fabricate an "XXXX-XXXX" style one when
        # the caller does not supply a uuid.
        rand1 = random.randint(0, 2**16 - 1)
        rand2 = random.randint(0, 2**16 - 1)
        self.uuid = fsuuid or "%04X-%04X" % (rand1, rand2)
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.fsckcmd = find_binary_path("fsck." + self.fstype)

    def __format_filesystem(self):
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-n", self.fslabel,
                          "-i", self.uuid.replace("-", ""), self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        msger.verbose("Tuning filesystem on %s" % self.disk.device)

    def __resize_filesystem(self, size = None):
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            self.disk.expand(size)

        self.__fsck()

        # FAT has no resize2fs equivalent here; only the backing file grows.
        #resize2fs(self.disk.lofile, size)
        return size

    def __create(self):
        # Reuse an existing backing file; format a fresh one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.show([self.fsckcmd, "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        self.__fsck()

        #
        # Use a binary search to find the minimal size
        # we can resize the image to
        #
        bot = 0
        top = self.__get_size_from_filesystem()
        return top

    def resparse(self, size = None):
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
class BtrfsDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize btrfs filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None, fsuuid = None):
        # Verify btrfs support before doing anything else.
        self.__check_btrfs()
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        self.fslabel = fslabel.replace("/", "")
        # Filled in from blkid after formatting if not supplied.
        self.uuid = fsuuid or None
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.blkidcmd = find_binary_path("blkid")
        self.btrfsckcmd = find_binary_path("btrfsck")

    def __check_btrfs(self):
        found = False
        """ Need to load btrfs module to mount it """
        load_module("btrfs")
        # NOTE: xreadlines() is Python 2 only; on Python 3 iterate the file.
        for line in open("/proc/filesystems").xreadlines():
            if line.find("btrfs") > -1:
                found = True
                break
        if not found:
            raise MountError("Your system can't mount btrfs filesystem, please make sure your kernel has btrfs support and the module btrfs.ko has been loaded.")

        # disable selinux, selinux will block write
        if os.path.exists("/usr/sbin/setenforce"):
            runner.show(["/usr/sbin/setenforce", "0"])

    def __parse_field(self, output, field):
        # Extract 'field="value"' from blkid-style output.
        for line in output.split(" "):
            if line.startswith(field + "="):
                return line[len(field) + 1:].strip().replace("\"", "")

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        # Record the UUID the formatter picked.
        self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")

    def __resize_filesystem(self, size = None):
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            self.disk.expand(size)

        self.__fsck()
        return size

    def __create(self):
        # Reuse an existing backing file; format a fresh one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.quiet([self.btrfsckcmd, self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        self.__fsck()
        return self.__get_size_from_filesystem()

    def resparse(self, size = None):
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
class DeviceMapperSnapshot(object):
    """A device-mapper snapshot of a loopback image backed by a COW loop."""

    def __init__(self, imgloop, cowloop):
        self.imgloop = imgloop
        self.cowloop = cowloop

        self.__created = False
        self.__name = None
        self.dmsetupcmd = find_binary_path("dmsetup")

        """Load dm_snapshot if it isn't loaded"""
        load_module("dm_snapshot")

    def get_path(self):
        # /dev/mapper node of the snapshot, or None before create().
        if self.__name is None:
            return None
        return os.path.join("/dev/mapper", self.__name)
    path = property(get_path)

    def create(self):
        if self.__created:
            return

        self.imgloop.create()
        self.cowloop.create()

        # Unique-enough device name: pid plus a random 16-bit suffix.
        self.__name = "imgcreate-%d-%d" % (os.getpid(),
                                           random.randint(0, 2**16))

        size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]

        # dm table: <start> <sectors> snapshot <origin> <cow> p <chunksize>
        table = "0 %d snapshot %s %s p 8" % (size / 512,
                                             self.imgloop.device,
                                             self.cowloop.device)

        args = [self.dmsetupcmd, "create", self.__name, "--table", table]
        if runner.show(args) != 0:
            # Roll back the loop devices on failure.
            self.cowloop.cleanup()
            self.imgloop.cleanup()
            raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))

        self.__created = True

    def remove(self, ignore_errors = False):
        if not self.__created:
            return

        # Give pending dm I/O a moment to settle before removal.
        time.sleep(2)
        rc = runner.show([self.dmsetupcmd, "remove", self.__name])

        if not ignore_errors and rc != 0:
            raise SnapshotError("Could not remove snapshot device")

        self.__name = None
        self.__created = False

        self.cowloop.cleanup()
        self.imgloop.cleanup()

    def get_cow_used(self):
        if not self.__created:
            return 0

        #
        # dmsetup status on a snapshot returns e.g.
        # "0 8388608 snapshot 416/1048576"
        # or, more generally:
        # "A B snapshot C/D"
        # where C is the number of 512 byte sectors in use
        #
        out = runner.outs([self.dmsetupcmd, "status", self.__name])

        try:
            return int((out.split()[3]).split('/')[0]) * 512
        except ValueError:
            raise SnapshotError("Failed to parse dmsetup status: " + out)
def create_image_minimizer(path, image, minimal_size):
    """
    Builds a copy-on-write image which can be used to
    create a device-mapper snapshot of an image where
    the image's filesystem is as small as possible

    The steps taken are:
      1) Create a sparse COW
      2) Loopback mount the image and the COW
      3) Create a device-mapper snapshot of the image
         using the COW
      4) Resize the filesystem to the minimal size
      5) Determine the amount of space used in the COW
      6) Destroy the device-mapper snapshot
      7) Truncate the COW, removing unused space
      8) Create a squashfs of the COW
    """
    imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter

    # 64 MiB sparse COW file next to the output path.
    cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
                                 64L * 1024L * 1024L)

    snapshot = DeviceMapperSnapshot(imgloop, cowloop)

    try:
        snapshot.create()

        resize2fs(snapshot.path, minimal_size)

        cow_used = snapshot.get_cow_used()
    finally:
        # Ignore removal errors only if we are already unwinding an exception.
        snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None))

    cowloop.truncate(cow_used)

    mksquashfs(cowloop.lofile, path)

    os.unlink(cowloop.lofile)
def load_module(module):
    """Load the given kernel module via modprobe if it is not yet loaded.

    Checks /proc/modules for an exact module-name match first.
    """
    found = False
    # BUGFIX: file.xreadlines() was removed in Python 3 and the handle was
    # never closed; iterate the file inside a context manager instead.
    with open('/proc/modules') as modules:
        for line in modules:
            if line.startswith("%s " % module):
                found = True
                break
    if not found:
        msger.info("Loading %s..." % module)
        runner.quiet(['modprobe', module])
class LoopDevice(object):
    """Management of a single /dev/loopN node: creation, reuse and cleanup."""

    def __init__(self, loopid=None):
        self.device = None
        self.loopid = loopid
        self.created = False
        self.kpartxcmd = find_binary_path("kpartx")
        self.losetupcmd = find_binary_path("losetup")

    def register(self, device):
        # Adopt an already-existing loop device node.
        self.device = device
        self.loopid = None
        self.created = True

    def reg_atexit(self):
        # Make sure the device is released when the process exits.
        import atexit
        atexit.register(self.close)

    def _genloopid(self):
        # Pick the next free loop id in the range [10, 100).
        import glob
        if not glob.glob("/dev/loop[0-9]*"):
            return 10

        # Extract the numeric suffix of each /dev/loopN node.
        fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
        maxid = 1 + max(filter(lambda x: x<100,
                               map(fint, glob.glob("/dev/loop[0-9]*"))))
        if maxid < 10: maxid = 10
        # NOTE(review): bare 'raise' with no active exception does not raise
        # a meaningful error here -- presumably this should be a MountError;
        # confirm before changing.
        if maxid >= 100: raise
        return maxid

    def _kpseek(self, device):
        # True if kpartx still knows partition mappings for this device.
        rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
        if rc != 0:
            raise MountError("Can't query dm snapshot on %s" % device)
        for line in out.splitlines():
            if line and line.startswith("loop"):
                return True
        return False

    def _loseek(self, device):
        # True if losetup reports this device as attached.
        import re
        rc, out = runner.runtool([self.losetupcmd, '-a'])
        if rc != 0:
            raise MountError("Failed to run 'losetup -a'")
        for line in out.splitlines():
            m = re.match("([^:]+): .*", line)
            if m and m.group(1) == device:
                return True
        return False

    def create(self):
        # Create (or adopt) the loop device node for self.loopid.
        if not self.created:
            if not self.loopid:
                self.loopid = self._genloopid()
            self.device = "/dev/loop%d" % self.loopid
            if os.path.exists(self.device):
                if self._loseek(self.device):
                    raise MountError("Device busy: %s" % self.device)
                else:
                    self.created = True
                    return
            # Device node missing: create it (block major 7 = loop).
            mknod = find_binary_path('mknod')
            rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
            if rc != 0:
                raise MountError("Failed to create device %s" % self.device)
            else:
                self.created = True

    def close(self):
        if self.created:
            try:
                self.cleanup()
                self.device = None
            except MountError, e:
                raise CreatorError("%s" % e)

    def cleanup(self):
        if self.device is None:
            return
        # Tear down kpartx mappings first, then detach the loop device.
        if self._kpseek(self.device):
            runner.quiet([self.kpartxcmd, "-d", self.device])
        if self._loseek(self.device):
            runner.quiet([self.losetupcmd, "-d", self.device])
        # FIXME: should sleep a while between two loseek
        if self._loseek(self.device):
            msger.warning("Can't cleanup loop device %s" % self.device)
        elif self.loopid:
            # Only remove nodes we created ourselves (loopid is set then).
            os.unlink(self.device)
# Directory holding one pidfile per allocated loop device (pid of the owner).
DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
# Inter-process lock serializing loop-device allocation.
DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
def get_loop_device(losetupcmd, lofile):
    """Attach *lofile* to a free loop device and return the device path.

    Allocation is serialized across processes via DEVICE_LOCKFILE, and the
    owning pid is recorded under DEVICE_PIDFILE_DIR so stale devices can be
    reclaimed later by clean_loop_devices().
    """
    global DEVICE_PIDFILE_DIR
    global DEVICE_LOCKFILE

    import fcntl
    makedirs(os.path.dirname(DEVICE_LOCKFILE))
    fp = open(DEVICE_LOCKFILE, 'w')
    fcntl.flock(fp, fcntl.LOCK_EX)
    try:
        loopdev = None
        devinst = LoopDevice()

        # clean up left loop device first
        clean_loop_devices()

        # provide an avaible loop device
        rc, out = runner.runtool([losetupcmd, "--find"])
        if rc == 0 and out:
            loopdev = out.split()[0]
            devinst.register(loopdev)
        if not loopdev or not os.path.exists(loopdev):
            # No free device reported: create a fresh node ourselves.
            devinst.create()
            loopdev = devinst.device

        # setup a loop device for image file
        rc = runner.show([losetupcmd, loopdev, lofile])
        if rc != 0:
            raise MountError("Failed to setup loop device for '%s'" % lofile)

        devinst.reg_atexit()

        # try to save device and pid
        makedirs(DEVICE_PIDFILE_DIR)
        pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
        if os.path.exists(pidfile):
            os.unlink(pidfile)
        with open(pidfile, 'w') as wf:
            wf.write(str(os.getpid()))

    except MountError, err:
        raise CreatorError("%s" % str(err))
    except:
        raise
    finally:
        # Always release the allocation lock, even on failure.
        try:
            fcntl.flock(fp, fcntl.LOCK_UN)
            fp.close()
            os.unlink(DEVICE_LOCKFILE)
        except:
            pass

    return loopdev
def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
    """Release loop devices whose registering process has exited.

    Scans `piddir` for per-device pid files written by get_loop_device();
    any device whose owner pid no longer exists in /proc is detached and
    its pid file removed.

    Args:
        piddir: (str) directory holding per-device pid files.
    """
    # isdir() already returns False for a nonexistent path.
    if not os.path.isdir(piddir):
        return

    for loopdev in os.listdir(piddir):
        pidfile = os.path.join(piddir, loopdev)
        try:
            with open(pidfile, 'r') as rf:
                devpid = int(rf.read())
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; unreadable or corrupt files are just stale.
        except (IOError, OSError, ValueError):
            devpid = None

        # if the process using this device is alive, skip it
        if not devpid or os.path.exists(os.path.join('/proc', str(devpid))):
            continue

        # try to clean it up; best effort, so expected failures are ignored
        try:
            devinst = LoopDevice()
            devinst.register(os.path.join('/dev', loopdev))
            devinst.cleanup()
            os.unlink(pidfile)
        except (MountError, OSError, IOError):
            pass
| gpl-2.0 |
intel-analytics/analytics-zoo | pyzoo/test/zoo/common/test_util.py | 1 | 1233 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.util.common import get_node_and_core_number
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.common import set_core_number
class TestUtil(ZooTestCase):
    """Tests for zoo.common.set_core_number."""

    def test_set_core_num(self):
        """set_core_number must change the core count BigDL reports."""
        _, core_num = get_node_and_core_number()
        set_core_number(core_num + 1)
        _, new_core_num = get_node_and_core_number()
        assert new_core_num == core_num + 1, \
            "set_core_num failed, set the core" \
            " number to be {} but got {}".format(core_num + 1, new_core_num)
        # Restore the original value so later tests see unchanged state.
        set_core_number(core_num)
if __name__ == "__main__":
pytest.main([__file__])
| apache-2.0 |
sffjunkie/home-assistant | tests/components/binary_sensor/test_tcp.py | 17 | 2422 | """The tests for the TCP binary sensor platform."""
from copy import copy
from unittest.mock import patch, Mock
from homeassistant.components.sensor import tcp
from homeassistant.components.binary_sensor import tcp as bin_tcp
from tests.common import get_test_home_assistant
from tests.components.sensor import test_tcp
@patch('homeassistant.components.sensor.tcp.Sensor.update')
def test_setup_platform_valid_config(mock_update):
    """Should check the supplied config and call add_entities with Sensor."""
    add_entities = Mock()
    setup_result = bin_tcp.setup_platform(
        None, test_tcp.TEST_CONFIG, add_entities)
    assert setup_result is None, \
        "setup_platform() should return None if successful."
    assert add_entities.called
    added_entity = add_entities.call_args[0][0][0]
    assert isinstance(added_entity, bin_tcp.BinarySensor)
def test_setup_platform_invalid_config():
    """Should check the supplied config and return False if it is invalid."""
    broken_config = copy(test_tcp.TEST_CONFIG)
    broken_config.pop(tcp.CONF_HOST)
    assert bin_tcp.setup_platform(None, broken_config, None) is False
class TestTCPBinarySensor():
    """Test the TCP Binary Sensor."""

    def setup_class(cls):
        """Setup things to be run when tests are started."""
        # NOTE(review): pytest passes the class here even without
        # @classmethod; adding the decorator would be clearer.
        cls.hass = get_test_home_assistant()

    def teardown_class(cls):
        """Stop down everything that was started."""
        cls.hass.stop()

    def test_requires_additional_values(self):
        """Should require the additional config values specified."""
        config = copy(test_tcp.TEST_CONFIG)
        # Strip every required key; validation must then fail.
        for key in bin_tcp.BinarySensor.required:
            del config[key]
        assert len(config) != len(test_tcp.TEST_CONFIG)
        assert not bin_tcp.BinarySensor.validate_config(config)

    @patch('homeassistant.components.sensor.tcp.Sensor.update')
    def test_is_on_true(self, mock_update):
        """Should return True if _state is the same as value_on."""
        # Sensor.update is patched so constructing the sensor does no I/O.
        sensor = bin_tcp.BinarySensor(self.hass, test_tcp.TEST_CONFIG)
        sensor._state = test_tcp.TEST_CONFIG[tcp.CONF_VALUE_ON]
        assert sensor.is_on

    @patch('homeassistant.components.sensor.tcp.Sensor.update')
    def test_is_on_false(self, mock_update):
        """Should return False if _state is not the same as value_on."""
        sensor = bin_tcp.BinarySensor(self.hass, test_tcp.TEST_CONFIG)
        sensor._state = "%s abc" % test_tcp.TEST_CONFIG[tcp.CONF_VALUE_ON]
        assert not sensor.is_on
| mit |
Mitchkoens/sympy | bin/mailmap_update.py | 12 | 3323 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A tool to help keep .mailmap and AUTHORS up-to-date.
"""
# TODO:
# - Check doc/src/aboutus.rst
# - Make it easier to update .mailmap or AUTHORS with the correct entries.
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
from fabric.api import local, env
from fabric.colors import yellow, blue, green, red
from fabric.utils import error
mailmap_update_path = os.path.abspath(__file__)
mailmap_update_dir = os.path.dirname(mailmap_update_path)
sympy_top = os.path.split(mailmap_update_dir)[0]
sympy_dir = os.path.join(sympy_top, 'sympy')
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
from sympy.utilities.misc import filldedent
try:
# Only works in newer versions of fabric
env.colorize_errors = True
except AttributeError:
pass
# Collect the unique "Name <email>" author strings known to git.
git_command = 'git log --format="%aN <%aE>" | sort -u'
git_people = unicode(local(git_command, capture=True), 'utf-8').strip().split("\n")
from distutils.version import LooseVersion
# "git version " is 12 characters; the remainder is the version number.
git_ver = local('git --version', capture=True)[12:]
if LooseVersion(git_ver) < LooseVersion('1.8.4.2'):
    print(yellow("Please use a newer git version >= 1.8.4.2"))
with open(os.path.realpath(os.path.join(__file__, os.path.pardir,
    os.path.pardir, "AUTHORS"))) as fd:
    AUTHORS = unicode(fd.read(), 'utf-8')
# Everything before the first author in AUTHORS is prose; slice it off.
firstauthor = "Ondřej Čertík"
authors = AUTHORS[AUTHORS.find(firstauthor):].strip().split('\n')
# People who don't want to be listed in AUTHORS
authors_skip = ["Kirill Smelkov <kirr@landau.phys.spbu.ru>"]
# Count of AUTHORS entries marked '*' (contributions predating git).
predate_git = 0
exit1 = False
print(blue(filldedent("""Read the text at the top of AUTHORS and the text at
the top of .mailmap for information on how to fix the below errors. If
someone is missing from AUTHORS, add them where they would have been if they
were added after their first pull request was merged (checkout the merge
commit from the first pull request and see who is at the end of the AUTHORS
file at that commit.)""")))
print()
print(yellow("People who are in AUTHORS but not in git:"))
print()
for name in sorted(set(authors) - set(git_people)):
    if name.startswith("*"):
        # People who are in AUTHORS but predate git
        predate_git += 1
        continue
    exit1 = True
    print(name)
print()
print(yellow("People who are in git but not in AUTHORS:"))
print()
for name in sorted(set(git_people) - set(authors) - set(authors_skip)):
    exit1 = True
    print(name)
# + 1 because the last newline is stripped by strip()
authors_count = AUTHORS[AUTHORS.find(firstauthor):].strip().count("\n") + 1
# Adjust so the AUTHORS total is comparable with git's author count.
adjusted_authors_count = (
    authors_count
    - predate_git
    + len(authors_skip)
    )
git_count = len(git_people)
print()
print(yellow("There are {git_count} people in git, and {adjusted_authors_count} "
    "(adjusted) people from AUTHORS".format(git_count=git_count,
    adjusted_authors_count=adjusted_authors_count)))
if git_count != adjusted_authors_count:
    error("These two numbers are not the same!")
else:
    print()
    print(green(filldedent("""Congratulations. The AUTHORS and .mailmap files
appear to be up to date. You should now verify that doc/src/aboutus has %s
people.""" % authors_count)))
if exit1:
    print()
    print(red("There were errors. Please fix them."))
    sys.exit(1)
| bsd-3-clause |
tripplet/watch-cat | backend/LogHandler.py | 1 | 1234 | #!/usr/bin/env python
import webapp2
import jinja2
import os
from LogEntry import LogEntry
from WatchJob import WatchJob
from GermanTzInfo import GermanTzInfo
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class LogHandler(webapp2.RequestHandler):
    """Renders the most recent log entries for a named watch job."""

    @staticmethod
    def format_datetime(value):
        """Jinja filter: UTC datetime -> 'dd.mm.YYYY - HH:MM:SS' German local."""
        tz = GermanTzInfo()
        if value is None:
            return 'Never'
        return tz.utc_to_local(value).strftime('%d.%m.%Y - %H:%M:%S')

    @staticmethod
    def add_breakchars(value):
        """Jinja filter: allow soft line breaks after ':' and '.'."""
        return value.replace(':', ':<wbr/>').replace('.', '.<wbr/>')

    def get(self, job_name):
        """GET /log/<job_name>: render the job's latest 100 log entries."""
        page = jinja_environment.get_template('templates/log_template.htm')
        job = WatchJob.all().filter('name =', job_name).get()
        entries = (LogEntry.all()
                   .filter('job =', job)
                   .order('-event_time')
                   .run(limit=100))
        self.response.out.write(page.render(name=job_name, logging=entries))
# Register the static helpers as Jinja2 template filters.
jinja_environment.filters['format_datetime'] = LogHandler.format_datetime
jinja_environment.filters['add_breakchars'] = LogHandler.add_breakchars
# WSGI entry point: /log/<job_name> routes to LogHandler.get.
app = webapp2.WSGIApplication([('/log/(\w+)', LogHandler)], debug=False)
| agpl-3.0 |
TheProjecter/google-apis-client-generator | src/googleapis/codegen/api.py | 4 | 52242 | #!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create an API definition by interpreting a discovery document.
This module interprets a discovery document to create a tree of classes which
represent the API structure in a way that is useful for generating a library.
For each discovery element (e.g. schemas, resources, methods, ...) there is
a class to represent it which is directly usable in the templates. The
instances of those classes are annotated with extra variables for use
in the template which are language specific.
The current way to make use of this class is to create a programming language
specific subclass of Api, which adds annotations and template variables
appropriate for that language.
TODO(user): Refactor this so that the API can be loaded first, then annotated.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import collections
import json
import logging
import operator
import urlparse
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen import utilities
from googleapis.codegen.utilities import convert_size
_ADDITIONAL_PROPERTIES = 'additionalProperties'
_DEFAULT_SERVICE_HOST = 'www.googleapis.com'
_DEFAULT_OWNER_DOMAIN = 'google.com'
_DEFAULT_OWNER_NAME = 'Google'
_RECOGNIZED_GOOGLE_DOMAINS = (
'google.com',
'googleapis.com',
'googleplex.com'
)
_LOGGER = logging.getLogger('codegen')
class ApiException(Exception):
  """The base class for all API parsing exceptions."""

  def __init__(self, reason, def_dict=None):
    """Create an exception.

    Args:
      reason: (str) The human readable explanation of this exception.
      def_dict: (dict) The discovery dictionary we failed on.
    """
    super(ApiException, self).__init__()
    self._reason = reason
    self._def_dict = def_dict

  def __str__(self):
    # Only append the failing dictionary when one was supplied
    # (and is non-empty).
    if not self._def_dict:
      return self._reason
    return '%s: %s' % (self._reason, self._def_dict)
class Api(template_objects.CodeObject):
"""An API definition.
This class holds a discovery centric definition of an API. It contains
members such as "resources" and "schemas" which relate directly to discovery
concepts. It defines several properties that can be used in code generation
templates:
name: The API name.
version: The API version.
versionNoDots: The API version with all '.' characters replaced with '_'.
This is typically used in class names.
versionNoDash: The API version with all '-' characters replaced with '_'.
This is typically used in file names where '-' has meaning.
authScopes: The list of the OAuth scopes used by this API.
dataWrapper: True if the API definition contains the 'dataWrapper' feature.
methods: The list of top level API methods.
models: The list of API data models, both from the schema section of
discovery and from anonymous objects defined in method definitions.
parameters: The list of global method parameters (applicable to all methods)
resources: The list of API resources
"""
  def __init__(self, discovery_doc, language=None):
    """Construct an Api from a parsed discovery document.

    Args:
      discovery_doc: (dict) The discovery document for the API.
      language: (str) The target programming language, if known.
    """
    super(Api, self).__init__(discovery_doc, self,
                              wire_name=discovery_doc['name'])
    name = self.values['name']
    self._validator.ValidateApiName(name)
    # 'freebase' is exempt from version validation — presumably its
    # version string does not conform; TODO confirm.
    if name != 'freebase':
      self._validator.ValidateApiVersion(self.values['version'])
    canonical_name = self.values.get('canonicalName', name)
    self._class_name = self.ToClassName(canonical_name, self)
    # Guard against language implementor not taking care of spaces
    self._class_name = self._class_name.replace(' ', '')
    self._NormalizeOwnerInformation()
    self._language = language
    self._template_dir = None
    self._surface_features = {}
    self._schemas = {}
    self._methods_by_name = {}
    self._all_methods = []
    self.SetTemplateValue('className', self._class_name)
    self.SetTemplateValue('versionNoDots',
                          self.values['version'].replace('.', '_'))
    self.SetTemplateValue('versionNoDash',
                          self.values['version'].replace('-', '_'))
    self.SetTemplateValue('dataWrapper',
                          'dataWrapper' in discovery_doc.get('features', []))
    self.values.setdefault('title', name)
    if not self.values.get('revision'):
      self.values['revision'] = 'snapshot'
    self._NormalizeUrlComponents()
    # Information for variant subtypes, a dictionary of the format:
    #
    #  { 'wireName': {'discriminant': discriminant, 'value': value,
    #                 'schema': schema},
    #    ... }
    #
    # ... where wireName is the name of variant subtypes, discriminant
    # the field name of the discriminant, value the discriminant value
    # for this variant, and schema the base schema.
    #
    # This information cannot be stored in the referred schema at
    # reading time because at the time we read it from the base
    # schema, the referenced variant schemas may not yet be loaded. So
    # we first store it here, and after all schemas have been loaded,
    # update the schema template properties.
    self._variant_info = {}
    # Build data types and methods
    self._SetupModules()
    self.void_type = data_types.Void(self)
    self._BuildSchemaDefinitions()
    self._BuildResourceDefinitions()
    self.SetTemplateValue('resources', self._resources)
    # Make data models part of the api dictionary
    self.SetTemplateValue('models', self.ModelClasses())
    # Replace methods dict with Methods
    self._top_level_methods = []
    method_dict = self.values.get('methods') or {}
    for name in sorted(method_dict):
      self._top_level_methods.append(Method(self, name, method_dict[name]))
    self.SetTemplateValue('methods', self._top_level_methods)
    # Global parameters
    self._parameters = []
    param_dict = self.values.get('parameters') or {}
    for name in sorted(param_dict):
      parameter = Parameter(self, name, param_dict[name], self)
      self._parameters.append(parameter)
      # The 'alt' (response format) parameter gets its own template slot.
      if name == 'alt':
        self.SetTemplateValue('alt', parameter)
    self.SetTemplateValue('parameters', self._parameters)
    # Auth scopes
    self._authscopes = []
    if (self.values.get('auth') and
        self.values['auth'].get('oauth2') and
        self.values['auth']['oauth2'].get('scopes')):
      for value, auth_dict in sorted(
          self.values['auth']['oauth2']['scopes'].iteritems()):
        self._authscopes.append(AuthScope(self, value, auth_dict))
      self.SetTemplateValue('authscopes', self._authscopes)
  @property
  def all_schemas(self):
    """The dictionary of all the schema objects found in the API."""
    # Keyed by the dotted path of wire names (see DataTypeFromJson).
    return self._schemas
  def _SetupModules(self):
    """Compute and set the module(s) which this API belongs under."""
    # The containing module is based on the owner information.
    path = self.values.get('modulePath') or self.values.get('packagePath')
    self._containing_module = template_objects.Module(
        package_path=path,
        owner_name=self.values.get('owner'),
        owner_domain=self.values.get('ownerDomain'))
    self.SetTemplateValue('containingModule', self._containing_module)
    # The API is a child of the containing_module
    base = self.values['name']
    # TODO(user): Introduce a breaking change where we always prefer
    # canonicalName.
    if self.values.get('packagePath'):
      base = self.values.get('canonicalName') or base
    # When version_module is set, append the version so each API
    # version gets its own submodule.
    if self.values.get('version_module'):
      base = '%s/%s' % (base, self.values['versionNoDots'])
    self._module = template_objects.Module(package_path=base,
                                           parent=self._containing_module)
    self.SetTemplateValue('module', self._module)
    # The default module for data models defined by this API.
    self._model_module = template_objects.Module(package_path=None,
                                                 parent=self._module)
def _BuildResourceDefinitions(self):
"""Loop over the resources in the discovery doc and build definitions."""
self._resources = []
def_dict = self.values.get('resources') or {}
for name in sorted(def_dict):
resource = Resource(self, name, def_dict[name], parent=self)
self._resources.append(resource)
  def _BuildSchemaDefinitions(self):
    """Loop over the schemas in the discovery doc and build definitions."""
    schemas = self.values.get('schemas')
    if schemas:
      for name, def_dict in schemas.iteritems():
        # Upgrade the string format schema to a dict.
        if isinstance(def_dict, unicode):
          def_dict = json.loads(def_dict)
        self._schemas[name] = self.DataTypeFromJson(def_dict, name)
      # Late bind info for variant types, and mark the discriminant
      # field and value.
      for name, info in self._variant_info.iteritems():
        if name not in self._schemas:
          # The error will be reported elsewhere
          continue
        schema = self._schemas[name]
        for prop in schema.values.get('properties'):
          if prop.values['wireName'] == info['discriminant']:
            # Filter out the discriminant property as it is already
            # contained in the base type.
            schema.SetTemplateValue(
                'properties',
                [p for p in schema.values.get('properties') if p != prop])
            break
        # for/else: runs only when no discriminant property was found.
        else:
          logging.warn("Variant schema '%s' for base schema '%s' "
                       "has not the expected discriminant property '%s'.",
                       name, info['schema'].values['wireName'],
                       info['discriminant'])
        schema.SetTemplateValue('superClass', info['schema'].class_name)
        # TODO(user): baseType is for backwards compatability only. It should
        # have always been a different name. When the old Java generators roll
        # off, remove it.
        schema.SetTemplateValue('baseType', info['schema'].class_name)
        schema.SetTemplateValue('discriminantValue', info['value'])
def _NormalizeOwnerInformation(self):
"""Ensure that owner and ownerDomain are set to sane values."""
owner_domain = self.get('ownerDomain', '')
if not owner_domain:
root_url = self.get('rootUrl')
if root_url:
owner_domain = urlparse.urlparse(root_url).hostname
# Normalize google domains.
if any(owner_domain.endswith(d) for d in _RECOGNIZED_GOOGLE_DOMAINS):
owner_domain = 'google.com'
if owner_domain:
owner_domain = utilities.SanitizeDomain(owner_domain)
else:
owner_domain = _DEFAULT_OWNER_DOMAIN
self.SetTemplateValue('ownerDomain', owner_domain)
if not self.get('ownerName'):
if owner_domain == _DEFAULT_OWNER_DOMAIN:
owner_name = _DEFAULT_OWNER_NAME
else:
owner_name = owner_domain.replace('.', '_')
self.SetTemplateValue('ownerName', owner_name)
if not self.get('owner'):
self.SetTemplateValue('owner', self['ownerName'].lower())
  def _NormalizeUrlComponents(self):
    """Sets template values concerning the path to the service.

    Sets baseUrl, basePath and serviceHost from the values given or defaults
    based on what is available. Verifies them for safeness.  The hierarchy of
    the possible inputs is:
       use rootUrl + servicePath as the best choice if it exists (v1new)
       or use baseUrl (v1)
       or use basePath (v1)
       or restBasePath (v0.3)
       or default to 'api/version'

    Raises:
      ValueError: if the values available are inconsistent or disallowed.
    """
    # If both rootUrl and servicePath exist, they equal what is in baseUrl.
    root_url = self.values.get('rootUrl')
    service_path = self.values.get('servicePath')
    if root_url:
      # TODO(user): Revert to 'if not service_path' once oauth2 is fixed.
      if service_path is None:
        raise ValueError('servicePath is not defined')
      base_url = root_url + service_path
    else:
      base_url = self.values.get('baseUrl')
    # If we have a full path ('https://superman.appspot.com/kryptonite/hurts'),
    # then go with that, otherwise just use the various things which might
    # hint at the servicePath.
    best_path = (base_url
                 or self.values.get('basePath')
                 or self.values.get('restBasePath')
                 or '/%s/%s/' % (self.values['name'], self.values['version']))
    # '..' would allow escaping the service root; refuse it outright.
    if best_path.find('..') >= 0:
      raise ValueError('api path must not contain ".." (%s)' % best_path)
    # And let urlparse to the grunt work of normalizing and parsing.
    url_parts = urlparse.urlparse(best_path)
    scheme = url_parts.scheme or 'https'
    service_host = url_parts.netloc or _DEFAULT_SERVICE_HOST
    base_path = url_parts.path
    # TODO(user): Replace use of basePath & serviceHost in templates with
    # rootUrl and servicePath
    self._api.SetTemplateValue('basePath', base_path)
    self._api.SetTemplateValue('serviceHost',
                               '%s://%s' % (scheme, service_host))
    if not root_url:
      self._api.SetTemplateValue('rootUrl', '%s://%s/' % (scheme, service_host))
    # TODO(user): Revert to 'if not service_path' once oauth2 is fixed.
    if service_path is None:
      self._api.SetTemplateValue('servicePath', base_path[1:])
    # Make sure template writers do not revert
    self._api.DeleteTemplateValue('baseUrl')
def ModelClasses(self):
"""Return all the model classes."""
ret = set(
s for s in self._schemas.itervalues()
if isinstance(s, Schema) or isinstance(s, data_types.MapDataType))
return sorted(ret, key=operator.attrgetter('class_name'))
def TopLevelModelClasses(self):
"""Return the models which are not children of another model."""
return [m for m in self.ModelClasses() if not m.parent]
  def DataTypeFromJson(self, type_dict, default_name, parent=None,
                       wire_name=None):
    """Returns a schema object represented by a JSON Schema dictionary.

    Evaluate a JSON schema dictionary and return an appropriate schema object.
    If a data type is defined in-line, then create the schema dynamically. If
    the schema is a $ref to another, return the previously created schema or
    a lazy reference.

    If the type_dict is None, a blank schema will be created.

    Args:
      type_dict: A dict of the form expected of a request or response member
        of a method description.   See the Discovery specification for more.
      default_name: The unique name to give the schema if we have to create it.
      parent: The schema where I was referenced. If we cannot determine that
        this is a top level schema, set the parent to this.
      wire_name: The name which will identify objects of this type in data on
        the wire.
    Returns:
      A Schema object.
    """
    # new or not initialized, create a fresh one
    schema = Schema.Create(self, default_name, type_dict or {}, wire_name,
                           parent)
    # Only put it in our by-name list if it is a real object
    if isinstance(schema, Schema) or isinstance(schema, data_types.MapDataType):
      # Use the path to the schema as a key. This means that an anonymous class
      # for the 'person' property under the schema 'Activity' will have the
      # unique name 'Activity.person', rather than 'ActivityPerson'.
      path = '.'.join(
          [a.values.get('wireName', '<anon>') for a in schema.full_path])
      _LOGGER.debug('DataTypeFromJson: add %s to cache', path)
      self._schemas[path] = schema
    return schema
  def AddMethod(self, method):
    """Add a new method to the set of all methods.

    Also indexes the method by its full RPC name so MethodByName can
    find it.

    Args:
      method: (Method) The method to register.
    """
    self._all_methods.append(method)
    self._methods_by_name[method.values['rpcMethod']] = method
def MethodByName(self, method_name):
"""Find a method by name.
Args:
method_name: (str) the full RPC name of a method defined by this API.
Returns:
Method object or None if not found.
"""
return self._methods_by_name.get(method_name)
def SchemaByName(self, schema_name):
"""Find a schema by name.
Args:
schema_name: (str) name of a schema defined by this API.
Returns:
Schema object or None if not found.
"""
return self._schemas.get(schema_name, None)
def SetVariantInfo(self, ref, discriminant, value, schema):
"""Sets variant info for the given reference."""
if ref in self._variant_info:
logging.warning("Base type of '%s' changed from '%s' to '%s'. "
"This is an indication that a variant schema is used "
"from multiple base schemas and may result in an "
"inconsistent model.",
ref, self._base_type[ref].wireName, schema.wireName)
self._variant_info[ref] = {'discriminant': discriminant, 'value': value,
'schema': schema}
  def VisitAll(self, func):
    """Visit all nodes of an API tree and apply a function to each.

    Walks a tree and calls a function on each element of it. This should be
    called after the API is fully loaded.

    Args:
      func: (function) Method to call on each object.
    """
    _LOGGER.debug('Applying function to all nodes')
    func(self._containing_module)
    func(self._module)
    func(self._model_module)
    for resource in self.values['resources']:
      self._VisitResource(resource, func)
    # Top level methods
    for method in self.values['methods']:
      self._VisitMethod(method, func)
    for parameter in self.values['parameters']:
      func(parameter)
      func(parameter.data_type)
    for schema in self._schemas.values():
      self._VisitSchema(schema, func)
    for scope in self.GetTemplateValue('authscopes') or []:
      func(scope)
def _VisitMethod(self, method, func):
"""Visit a method, calling a function on every child.
Args:
method: (Method) The Method to visit.
func: (function) Method to call on each object.
"""
func(method)
for parameter in method.parameters:
func(parameter)
def _VisitResource(self, resource, func):
"""Visit a resource tree, calling a function on every child.
Calls down recursively to sub resources.
Args:
resource: (Resource) The Resource to visit.
func: (function) Method to call on each object.
"""
func(resource)
for method in resource.values['methods']:
self._VisitMethod(method, func)
for r in resource.values['resources']:
self._VisitResource(r, func)
  def _VisitSchema(self, schema, func):
    """Visit a schema tree, calling a function on every child.

    Args:
      schema: (Schema) The Schema to visit.
      func: (function) Method to call on each object.
    """
    func(schema)
    func(schema.module)
    for prop in schema.values.get('properties', []):
      func(prop)
    # NOTE(review): this iterates self.children (the Api's children) on
    # every schema visit; schema.children looks like it was intended —
    # confirm before changing.
    for child in self.children:
      func(child)
# Do not warn about unused arguments, pylint: disable=unused-argument
def ToClassName(self, s, element, element_type=None):
"""Convert a name to a suitable class name in the target language.
This default implementation camel cases the string, which is appropriate
for some languages. Subclasses are encouraged to override this.
Args:
s: (str) A rosy name of data element.
element: (object) The object we are making a class name for.
element_type: (str) Deprecated. The kind of object we are making a class
name for. E.g. resource, method, schema.
TODO(user): replace type in favor of class of element, but that will
require changing the place where we call ToClassName with no element.
Returns:
A name suitable for use as a class in the generator's target language.
"""
return utilities.CamelCase(s).replace(' ', '')
def NestedClassNameForProperty(self, name, schema):
"""Returns the class name of an object nested in a property."""
# TODO(user): This functionality belongs in the language model, but
# because of the way the api is bootstrapped, that isn't available when we
# need it. When language model is available from the start, this should be
# moved.
return '%s%s' % (schema.class_name, utilities.CamelCase(name))
  @property
  def class_name(self):
    # Language-appropriate class name computed in __init__.
    return self.values['className']

  @property
  def model_module(self):
    # Default module for data models defined by this API.
    return self._model_module

  @property
  def containing_module(self):
    # Module containing the API module (derived from owner information).
    return self._containing_module

  @property
  def all_methods(self):
    """All the methods in the entire API."""
    return self._all_methods

  @property
  def top_level_methods(self):
    """All the methods at the API top level (not in a resource)."""
    return self._top_level_methods
class Schema(data_types.ComplexDataType):
"""The definition of a schema."""
  def __init__(self, api, default_name, def_dict, parent=None):
    """Construct a Schema object from a discovery dictionary.

    Schemas represent data models in the API.

    Args:
      api: (Api) the Api instance owning the Schema
      default_name: (str) the default name of the Schema. If there is an 'id'
        member in the definition, that is used for the name instead.
      def_dict: (dict) a discovery dictionary
      parent: (Schema) The containing schema. To be used to establish unique
        names for anonymous sub-schemas.
    """
    super(Schema, self).__init__(default_name, def_dict, api, parent=parent)
    name = def_dict.get('id', default_name)
    _LOGGER.debug('Schema(%s)', name)
    # Protect against malicious discovery
    template_objects.CodeObject.ValidateName(name)
    self.SetTemplateValue('wireName', name)
    class_name = api.ToClassName(name, self, element_type='schema')
    self.SetTemplateValue('className', class_name)
    self.SetTemplateValue('isSchema', True)
    self.SetTemplateValue('properties', [])
    # Prefer a module declared in the schema itself; otherwise fall back
    # to the API's default model module.
    self._module = (template_objects.Module.ModuleFromDictionary(self.values)
                    or api.model_module)
  @classmethod
  def Create(cls, api, default_name, def_dict, wire_name, parent=None):
    """Construct a Schema or DataType from a discovery dictionary.

    Schemas contain either object declarations, simple type declarations, or
    references to other Schemas.  Object declarations conceptually map to real
    classes.  Simple types will map to a target language built-in type.
    References should effectively be replaced by the referenced Schema.

    Args:
      api: (Api) the Api instance owning the Schema
      default_name: (str) the default name of the Schema. If there is an 'id'
        member in the definition, that is used for the name instead.
      def_dict: (dict) a discovery dictionary
      wire_name: The name which will identify objects of this type in data on
        the wire. The path of wire_names can trace an item back through
        discovery.
      parent: (Schema) The containing schema. To be used to establish nesting
        for anonymous sub-schemas.

    Returns:
      A Schema or DataType.

    Raises:
      ApiException: If the definition dict is not correct.
    """
    schema_id = def_dict.get('id')
    if schema_id:
      name = schema_id
    else:
      name = default_name
    class_name = api.ToClassName(name, None, element_type='schema')
    _LOGGER.debug('Create: %s, parent=%s', name,
                  parent.values.get('wireName', '<anon>') if parent else 'None')
    # Schema objects come in several patterns.
    #
    # 1. Simple objects
    #    { type: object, properties: { "foo": {schema} ... }}
    #
    # 2. Maps of objects
    #    { type: object, additionalProperties: { "foo": {inner_schema} ... }}
    #
    #    What we want is a data type which is Map<string, {inner_schema}>
    #    The schema we create here is essentially a built in type which we
    #    don't want to generate a class for.
    #
    # 3. Arrays of objects
    #    { type: array, items: { inner_schema }}
    #
    #    Same kind of issue as the map, but with List<{inner_schema}>
    #
    # 4. Primitive data types, described by type and format.
    #    { type: string, format: int32 }
    #
    # 5. Refs to another schema.
    #    { $ref: name }
    #
    # 6. Variant schemas
    #    { type: object, variant: { discriminant: "prop", map:
    #             [ { 'type_value': value, '$ref': wireName }, ... ] } }
    #
    #    What we do is map the variant schema to a schema with a single
    #    property for the discriminant. To that property, we attach
    #    the variant map which specifies which discriminator values map
    #    to which schema references. We also collect variant information
    #    in the api so we can later associate discriminator value and
    #    base type with the generated variant subtypes.
    if 'type' in def_dict:
      # The 'type' field of the schema can either be 'array', 'object', or a
      # base json type.
      json_type = def_dict['type']
      if json_type == 'object':
        # Look for variants
        variant = def_dict.get('variant')
        if variant:
          return cls._CreateVariantType(variant, api, name,
                                        def_dict, wire_name, parent)
        # Look for full object definition.  You can have properties or
        # additionalProperties, but it does not  do anything useful to have
        # both.
        # Replace properties dict with Property's
        props = def_dict.get('properties')
        if props:
          # This case 1 from above
          return cls._CreateObjectWithProperties(props, api, name,
                                                 def_dict, wire_name, parent)
        # Look for case 2
        additional_props = def_dict.get(_ADDITIONAL_PROPERTIES)
        if additional_props:
          return cls._CreateMapType(additional_props, api, name, wire_name,
                                    class_name, parent)
        # no properties
        return cls._CreateSchemaWithoutProperties(api, name, def_dict,
                                                  wire_name, parent)
      elif json_type == 'array':
        # Case 3: Look for array definition
        return cls._CreateArrayType(api, def_dict, wire_name, class_name,
                                    schema_id, parent)
      else:
        # Case 4: This must be a basic type.  Create a DataType for it.
        return data_types.PrimitiveDataType(def_dict, api, parent=parent)
    referenced_schema = def_dict.get('$ref')
    if referenced_schema:
      # Case 5: Reference to another Schema.
      #
      # There are 4 ways you can see '$ref' in discovery.
      # 1. In a property of a schema or a method request/response, pointing
      #    back to a previously defined schema
      # 2. As above, pointing to something not defined yet.
      # 3. In a method request or response or property of a schema pointing to
      #    something undefined.
      #
      # For case 1, the schema will be in the API name to schema map.
      #
      # For case 2, just creating this placeholder here is fine.  When the
      # actual schema is hit in the loop in _BuildSchemaDefinitions, we will
      # replace the entry and DataTypeFromJson will resolve the to the new def.
      #
      # For case 3, we will end up with a dangling reference and fail later.
      schema = api.SchemaByName(referenced_schema)
      # The stored "schema" may not be an instance of Schema, but rather a
      # data_types.PrimitiveDataType, which has no 'wireName' value.
      if schema:
        _LOGGER.debug('Schema.Create: %s => %s',
                      default_name, schema.values.get('wireName', '<unknown>'))
        return schema
      return data_types.SchemaReference(referenced_schema, api)
    # Neither 'type' nor '$ref': the definition is malformed.
    raise ApiException('Cannot decode JSON Schema for: %s' % def_dict)
@classmethod
def _CreateObjectWithProperties(cls, props, api, name, def_dict,
wire_name, parent):
properties = []
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
for prop_name in sorted(props):
prop_dict = props[prop_name]
_LOGGER.debug(' adding prop: %s to %s', prop_name, name)
properties.append(Property(api, schema, prop_name, prop_dict))
# Some APIs express etag directly in the response, others don't.
# Knowing that we have it explicitly makes special case code generation
# easier
if prop_name == 'etag':
schema.SetTemplateValue('hasEtagProperty', True)
schema.SetTemplateValue('properties', properties)
# check for @ clashing. E.g. No 'foo' and '@foo' in the same object.
names = set()
for p in properties:
wire_name = p.GetTemplateValue('wireName')
no_at_sign = wire_name.replace('@', '')
if no_at_sign in names:
raise ApiException(
'Property name clash in schema %s:'
' %s conflicts with another property' % (name, wire_name))
names.add(no_at_sign)
return schema
@classmethod
def _CreateVariantType(cls, variant, api, name, def_dict,
wire_name, parent):
"""Creates a variant type."""
variants = collections.OrderedDict()
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
discriminant = variant['discriminant']
# Walk over variants building the variant map and register
# variant info on the api.
for variant_entry in variant['map']:
discriminant_value = variant_entry['type_value']
variant_schema = api.DataTypeFromJson(variant_entry, name, parent=parent)
variants[discriminant_value] = variant_schema
# Set variant info. We get the original wire name from the JSON properties
# via '$ref' it is not currently accessible via variant_schema.
api.SetVariantInfo(variant_entry.get('$ref'), discriminant,
discriminant_value, schema)
prop = Property(api, schema, discriminant, {'type': 'string'},
key_for_variants=variants)
schema.SetTemplateValue('is_variant_base', True)
schema.SetTemplateValue('discriminant', prop)
schema.SetTemplateValue('properties', [prop])
return schema
@classmethod
def _CreateMapType(cls, additional_props, api, name, wire_name,
class_name, parent):
_LOGGER.debug('Have only additionalProps for %s, dict=%s',
name, additional_props)
# TODO(user): Remove this hack at the next large breaking change
# The "Items" added to the end is unneeded and ugly. This is for
# temporary backwards compatibility. Same for _CreateArrayType().
if additional_props.get('type') == 'array':
name = '%sItem' % name
subtype_name = additional_props.get('id', name + 'Element')
# Note, since this is an interim, non class just to hold the map
# make the parent schema the parent passed in, not myself.
_LOGGER.debug('name:%s, wire_name:%s, subtype name %s', name, wire_name,
subtype_name)
# When there is a parent, we synthesize a wirename when none exists.
# Purpose is to avoid generating an extremely long class name, since we
# don't do so for other nested classes.
if parent and wire_name:
base_wire_name = wire_name + 'Element'
else:
base_wire_name = None
base_type = api.DataTypeFromJson(
additional_props, subtype_name, parent=parent,
wire_name=base_wire_name)
map_type = data_types.MapDataType(name, base_type, parent=parent,
wire_name=wire_name)
map_type.SetTemplateValue('className', class_name)
_LOGGER.debug(' %s is MapOf<string, %s>',
class_name, base_type.class_name)
return map_type
@classmethod
def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name,
parent):
if parent:
# code objects have __getitem__(), but not .get()
try:
pname = parent['id']
except KeyError:
pname = '<unknown>'
name_to_log = '%s.%s' % (pname, name)
else:
name_to_log = name
logging.warning('object without properties %s: %s',
name_to_log, def_dict)
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
return schema
@classmethod
def _CreateArrayType(cls, api, def_dict, wire_name,
class_name, schema_id, parent):
items = def_dict.get('items')
if not items:
raise ApiException('array without items in: %s' % def_dict)
tentative_class_name = class_name
# TODO(user): THIS IS STUPID. We should not rename things items.
# if we have an anonymous type within a map or array, it should be
# called 'Item', and let the namespacing sort it out.
if schema_id:
_LOGGER.debug('Top level schema %s is an array', class_name)
tentative_class_name += 'Items'
base_type = api.DataTypeFromJson(items, tentative_class_name,
parent=parent, wire_name=wire_name)
_LOGGER.debug(' %s is ArrayOf<%s>', class_name, base_type.class_name)
array_type = data_types.ArrayDataType(tentative_class_name, base_type,
wire_name=wire_name,
parent=parent)
if schema_id:
array_type.SetTemplateValue('className', schema_id)
return array_type
  @property
  def class_name(self):
    """The generated class name for this schema (template value 'className')."""
    return self.values['className']
  @property
  def anonymous(self):
    """True if the discovery document did not give this schema an 'id'."""
    return 'id' not in self.raw
  @property
  def properties(self):
    """The list of Property objects built for this schema."""
    return self.values['properties']
  @property
  def isContainerWrapper(self):
    """Is this schema just a simple wrapper around another container.

    A schema is just a wrapper for another datatype if it is an object that
    contains just a single container datatype and (optionally) a kind and
    etag field. This may be used by language generators to create iterators
    directly on the schema. E.g. You could have
       SeriesList ret = api.GetSomeSeriesMethod(args).Execute();
       for (series in ret) { ... }
    rather than
       for (series in ret->items) { ... }

    Returns:
      True if this schema wraps exactly one container property (plus
      optional 'kind'/'etag' fields), False otherwise.
    """
    return self._GetPropertyWhichWeWrap() is not None
  @property
  def containerProperty(self):
    """If isContainerWrapper, returns the property which holds the container."""
    return self._GetPropertyWhichWeWrap()
def _GetPropertyWhichWeWrap(self):
"""Returns the property which is the type we are wrapping."""
container_property = None
for p in self.values['properties']:
if p.values['wireName'] == 'kind' or p.values['wireName'] == 'etag':
continue
if p.data_type.GetTemplateValue('isContainer'):
if container_property:
return None
container_property = p
else:
return None
return container_property
def __str__(self):
return '<%s Schema {%s}>' % (self.values['wireName'], self.values)
class Property(template_objects.CodeObject):
  """The definition of a schema property.

  Example property in the discovery schema:
      "id": {"type": "string"}
  """

  def __init__(self, api, schema, name, def_dict, key_for_variants=None):
    """Construct a Property.

    A Property requires several elements in its template value dictionary
    which are set here:
      wireName: the string which labels this Property in the JSON
          serialization.
      dataType: the DataType of this property.

    Args:
      api: (Api) The Api which owns this Property
      schema: (Schema) the schema this Property is part of
      name: (string) the name for this Property
      def_dict: (dict) the JSON schema dictionary
      key_for_variants: (dict) if given, maps discriminator values to
          variant schemas.

    Raises:
      ApiException: If we have an array type without object definitions.
    """
    super(Property, self).__init__(def_dict, api, wire_name=name)
    self.ValidateName(name)
    self.schema = schema
    self._key_for_variants = key_for_variants
    # TODO(user): find a better way to mark a schema as an array type
    # so we can display schemas like BlogList in method responses
    try:
      is_items_array = (self.values['wireName'] == 'items'
                        and self.values['type'] == 'array')
    except KeyError:
      is_items_array = False
    if is_items_array:
      self.schema.values['isList'] = True
    # If the schema value for this property defines a new object directly,
    # rather than refering to another schema, we will have to create a class
    # name for it. We create a unique name by prepending the schema we are
    # in to the object name.
    nested_class_name = api.NestedClassNameForProperty(name, schema)
    self._data_type = api.DataTypeFromJson(def_dict, nested_class_name,
                                           parent=schema, wire_name=name)

  @property
  def code_type(self):
    """Language-specific type of the underlying data type."""
    if self._language_model:
      self._data_type.SetLanguageModel(self._language_model)
    return self._data_type.code_type

  @property
  def safe_code_type(self):
    """Language-specific 'safe' type of the underlying data type."""
    if self._language_model:
      self._data_type.SetLanguageModel(self._language_model)
    return self._data_type.safe_code_type

  @property
  def data_type(self):
    """The DataType computed for this property."""
    return self._data_type

  @property
  def is_variant_key(self):
    """The variant map if this property is a variant discriminator."""
    return self._key_for_variants

  @property
  def variant_map(self):
    """Maps discriminator values to variant schemas, if any."""
    return self._key_for_variants
class Resource(template_objects.CodeObject):
  """The definition of an API resource: a named set of methods and
  sub-resources."""

  def __init__(self, api, name, def_dict, parent=None):
    """Creates a Resource.

    Args:
      api: (Api) The Api which owns this Resource.
      name: (string) The discovery name of the Resource.
      def_dict: (dict) The discovery dictionary for this Resource.
      parent: (CodeObject) The resource containing this method, if any. Top
         level resources have the API as a parent.
    """
    super(Resource, self).__init__(def_dict, api, parent=parent, wire_name=name)
    self.ValidateName(name)
    self.SetTemplateValue(
        'className', api.ToClassName(name, self, element_type='resource'))
    # Replace the raw methods dict with Method objects, sorted by name.
    method_dict = self.values.get('methods') or {}
    self._methods = [Method(api, method_name, method_dict[method_name],
                            parent=self)
                     for method_name in sorted(method_dict)]
    self.SetTemplateValue('methods', self._methods)
    # Build sub-resources the same way.
    sub_dict = self.values.get('resources') or {}
    self._resources = [Resource(api, sub_name, sub_dict[sub_name], parent=self)
                       for sub_name in sorted(sub_dict)]
    self.SetTemplateValue('resources', self._resources)

  @property
  def methods(self):
    """The Method objects of this resource, sorted by discovery name."""
    return self._methods

  @property
  def methods_dict(self):
    """Maps each method's wireName to its Method object."""
    return {method['wireName']: method for method in self._methods}
class AuthScope(template_objects.CodeObject):
  """The definition of an auth scope.

  An AuthScope defines these template values
    value: The scope url
    name: a sanitized version of the value, transformed so it generally can
        be used as an identifier in code. Deprecated, use constantName
    description: the description of the scope.

  It also provides a template property which can be used after a language
  binding is set.
    constantName: A transformation of the value so it is suitable as a
        constant name in the specific language.
  """
  GOOGLE_PREFIX = 'https://www.googleapis.com/auth/'
  HTTPS_PREFIX = 'https://'

  def __init__(self, api, value, def_dict):
    """Construct an auth scope.

    Args:
      api: (Api) The Api which owns this Property
      value: (string) The unique identifier of this scope, often a URL
      def_dict: (dict) The discovery dictionary for this auth scope.
    """
    super(AuthScope, self).__init__(def_dict, api, wire_name=value)
    self._module = api.module
    # Drop trailing '/' characters before deriving identifier names.
    value = value.rstrip('/')
    self.SetTemplateValue('value', value)
    if 'description' not in self.values:
      self.SetTemplateValue('description', value)
    # Strip the common prefix to get a unique identifying name.
    if value.startswith(AuthScope.GOOGLE_PREFIX):
      scope_id = value[len(AuthScope.GOOGLE_PREFIX):]
    elif value.startswith(AuthScope.HTTPS_PREFIX):
      # Some common scopes are just a URL.
      scope_id = value[len(AuthScope.HTTPS_PREFIX):]
    else:
      scope_id = value
    # We preserve the value stripped of the most common prefixes so we can
    # use it for building constantName in templates.
    self.SetTemplateValue('lastPart', scope_id)
    # Replace all non-alphanumerics with '_' to form 'name'.
    sanitized = ''.join(c if c.isalnum() else '_' for c in scope_id.upper())
    self.SetTemplateValue('name', sanitized)

  @property
  def constantName(self):  # pylint: disable=g-bad-name
    """Overrides default behavior of constantName."""
    return self._language_model.ApplyPolicy('constant', self,
                                            self.values['lastPart'])
class Method(template_objects.CodeObject):
  """The definition of a method."""

  def __init__(self, api, name, def_dict, parent=None):
    """Construct a method.

    Methods in REST discovery are inside of a resource. Note that the method
    name and id are calculable from each other. id will always be equal to
    api_name.resource_name[.sub_resource...].method_name. At least it should
    be, as that is the transformation Discovery makes from the API definition,
    which is essentially a flat list of methods, into a hierarchy of resources.

    Args:
      api: (Api) The Api which owns this Method.
      name: (string) The discovery name of the Method.
      def_dict: (dict) The discovery dictionary for this Method.
      parent: (CodeObject) The resource containing this Method, if any.

    Raises:
      ApiException: If the httpMethod type is not one we know how to
          handle.
    """
    super(Method, self).__init__(def_dict, api, parent=(parent or api))
    # TODO(user): Fix java templates to name vs. wireName correctly. Then
    # change the __init__ to have wire_name=def_dict.get('id') or name
    # then eliminate this line.
    self.SetTemplateValue('wireName', name)
    self.ValidateName(name)
    class_name = api.ToClassName(name, self, element_type='method')
    if parent and class_name == parent.values['className']:
      # Some languages complain when the collection name is the same as the
      # method name.
      class_name = '%sRequest' % class_name
    # The name is the key of the dict defining use. The id field is what you
    # have to use to call the method via RPC. That is unique, name might not be.
    self.SetTemplateValue('name', name)
    # Fix up very old discovery, which does not have an id.
    if 'id' not in self.values:
      self.values['id'] = name
    self.SetTemplateValue('className', class_name)
    http_method = def_dict.get('httpMethod', 'POST').upper()
    self.SetTemplateValue('httpMethod', http_method)
    self.SetTemplateValue('rpcMethod',
                          def_dict.get('rpcMethod') or def_dict['id'])
    rest_path = def_dict.get('path') or def_dict.get('restPath')
    # TODO(user): if rest_path is not set, raise a good error and fail fast.
    self.SetTemplateValue('restPath', rest_path)
    # Figure out the input and output types and schemas for this method.
    expected_request = self.values.get('request')
    if expected_request:
      # TODO(user): RequestBody is only used if the schema is anonymous.
      # When we go to nested models, this could be a nested class off the
      # Method, making it unique without the silly name. Same for ResponseBody.
      request_schema = api.DataTypeFromJson(expected_request,
                                            '%sRequestContent' % name,
                                            parent=self)
      self.SetTemplateValue('requestType', request_schema)
    expected_response = def_dict.get('response') or def_dict.get('returns')
    if expected_response:
      response_schema = api.DataTypeFromJson(expected_response,
                                             '%sResponse' % name,
                                             parent=self)
      if self.values['wireName'] == 'get':
        response_schema.values['associatedResource'] = parent
      self.SetTemplateValue('responseType', response_schema)
    else:
      self.SetTemplateValue('responseType', api.void_type)
    # Make sure we can handle this method type and do any fixups.
    if http_method not in ['DELETE', 'GET', 'OPTIONS', 'PATCH', 'POST', 'PUT',
                           'PROPFIND', 'PROPPATCH', 'REPORT']:
      raise ApiException('Unknown HTTP method: %s' % http_method, def_dict)
    if http_method == 'GET':
      self.SetTemplateValue('requestType', None)
    # Replace parameters dict with Parameters. We try to order them by their
    # position in the request path so that the generated code can track the
    # more human readable definition, rather than the order of the parameters
    # in the discovery doc.
    order = self.values.get('parameterOrder', [])
    req_parameters = []
    opt_parameters = []
    # NOTE(review): this loop rebinds the constructor arguments 'name' and
    # 'def_dict'. They are not read again afterwards, but beware when
    # editing below this point.
    for name, def_dict in self.values.get('parameters', {}).iteritems():
      param = Parameter(api, name, def_dict, self)
      if name == 'alt':
        # Treat the alt parameter differently
        self.SetTemplateValue('alt', param)
        continue
      # Standard params are part of the generic request class
      # We want to push all parameters that aren't declared inside
      # parameterOrder after those that are.
      if param.values['wireName'] in order:
        req_parameters.append(param)
      else:
        # optional parameters are appended in the order they're declared.
        opt_parameters.append(param)
    # pylint: disable=g-long-lambda
    req_parameters.sort(lambda x, y: cmp(order.index(x.values['wireName']),
                                         order.index(y.values['wireName'])))
    req_parameters.extend(opt_parameters)
    self.SetTemplateValue('parameters', req_parameters)
    self._InitMediaUpload(parent)
    self._InitPageable(api)
    api.AddMethod(self)

  def _InitMediaUpload(self, parent):
    """Extract mediaUpload info: accepted MIME ranges, size limits and the
    supported upload protocols."""
    media_upload = self.values.get('mediaUpload')
    if media_upload:
      if parent:
        parent.SetTemplateValue('isMedia', True)
      # Get which MIME Media Ranges are accepted for media uploads to this
      # method.
      accepted_mime_ranges = media_upload.get('accept')
      self.SetTemplateValue('accepted_mime_ranges', accepted_mime_ranges)
      max_size = media_upload.get('maxSize')
      self.SetTemplateValue('max_size', max_size)
      self.SetTemplateValue('max_size_bytes',
                            convert_size.ConvertSize(max_size))
      # Find which upload protocols are supported.
      upload_protocols = media_upload['protocols']
      for upload_protocol in upload_protocols:
        self._SetUploadTemplateValues(
            upload_protocol, upload_protocols[upload_protocol])

  def _InitPageable(self, api):
    """Mark the method pageable when the response has a nextPageToken
    property and the method accepts an optional pageToken parameter."""
    response_type = self.values.get('responseType')
    if (response_type != api.void_type
        and self.FindCodeObjectWithWireName(
            response_type.values.get('properties'), 'nextPageToken')
        and self.FindCodeObjectWithWireName(
            self.optional_parameters, 'pageToken')):
      self.SetTemplateValue('isPageable', True)

  def _SetUploadTemplateValues(self, upload_protocol, protocol_dict):
    """Sets upload specific template values.

    Args:
      upload_protocol: (str) The name of the upload protocol. Eg: 'simple' or
        'resumable'.
      protocol_dict: (dict) The dictionary that corresponds to this upload
        protocol. It typically contains keys like 'path', 'multipart' etc.
    """
    self.SetTemplateValue('%s_upload_supported' % upload_protocol, True)
    upload_path = protocol_dict.get('path')
    if upload_path:
      self.SetTemplateValue('%s_upload_path' % upload_protocol, upload_path)
    self.SetTemplateValue('%s_upload_multipart' % upload_protocol,
                          protocol_dict.get('multipart', False))

  @property
  def media_upload_parameters(self):
    """The raw mediaUpload dict from discovery, or None."""
    return self.values.get('mediaUpload')

  @property
  def parameters(self):
    """All Parameter objects, required (in parameterOrder) first."""
    return self.values['parameters']

  @property
  def optional_parameters(self):
    """Parameters the caller may omit."""
    return [p for p in self.values['parameters'] if not p.required]

  @property
  def required_parameters(self):
    """Parameters the caller must supply."""
    return [p for p in self.values['parameters'] if p.required]

  @property
  def path_parameters(self):
    """Parameters interpolated into the REST path."""
    return [p for p in self.values['parameters'] if p.location == 'path']

  @property
  def query_parameters(self):
    """Parameters sent in the query string."""
    return [p for p in self.values['parameters'] if p.location == 'query']

  @staticmethod
  def FindCodeObjectWithWireName(things, wire_name):
    """Looks for an element having the given wire_name.

    Args:
      things: (array of DataType) List of parameters or properties to search.
      wire_name: (str) The wireName we are looking to find.

    Returns:
      None or element with the given wire_name.
    """
    if not things: return None
    for e in things:
      if e.values['wireName'] == wire_name: return e
    return None

  #
  # Expose some properties with the naming convention we use in templates
  #
  def optionalParameters(self):  # pylint: disable=g-bad-name
    """Template-facing alias for optional_parameters."""
    return self.optional_parameters

  def requiredParameters(self):  # pylint: disable=g-bad-name
    """Template-facing alias for required_parameters."""
    return self.required_parameters

  def pathParameters(self):  # pylint: disable=g-bad-name
    """Template-facing alias for path_parameters."""
    return self.path_parameters

  def queryParameters(self):  # pylint: disable=g-bad-name
    """Template-facing alias for query_parameters."""
    return self.query_parameters
class Parameter(template_objects.CodeObject):
  """The definition of a method parameter."""

  def __init__(self, api, name, def_dict, method):
    super(Parameter, self).__init__(def_dict, api, parent=method,
                                    wire_name=name)
    self.ValidateName(name)
    # NOTE(review): this stores the Api object, not a Schema. It matches
    # the original code, but looks like it deserves confirmation.
    self.schema = api
    # TODO(user): Deal with dots in names better. What we should do is:
    # For x.y, x.z create a little class X, with members y and z. Then
    # have the constructor method take an X.
    self._repeated = self.values.get('repeated', False)
    self._required = self.values.get('required', False)
    self._location = (self.values.get('location')
                      or self.values.get('restParameterType')
                      or 'query')
    enum_values = self.values.get('enum')
    if enum_values:
      self._data_type = data_types.Enum(def_dict,
                                        api,
                                        name,
                                        enum_values,
                                        self.values.get('enumDescriptions'),
                                        parent=method)
      self.SetTemplateValue('enumType', self._data_type)
    else:
      self._data_type = data_types.PrimitiveDataType(def_dict, api, parent=self)
    if self._repeated:
      self._data_type = data_types.ArrayDataType(name, self._data_type,
                                                 parent=self)

  @property
  def repeated(self):
    """True if the parameter may be supplied multiple times."""
    return self._repeated

  @property
  def required(self):
    """True if the caller must supply this parameter."""
    return self._required

  @property
  def location(self):
    """Where the parameter travels: 'path' or 'query'."""
    return self._location

  @property
  def code_type(self):
    """Language-specific type of the underlying data type."""
    return self._data_type.code_type

  @property
  def data_type(self):
    """The DataType computed for this parameter."""
    return self._data_type
| apache-2.0 |
idea4bsd/idea4bsd | python/helpers/profiler/thriftpy/transport/buffered/__init__.py | 25 | 1561 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from io import BytesIO
from thriftpy._compat import CYTHON
from .. import TTransportBase
class TBufferedTransport(TTransportBase):
    """Class that wraps another transport and buffers its I/O.

    The implementation uses a (configurable) fixed-size read buffer
    but buffers all writes until a flush is performed.
    """
    DEFAULT_BUFFER = 4096

    def __init__(self, trans, buf_size=DEFAULT_BUFFER):
        self._trans = trans
        self._wbuf = BytesIO()
        self._rbuf = BytesIO(b"")
        self._buf_size = buf_size

    def is_open(self):
        return self._trans.is_open()

    def open(self):
        return self._trans.open()

    def close(self):
        return self._trans.close()

    def _read(self, sz):
        # Serve the request from the local buffer when possible; otherwise
        # refill the buffer with one large read from the wrapped transport.
        data = self._rbuf.read(sz)
        if data:
            return data
        refill = self._trans.read(max(sz, self._buf_size))
        self._rbuf = BytesIO(refill)
        return self._rbuf.read(sz)

    def write(self, buf):
        self._wbuf.write(buf)

    def flush(self):
        pending = self._wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        self._wbuf = BytesIO()
        self._trans.write(pending)
        self._trans.flush()

    def getvalue(self):
        return self._trans.getvalue()
class TBufferedTransportFactory(object):
    """Factory that wraps any transport in a TBufferedTransport."""

    def get_transport(self, trans):
        return TBufferedTransport(trans)
if CYTHON:
from .cybuffered import TCyBufferedTransport, TCyBufferedTransportFactory # noqa
| apache-2.0 |
michalkurka/h2o-3 | h2o-bindings/bin/custom/R/gen_kmeans.py | 2 | 2447 | extensions = dict(
required_params=['training_frame', 'x'],
validate_required_params="",
set_required_params="""
parms$training_frame <- training_frame
if(!missing(x)){
parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
if(!missing(fold_column)){
parms$ignored_columns <- setdiff(parms$ignored_columns, fold_column)
}
}
""",
set_params="""
# Check if user_points is an acceptable set of user-specified starting points
if( is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points) || is.H2OFrame(user_points) ) {
if ( length(init) > 1 || init == 'User') {
parms[["init"]] <- "User"
} else {
warning(paste0("Parameter init must equal 'User' when user_points is set. Ignoring init = '", init, "'. Setting init = 'User'."))
}
parms[["init"]] <- "User"
# Convert user-specified starting points to H2OFrame
if( is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points) ) {
if( !is.data.frame(user_points) && !is.matrix(user_points) ) user_points <- t(as.data.frame(user_points))
user_points <- as.h2o(user_points)
}
parms[["user_points"]] <- user_points
# Set k
if( !(missing(k)) && k!=as.integer(nrow(user_points)) ) {
warning("Parameter k is not equal to the number of user-specified starting points. Ignoring k. Using specified starting points.")
}
parms[["k"]] <- as.numeric(nrow(user_points))
} else if ( is.character(init) ) { # Furthest, Random, PlusPlus{
parms[["user_points"]] <- NULL
} else{
stop ("argument init must be set to Furthest, Random, PlusPlus, or a valid set of user-defined starting points.")
}
""",
)
doc = dict(
preamble="""
Performs k-means clustering on an H2O dataset
""",
params=dict(
x="""A vector containing the \code{character} names of the predictors in the model."""
),
returns="""
an object of class \linkS4class{H2OClusteringModel}.
""",
seealso="""
\code{\link{h2o.cluster_sizes}}, \code{\link{h2o.totss}}, \code{\link{h2o.num_iterations}}, \code{\link{h2o.betweenss}}, \code{\link{h2o.tot_withinss}}, \code{\link{h2o.withinss}}, \code{\link{h2o.centersSTD}}, \code{\link{h2o.centers}}
""",
examples="""
library(h2o)
h2o.init()
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
h2o.kmeans(training_frame = prostate, k = 10, x = c("AGE", "RACE", "VOL", "GLEASON"))
"""
)
| apache-2.0 |
AKS1996/VOCOWA | Monte Carlo Model.py | 1 | 4186 | # The function localize takes the following arguments:
#
# colors:
# 2D list, each entry either 'R' (for red cell) or 'G' (for green cell)
#
# measurements:
# list of measurements taken by the robot, each entry either 'R' or 'G'
#
# motions:
# list of actions taken by the robot, each entry of the form [dy,dx],
# where dx refers to the change in the x-direction (positive meaning
# movement to the right) and dy refers to the change in the y-direction
# (positive meaning movement downward)
# NOTE: the *first* coordinate is change in y; the *second* coordinate is
# change in x
#
# sensor_right:
# float between 0 and 1, giving the probability that any given
# measurement is correct; the probability that the measurement is
# incorrect is 1-sensor_right
#
# p_move:
# float between 0 and 1, giving the probability that any given movement
# command takes place; the probability that the movement command fails
# (and the robot remains still) is 1-p_move; the robot will NOT overshoot
# its destination in this exercise
#
# The function should RETURN (not just show or print) a 2D list (of the same
# dimensions as colors) that gives the probabilities that the robot occupies
# each cell in the world.
#
# Compute the probabilities by assuming the robot initially has a uniform
# probability of being in any cell.
#
# Also assume that at each step, the robot:
# 1) first makes a movement,
# 2) then takes a measurement.
#
# Motion:
# [0,0] - stay
# [0,1] - right
# [0,-1] - left
# [1,0] - down
# [-1,0] - up
def normalize(p):
    """Scale every entry of the 2D grid p, in place, so the grid sums to 1.

    Returns the same list object for caller convenience.
    """
    total = 0
    for row in p:
        total += sum(row)
    for j in range(len(p)):
        for i in range(len(p[0])):
            p[j][i] /= total
    return p
def sense(p, colors, measurement, p_right):
    """Measurement update: weight each cell by the sensor likelihood.

    Applies Bayes rule cell-by-cell (prior times likelihood of the observed
    color) and renormalizes the grid before returning it.
    """
    posterior = []
    for j in range(len(p)):
        row = []
        for i in range(len(p[0])):
            hit = (measurement == colors[j][i])
            # We are multiplying, so it's Bayes or Product
            row.append(p[j][i] * (hit * p_right + (1 - hit) * (1 - p_right)))
        posterior.append(row)
    return normalize(posterior)
def move(p, motion, p_move):
    """Motion update on a cyclic (toroidal) grid.

    Each destination cell receives p_move of the probability mass from the
    cell it moved away from, plus (1 - p_move) of its own mass (the robot
    failed to move). motion is [dy, dx].
    """
    rows, cols = len(p), len(p[0])
    dy, dx = motion
    moved = []
    for j in range(rows):
        row = []
        for i in range(cols):
            # We are adding, so it's convolution or Total Probability
            arrived = p[(j - dy) % rows][(i - dx) % cols]
            row.append(p_move * arrived + (1 - p_move) * p[j][i])
        moved.append(row)
    return moved
def localize(colors, measurements, motions, sensor_right, p_move):
    """Run the full move/sense cycle and return the posterior grid.

    Starts from a uniform prior over every cell, then alternates a motion
    update and a measurement update for each step.
    """
    # initializes p to a uniform distribution over a grid of the same
    # dimensions as colors
    pinit = 1.0 / float(len(colors)) / float(len(colors[0]))
    p = [[pinit] * len(colors[0]) for _ in colors]
    for i in range(len(measurements)):
        p = move(p, motions[i], p_move)
        p = sense(p, colors, measurements[i], sensor_right)
    return p
def show(p):
    """Pretty-print the probability grid with 5-decimal entries.

    Uses print as a function so the module runs under both Python 2 and
    Python 3: a single parenthesized argument prints identically in both.
    """
    rows = ['[' + ','.join('{0:.5f}'.format(x) for x in r) + ']' for r in p]
    print('[' + ',\n '.join(rows) + ']')
#############################################################
# For the following test case, your output should be
# [[0.01105, 0.02464, 0.06799, 0.04472, 0.02465],
# [0.00715, 0.01017, 0.08696, 0.07988, 0.00935],
# [0.00739, 0.00894, 0.11272, 0.35350, 0.04065],
# [0.00910, 0.00715, 0.01434, 0.04313, 0.03642]]
# (within a tolerance of +/- 0.001 for each entry)
def main():
    """Exercise localize() on the example world and print the posterior."""
    colors = [['R', 'G', 'G', 'R', 'R'],
              ['R', 'R', 'G', 'R', 'R'],
              ['R', 'R', 'G', 'G', 'R'],
              ['R', 'R', 'R', 'R', 'R']]
    measurements = ['G', 'G', 'G', 'G', 'G']
    motions = [[0, 0], [0, 1], [1, 0], [1, 0], [0, 1]]
    if len(measurements) != len(motions):
        # Use the call form of raise: 'raise E, msg' is Python 2 only syntax
        # and fails to even compile on Python 3.
        raise ValueError("Error in dimensions of measurement and motion vectors ")
    p = localize(colors, measurements, motions, sensor_right=0.7, p_move=0.8)  # displays your answer
    show(p)
| mit |
VasuAgrawal/tartanHacks2015 | site/flask/lib/python2.7/site-packages/sqlparse/__init__.py | 33 | 2333 | # Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.14'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def parse(sql, encoding=None):
    """Parse sql and return a list of statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
    """
    return tuple(parsestream(sql, encoding))
def parsestream(stream, encoding=None):
    """Parses sql statements from file-like object.

    :param stream: A file-like object.
    :param encoding: The encoding of the stream contents (optional).
    :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
    """
    stack = engine.FilterStack()
    # Enable full analysis so statements are grouped, not merely tokenized.
    stack.full_analyze()
    return stack.run(stream, encoding)
def format(sql, **options):
    """Format *sql* according to *options*.

    Available options are documented in :ref:`formatting`.

    In addition to the formatting options this function accepts the
    keyword "encoding" which determines the encoding of the statement.

    :returns: The formatted SQL statement as string.
    """
    # NOTE: intentionally shadows the builtin format(); this is the
    # package's long-standing public API name.
    encoding = options.pop('encoding', None)
    validated = formatter.validate_options(options)
    stack = formatter.build_filter_stack(engine.FilterStack(), validated)
    stack.postprocess.append(filters.SerializerUnicode())
    return ''.join(stack.run(sql, encoding))
def split(sql, encoding=None):
    """Split *sql* into single statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A list of strings.
    """
    stack = engine.FilterStack()
    stack.split_statements = True
    # 'unicode' is the Python 2 text type; this module targets Python 2.
    return [unicode(stmt).strip() for stmt in stack.run(sql, encoding)]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
    """Split statements from an already-tokenized *stream* (internal helper)."""
    splitter = StatementFilter()
    return list(splitter.process(None, stream))
| mit |
arunhotra/tensorflow | tensorflow/python/training/moving_averages_test.py | 4 | 5395 | """Functional test for moving_averages.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import moving_averages
class MovingAveragesTest(test_util.TensorFlowTestCase):
  """Tests for moving_averages.assign_moving_average."""

  def testAssignMovingAverage(self):
    # Expected update rule: new_var = decay * var + (1 - decay) * value,
    # applied elementwise.
    with self.test_session():
      var = variables.Variable([10.0, 11.0])
      val = constant_op.constant([1.0, 2.0], types.float32)
      decay = 0.25
      assign = moving_averages.assign_moving_average(var, val, decay)
      variables.initialize_all_variables().run()
      # Before the assign op runs, the variable keeps its initial value.
      self.assertAllClose([10.0, 11.0], var.eval())
      assign.op.run()
      self.assertAllClose([10.0 * 0.25 + 1.0 * (1.0 - 0.25),
                           11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
                          var.eval())
def _Repeat(value, dim):
  """Return value itself when dim == 1, else a list of value repeated dim times."""
  if dim == 1:
    return value
  return [value] * dim
class ExponentialMovingAverageTest(test_util.TensorFlowTestCase):
  """Tests for the ExponentialMovingAverage class."""

  def _CheckDecay(self, ema, actual_decay, dim):
    # Builds two variables and one derived (non-Variable) tensor, applies
    # `ema` to all three, then verifies the averaged values after two
    # updates against hand-computed expectations using `actual_decay`.
    tens = _Repeat(10.0, dim)
    thirties = _Repeat(30.0, dim)
    var0 = variables.Variable(tens, name="v0")
    var1 = variables.Variable(thirties, name="v1")
    variables.initialize_all_variables().run()
    # Note that tensor2 is not a Variable but just a plain Tensor resulting
    # from the sum operation.
    tensor2 = var0 + var1
    update = ema.apply([var0, var1, tensor2])
    avg0 = ema.average(var0)
    avg1 = ema.average(var1)
    avg2 = ema.average(tensor2)

    # Averages must not be trainable, or they would be modified by the
    # optimizer as well.
    self.assertFalse(avg0 in variables.trainable_variables())
    self.assertFalse(avg1 in variables.trainable_variables())
    self.assertFalse(avg2 in variables.trainable_variables())
    variables.initialize_all_variables().run()
    self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
    self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
    self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)

    # Check initial values.
    self.assertAllClose(tens, var0.eval())
    self.assertAllClose(thirties, var1.eval())
    self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())

    # Check that averages are initialized correctly.
    self.assertAllClose(tens, avg0.eval())
    self.assertAllClose(thirties, avg1.eval())
    # Note that averages of Tensor's initialize to zeros_like since no value
    # of the Tensor is known because the Op has not been run (yet).
    self.assertAllClose(_Repeat(0.0, dim), avg2.eval())

    # Update the averages and check.
    update.run()
    dk = actual_decay
    expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk), dim)
    self.assertAllClose(expected, avg2.eval())

    # Again, update the averages and check.
    update.run()
    expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
                        (10.0 + 30.0) * (1 - dk)),
                       dim)
    self.assertAllClose(expected, avg2.eval())

  def testAverageVariablesNoNumUpdates_Scalar(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25)
      self._CheckDecay(ema, actual_decay=0.25, dim=1)

  def testAverageVariablesNoNumUpdates_Vector(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25)
      self._CheckDecay(ema, actual_decay=0.25, dim=5)

  def testAverageVariablesNumUpdates_Scalar(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
      self._CheckDecay(ema, actual_decay=0.181818, dim=1)

  def testAverageVariablesNumUpdates_Vector(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
      self._CheckDecay(ema, actual_decay=0.181818, dim=5)

  def testAverageVariablesNames(self):
    # average_name() must report the name the average will get, both
    # before and after apply() actually creates it.
    v0 = variables.Variable(10.0, name="v0")
    v1 = variables.Variable(30.0, name="v1")
    tensor2 = v0 + v1
    ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
    self.assertEqual("v0/foo_avg", ema.average_name(v0))
    self.assertEqual("v1/foo_avg", ema.average_name(v1))
    self.assertEqual("add/foo_avg", ema.average_name(tensor2))
    ema.apply([v0, v1, tensor2])
    self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
    self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
    self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
bstroebl/QGIS | python/plugins/sextante/core/SextanteUtils.py | 1 | 3056 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SextanteUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import time
import sys
import uuid
from PyQt4.QtCore import *
from qgis.core import *
class SextanteUtils:
    """Filesystem and platform helpers used across SEXTANTE.

    All methods are static; the class is used purely as a namespace.
    """

    # Counter used to build unique names for exported layers. Starts at 1;
    # getNumExportedLayers() pre-increments, so the first value handed out
    # is 2.
    NUM_EXPORTED = 1

    @staticmethod
    def userFolder():
        """Return (and create if missing) the per-user sextante folder.

        The folder lives next to the QGIS user database; the path is
        returned with native separators.
        """
        userDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/sextante"
        if not QDir(userDir).exists():
            QDir().mkpath(userDir)
        return unicode(QDir.toNativeSeparators(userDir))

    @staticmethod
    def isWindows():
        """Return True when running on Windows."""
        return os.name == "nt"

    @staticmethod
    def isMac():
        """Return True when running on OS X / macOS."""
        return sys.platform == "darwin"

    @staticmethod
    def tempFolder():
        """Return (and create if missing) the sextante temporary folder."""
        tempDir = os.path.join(unicode(QDir.tempPath()), "sextante")
        if not QDir(tempDir).exists():
            QDir().mkpath(tempDir)
        return unicode(os.path.abspath(tempDir))

    @staticmethod
    def setTempOutput(out, alg):
        """Point output *out* at a fresh temporary filename for algorithm *alg*.

        The filename combines the sanitized command-line name of the
        algorithm with a random UUID, so concurrent runs cannot collide.
        """
        ext = out.getDefaultFileExtension(alg)
        validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        safeCmdName = ''.join(c for c in alg.commandLineName() if c in validChars)
        uniqueSuffix = str(uuid.uuid4()).replace("-", "")
        filename = SextanteUtils.tempFolder() + os.sep + safeCmdName + uniqueSuffix + "." + ext
        out.value = filename

    @staticmethod
    def getTempFilename(ext):
        """Return a unique temp filename with extension *ext* (no extension if None)."""
        path = SextanteUtils.tempFolder()
        if ext is None:
            filename = path + os.sep + str(time.time()) + str(SextanteUtils.getNumExportedLayers())
        else:
            filename = path + os.sep + str(time.time()) + str(SextanteUtils.getNumExportedLayers()) + "." + ext
        return filename

    @staticmethod
    def getNumExportedLayers():
        """Increment and return the export counter (used to build unique names)."""
        SextanteUtils.NUM_EXPORTED += 1
        return SextanteUtils.NUM_EXPORTED
def mkdir(newdir):
    """Recursively create *newdir*, doing nothing if it already exists.

    Like ``os.makedirs`` but silent when the target directory is present.
    """
    if os.path.isdir(newdir):
        return
    parent, leaf = os.path.split(newdir)
    # Build missing ancestors first, deepest-existing upward.
    if parent and not os.path.isdir(parent):
        mkdir(parent)
    # A trailing separator yields an empty leaf; the recursion above has
    # already created the directory in that case.
    if leaf:
        os.mkdir(newdir)
| gpl-2.0 |
Split-Screen/android_kernel_oneplus_msm8974 | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories supplied via the
    # PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP environment variables (read
    # into the module-level build_lib / build_tmp names).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the externally-specified build directory (build_lib,
    # taken from the environment below) instead of the distutils default.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Base compiler flags, extended with whatever CFLAGS the build exports.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output directories are provided by perf's Makefile through the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources, skipping blank lines and '#' comments.
# NOTE: `file()` is the Python 2 builtin (this script's shebang is python2).
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

# The 'perf' extension module, built from the perf tool's C sources.
perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      # Use the customized commands above so output lands where the
      # Makefile expects it.
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/distutils/dir_util.py | 59 | 7780 | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0o777, verbose=1, dry_run=0):
    """Create a directory and any missing ancestor directories.

    If the directory already exists (or if 'name' is the empty string, which
    means the current directory, which of course exists), then do nothing.
    Raise DistutilsFileError if unable to create some directory along the way
    (eg. some sub-path exists, but is a file rather than a directory).
    If 'verbose' is true, print a one-line summary of each mkdir to stdout.
    Return the list of directories actually created.
    """
    # Module-level cache of absolute paths already created by this process;
    # avoids redundant syscalls and duplicate log messages (esp. in dry-run).
    global _path_created

    # Detect a common bug -- name is None
    if not isinstance(name, str):
        raise DistutilsInternalError(
              "mkpath: 'name' must be a string (got %r)" % (name,))

    # XXX what's the better way to handle verbosity? print as we create
    # each directory in the path (the current behaviour), or only announce
    # the creation of the whole path? (quite easy to do the latter since
    # we're not using a recursive algorithm)

    name = os.path.normpath(name)
    created_dirs = []
    if os.path.isdir(name) or name == '':
        return created_dirs
    if _path_created.get(os.path.abspath(name)):
        return created_dirs

    # Walk upward until an existing ancestor is found, pushing each missing
    # component onto 'tails'.
    (head, tail) = os.path.split(name)
    tails = [tail]                      # stack of lone dirs to create
    while head and tail and not os.path.isdir(head):
        (head, tail) = os.path.split(head)
        tails.insert(0, tail)          # push next higher dir onto stack

    # now 'head' contains the deepest directory that already exists
    # (that is, the child of 'head' in 'name' is the highest directory
    # that does *not* exist)
    for d in tails:
        #print "head = %s, d = %s: " % (head, d),
        head = os.path.join(head, d)
        abs_head = os.path.abspath(head)

        if _path_created.get(abs_head):
            continue

        if verbose >= 1:
            log.info("creating %s", head)

        if not dry_run:
            try:
                os.mkdir(head, mode)
            except OSError as exc:
                # Tolerate a concurrent creation of the same directory;
                # anything else is a genuine failure.
                if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
                    raise DistutilsFileError(
                          "could not create '%s': %s" % (head, exc.args[-1]))
            created_dirs.append(head)

        # Mark as created even in dry-run mode so messages are not repeated.
        _path_created[abs_head] = 1
    return created_dirs
def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
    """Create all the empty directories under 'base_dir' needed to put 'files'
    there.

    'base_dir' is just the a name of a directory which doesn't necessarily
    exist yet; 'files' is a list of filenames to be interpreted relative to
    'base_dir'.  'base_dir' + the directory portion of every file in 'files'
    will be created if it doesn't already exist.  'mode', 'verbose' and
    'dry_run' flags are as for 'mkpath()'.
    """
    # Collect the unique set of directories implied by the file names.
    needed = {os.path.join(base_dir, os.path.dirname(fname))
              for fname in files}

    # Create them in sorted order so parents come before children.
    for directory in sorted(needed):
        mkpath(directory, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
              preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.

    Both 'src' and 'dst' must be directory names.  If 'src' is not a
    directory, raise DistutilsFileError.  If 'dst' does not exist, it is
    created with 'mkpath()'.  The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'.  Return the list of files that were
    copied or might have been copied, using their output name.  The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.

    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories.  If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.
    """
    # Imported lazily to avoid a circular import between file_util/dir_util.
    from distutils.file_util import copy_file

    if not dry_run and not os.path.isdir(src):
        raise DistutilsFileError(
              "cannot copy tree '%s': not a directory" % src)
    try:
        names = os.listdir(src)
    except OSError as e:
        # In dry-run mode the source may legitimately not exist yet.
        if dry_run:
            names = []
        else:
            raise DistutilsFileError(
                  "error listing files in '%s': %s" % (src, e.strerror))

    if not dry_run:
        mkpath(dst, verbose=verbose)

    outputs = []

    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)

        if n.startswith('.nfs'):
            # skip NFS rename files
            continue

        if preserve_symlinks and os.path.islink(src_name):
            link_dest = os.readlink(src_name)
            if verbose >= 1:
                log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                os.symlink(link_dest, dst_name)
            outputs.append(dst_name)

        elif os.path.isdir(src_name):
            # Recurse into subdirectories, accumulating their outputs.
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, verbose=verbose,
                      dry_run=dry_run)
            outputs.append(dst_name)

    return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
    """Recursively remove an entire directory tree.

    Any errors are ignored (apart from being reported to stdout if 'verbose'
    is true).
    """
    global _path_created

    if verbose >= 1:
        log.info("removing '%s' (and everything under it)", directory)
    if dry_run:
        return
    # Build the full bottom-up deletion plan first, then execute it.
    cmdtuples = []
    _build_cmdtuple(directory, cmdtuples)
    for cmd in cmdtuples:
        try:
            cmd[0](cmd[1])
            # remove dir from cache if it's already there
            abspath = os.path.abspath(cmd[1])
            if abspath in _path_created:
                del _path_created[abspath]
        except OSError as exc:
            # Best-effort removal: log and keep going.
            log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
    """Take the full path 'path', and make it a relative path.

    This is useful to make 'path' the second argument to os.path.join().
    """
    drive, rest = os.path.splitdrive(path)
    if rest.startswith(os.sep):
        # Absolute: drop the leading separator, keeping any drive prefix.
        return drive + rest[1:]
    # Already relative: return it as-is (without the drive, as before).
    return rest
| lgpl-3.0 |
alexgorban/models | research/inception/inception/inception_distributed_train.py | 15 | 14074 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple replicas with synchronous update.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
"""Comma-separated list of hostname:port for the """
"""parameter server jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
"""Comma-separated list of hostname:port for the """
"""worker jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('protocol', 'grpc',
"""Communication protocol to use in distributed """
"""execution (default grpc) """)
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
'Whether to log device placement.')
# Task ID is used to select the chief and also to access the local_step for
# each replica to check staleness of the gradients in SyncReplicasOptimizer.
tf.app.flags.DEFINE_integer(
'task_id', 0, 'Task ID of the worker/replica running the training.')
# More details can be found in the SyncReplicasOptimizer class:
# tensorflow/python/training/sync_replicas_optimizer.py
tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,
"""Number of gradients to collect before """
"""updating the parameters.""")
tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60,
'Save interval seconds.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 180,
'Save summaries interval seconds.')
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981
tf.app.flags.DEFINE_float('initial_learning_rate', 0.045,
'Initial learning rate.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0,
'Epochs after which learning rate decays.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
'Learning rate decay factor.')
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def train(target, dataset, cluster_spec):
  """Train Inception on a dataset for a number of steps.

  Args:
    target: Session target to connect to (e.g. from a tf.train.Server).
    dataset: Dataset object providing examples, epoch size and class count.
    cluster_spec: tf.train.ClusterSpec describing the 'ps' and 'worker' jobs.
  """
  # Number of workers and parameter servers are inferred from the workers and ps
  # hosts string.
  num_workers = len(cluster_spec.as_dict()['worker'])
  num_parameter_servers = len(cluster_spec.as_dict()['ps'])
  # If no value is given, num_replicas_to_aggregate defaults to be the number of
  # workers.
  if FLAGS.num_replicas_to_aggregate == -1:
    num_replicas_to_aggregate = num_workers
  else:
    num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate

  # Both should be greater than 0 in a distributed training.
  assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
                                                         'num_parameter_servers'
                                                         ' must be > 0.')

  # Choose worker 0 as the chief. Note that any worker could be the chief
  # but there should be only one chief.
  is_chief = (FLAGS.task_id == 0)

  # Ops are assigned to worker by default.
  with tf.device('/job:worker/task:%d' % FLAGS.task_id):
    # Variables and its related init/assign ops are assigned to ps.
    with slim.scopes.arg_scope(
        [slim.variables.variable, slim.variables.global_step],
        device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
      # Create a variable to count the number of train() calls. This equals the
      # number of updates applied to the variables.
      global_step = slim.variables.global_step()

      # Calculate the learning rate schedule.
      num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                               FLAGS.batch_size)
      # Decay steps need to be divided by the number of replicas to aggregate.
      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
                        num_replicas_to_aggregate)

      # Decay the learning rate exponentially based on the number of steps.
      lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True)
      # Add a summary to track the learning rate.
      tf.summary.scalar('learning_rate', lr)

      # Create an optimizer that performs gradient descent.
      opt = tf.train.RMSPropOptimizer(lr,
                                      RMSPROP_DECAY,
                                      momentum=RMSPROP_MOMENTUM,
                                      epsilon=RMSPROP_EPSILON)

      images, labels = image_processing.distorted_inputs(
          dataset,
          batch_size=FLAGS.batch_size,
          num_preprocess_threads=FLAGS.num_preprocess_threads)

      # Number of classes in the Dataset label set plus 1.
      # Label 0 is reserved for an (unused) background class.
      num_classes = dataset.num_classes() + 1
      logits = inception.inference(images, num_classes, for_training=True)
      # Add classification loss.
      inception.loss(logits, labels)

      # Gather all of the losses including regularization losses.
      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

      total_loss = tf.add_n(losses, name='total_loss')

      if is_chief:
        # Compute the moving average of all individual losses and the
        # total loss.
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        # Attach a scalar summmary to all individual losses and the total loss;
        # do the same for the averaged version of the losses.
        for l in losses + [total_loss]:
          loss_name = l.op.name
          # Name each loss as '(raw)' and name the moving average version of the
          # loss as the original loss name.
          tf.summary.scalar(loss_name + ' (raw)', l)
          tf.summary.scalar(loss_name, loss_averages.average(l))

        # Add dependency to compute loss_averages.
        with tf.control_dependencies([loss_averages_op]):
          total_loss = tf.identity(total_loss)

      # Track the moving averages of all trainable variables.
      # Note that we maintain a 'double-average' of the BatchNormalization
      # global statistics.
      # This is not needed when the number of replicas are small but important
      # for synchronous distributed training with tens of workers/replicas.
      exp_moving_averager = tf.train.ExponentialMovingAverage(
          inception.MOVING_AVERAGE_DECAY, global_step)

      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())

      # Add histograms for model variables.
      for var in variables_to_average:
        tf.summary.histogram(var.op.name, var)

      # Create synchronous replica optimizer.
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=num_replicas_to_aggregate,
          total_num_replicas=num_workers,
          variable_averages=exp_moving_averager,
          variables_to_average=variables_to_average)

      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
      assert batchnorm_updates, 'Batchnorm updates are missing'
      batchnorm_updates_op = tf.group(*batchnorm_updates)
      # Add dependency to compute batchnorm_updates.
      with tf.control_dependencies([batchnorm_updates_op]):
        total_loss = tf.identity(total_loss)

      # Compute gradients with respect to the loss.
      grads = opt.compute_gradients(total_loss)

      # Add histograms for gradients.
      for grad, var in grads:
        if grad is not None:
          tf.summary.histogram(var.op.name + '/gradients', grad)

      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

      with tf.control_dependencies([apply_gradients_op]):
        train_op = tf.identity(total_loss, name='train_op')

      # Get chief queue_runners and init_tokens, which is used to synchronize
      # replicas. More details can be found in SyncReplicasOptimizer.
      chief_queue_runners = [opt.get_chief_queue_runner()]
      init_tokens_op = opt.get_init_tokens_op()

      # Create a saver.
      saver = tf.train.Saver()

      # Build the summary operation based on the TF collection of Summaries.
      summary_op = tf.summary.merge_all()

      # Build an initialization operation to run below.
      init_op = tf.global_variables_initializer()

      # We run the summaries in the same thread as the training operations by
      # passing in None for summary_op to avoid a summary_thread being started.
      # Running summaries and training operations in parallel could run out of
      # GPU memory.
      sv = tf.train.Supervisor(is_chief=is_chief,
                               logdir=FLAGS.train_dir,
                               init_op=init_op,
                               summary_op=None,
                               global_step=global_step,
                               saver=saver,
                               save_model_secs=FLAGS.save_interval_secs)

      tf.logging.info('%s Supervisor' % datetime.now())

      sess_config = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement)

      # Get a session.
      sess = sv.prepare_or_wait_for_session(target, config=sess_config)

      # Start the queue runners.
      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
      sv.start_queue_runners(sess, queue_runners)
      tf.logging.info('Started %d queues for processing input data.',
                      len(queue_runners))

      if is_chief:
        # Only the chief starts the replica-sync queue runners and seeds
        # the initial sync tokens.
        sv.start_queue_runners(sess, chief_queue_runners)
        sess.run(init_tokens_op)

      # Train, checking for Nans. Concurrently run the summary operation at a
      # specified interval. Note that the summary_op and train_op never run
      # simultaneously in order to prevent running out of GPU memory.
      next_summary_time = time.time() + FLAGS.save_summaries_secs
      while not sv.should_stop():
        try:
          start_time = time.time()
          loss_value, step = sess.run([train_op, global_step])
          assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
          if step > FLAGS.max_steps:
            break
          duration = time.time() - start_time

          if step % 30 == 0:
            examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = ('Worker %d: %s: step %d, loss = %.2f'
                          '(%.1f examples/sec; %.3f sec/batch)')
            tf.logging.info(format_str %
                            (FLAGS.task_id, datetime.now(), step, loss_value,
                             examples_per_sec, duration))

          # Determine if the summary_op should be run on the chief worker.
          if is_chief and next_summary_time < time.time():
            tf.logging.info('Running Summary operation on the chief.')
            summary_str = sess.run(summary_op)
            sv.summary_computed(sess, summary_str)
            tf.logging.info('Finished running Summary operation.')

            # Determine the next time for running the summary.
            next_summary_time += FLAGS.save_summaries_secs
        except:
          if is_chief:
            tf.logging.info('Chief got exception while running!')
          raise

      # Stop the supervisor. This also waits for service threads to finish.
      sv.stop()

      # Save after the training ends.
      if is_chief:
        saver.save(sess,
                   os.path.join(FLAGS.train_dir, 'model.ckpt'),
                   global_step=global_step)
| apache-2.0 |
GabrielNicolasAvellaneda/riak-python-client | riak/client/transport.py | 2 | 6227 | """
Copyright 2012 Basho Technologies, Inc.
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from contextlib import contextmanager
from riak.transports.pool import BadResource
from riak.transports.pbc import is_retryable as is_pbc_retryable
from riak.transports.http import is_retryable as is_http_retryable
import threading
from six import PY2
if PY2:
from httplib import HTTPException
else:
from http.client import HTTPException
#: The default (global) number of times to retry requests that are
#: retryable. This can be modified locally, per-thread, via the
#: :attr:`RiakClient.retries` property, or using the
#: :attr:`RiakClient.retry_count` method in a ``with`` statement.
DEFAULT_RETRY_COUNT = 3
class _client_locals(threading.local):
    """
    A thread-locals object used by the client.
    """

    def __init__(self):
        # threading.local runs __init__ once per thread on first access,
        # so every thread starts from the module-level default retry count.
        self.riak_retries_count = DEFAULT_RETRY_COUNT
class RiakClientTransport(object):
    """
    Methods for RiakClient related to transport selection and retries.
    """

    # These will be set or redefined by the RiakClient initializer
    protocol = 'pbc'
    _http_pool = None
    _pb_pool = None
    # Thread-local storage for the per-thread retry count.
    _locals = _client_locals()

    def _get_retry_count(self):
        # Falls back to the module default when the thread-local value is
        # falsy (note: this means an explicit 0 also yields the default).
        return self._locals.riak_retries_count or DEFAULT_RETRY_COUNT

    def _set_retry_count(self, value):
        if not isinstance(value, int):
            raise TypeError("retries must be an integer")
        self._locals.riak_retries_count = value

    __retries_doc = """
          The number of times retryable operations will be attempted
          before raising an exception to the caller. Defaults to
          ``3``.

          :note: This is a thread-local for safety and
                 operation-specific modification. To change the
                 default globally, modify
                 :data:`riak.client.transport.DEFAULT_RETRY_COUNT`.
          """

    retries = property(_get_retry_count, _set_retry_count, doc=__retries_doc)

    @contextmanager
    def retry_count(self, retries):
        """
        retry_count(retries)

        Modifies the number of retries for the scope of the ``with``
        statement (in the current thread).

        Example::

            with client.retry_count(10):
                client.ping()
        """
        if not isinstance(retries, int):
            raise TypeError("retries must be an integer")

        # Swap in the new count, always restoring the old one on exit.
        old_retries, self.retries = self.retries, retries
        try:
            yield
        finally:
            self.retries = old_retries

    @contextmanager
    def _transport(self):
        """
        _transport()

        Yields a single transport to the caller from the default pool,
        without retries.
        """
        pool = self._choose_pool()
        with pool.transaction() as transport:
            yield transport

    def _acquire(self):
        """
        _acquire()

        Acquires a connection from the default pool.
        """
        return self._choose_pool().acquire()

    def _with_retries(self, pool, fn):
        """
        Performs the passed function with retries against the given pool.

        :param pool: the connection pool to use
        :type pool: Pool
        :param fn: the function to pass a transport
        :type fn: function
        """
        # Nodes that raised a retryable error during *this* call; they are
        # skipped on subsequent attempts via the pool filter below.
        skip_nodes = []

        def _skip_bad_nodes(transport):
            return transport._node not in skip_nodes

        retry_count = self.retries

        for retry in range(retry_count):
            try:
                with pool.transaction(_filter=_skip_bad_nodes) as transport:
                    try:
                        return fn(transport)
                    except (IOError, HTTPException) as e:
                        if _is_retryable(e):
                            # Record the failure on the node and wrap the
                            # error so the outer loop treats it as retryable.
                            transport._node.error_rate.incr(1)
                            skip_nodes.append(transport._node)
                            raise BadResource(e)
                        else:
                            raise
            except BadResource as e:
                if retry < (retry_count - 1):
                    continue
                else:
                    # Re-raise the inner exception
                    raise e.args[0]

    def _choose_pool(self, protocol=None):
        """
        Selects a connection pool according to the default protocol
        and the passed one.

        :param protocol: the protocol to use
        :type protocol: string
        :rtype: Pool
        """
        if not protocol:
            protocol = self.protocol
        if protocol == 'http':
            pool = self._http_pool
        elif protocol == 'pbc':
            pool = self._pb_pool
        else:
            raise ValueError("invalid protocol %s" % protocol)
        return pool
def _is_retryable(error):
    """
    Determines whether a given error is retryable according to the
    exceptions allowed to be retried by each transport.

    :param error: the error to check
    :type error: Exception
    :rtype: boolean
    """
    # An error is retryable if either transport considers it so.
    transport_checks = (is_pbc_retryable, is_http_retryable)
    return any(check(error) for check in transport_checks)
def retryable(fn, protocol=None):
    """
    Wraps a client operation that can be retried according to the set
    :attr:`RiakClient.retries`. Used internally.
    """
    def _retrying_call(self, *args, **kwargs):
        # Resolve the pool once, then let _with_retries drive the
        # acquire/invoke cycle with a transport-accepting closure.
        selected_pool = self._choose_pool(protocol)
        return self._with_retries(
            selected_pool,
            lambda transport: fn(self, transport, *args, **kwargs))

    _retrying_call.__doc__ = fn.__doc__
    _retrying_call.__repr__ = fn.__repr__
    return _retrying_call
def retryableHttpOnly(fn):
    """
    Wraps a retryable client operation that is only valid over HTTP.
    Used internally.
    """
    # Forces pool selection to the HTTP pool regardless of the client's
    # default protocol.
    return retryable(fn, protocol='http')
| apache-2.0 |
tragiclifestories/django | tests/gis_tests/geos_tests/test_geos.py | 38 | 44711 | from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis import gdal
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import (
HAS_GEOS, GeometryCollection, GEOSException, GEOSGeometry, GEOSIndexError,
LinearRing, LineString, MultiLineString, MultiPoint, MultiPolygon, Point,
Polygon, fromfile, fromstr, geos_version_info,
)
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
def test_base(self):
    "Tests out the GEOSBase class."
    # Testing out GEOSBase class, which provides a `ptr` property
    # that abstracts out access to underlying C pointers.
    class FakeGeom1(GEOSBase):
        pass

    # This one only accepts pointers to floats
    c_float_p = ctypes.POINTER(ctypes.c_float)

    class FakeGeom2(GEOSBase):
        ptr_type = c_float_p

    # Default ptr_type is `c_void_p`.
    fg1 = FakeGeom1()
    # Default ptr_type is C float pointer
    fg2 = FakeGeom2()

    # These assignments are OK -- None is allowed because
    # it's equivalent to the NULL pointer.
    fg1.ptr = ctypes.c_void_p()
    fg1.ptr = None
    fg2.ptr = c_float_p(ctypes.c_float(5.23))
    fg2.ptr = None

    # Because pointers have been set to NULL, an exception should be
    # raised when we try to access it.  Raising an exception is
    # preferable to a segmentation fault that commonly occurs when
    # a C method is given a NULL memory reference.
    for fg in (fg1, fg2):
        # Equivalent to `fg.ptr`
        self.assertRaises(GEOSException, fg._get_ptr)

    # Anything that is either not None or the acceptable pointer type will
    # result in a TypeError when trying to assign it to the `ptr` property.
    # Thus, memory addresses (integers) and pointers of the incorrect type
    # (in `bad_ptrs`) will not be allowed.
    bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
    for bad_ptr in bad_ptrs:
        # Equivalent to `fg.ptr = bad_ptr`
        self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
        self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
    "Testing WKT output."
    for g in self.geometries.wkt_out:
        geom = fromstr(g.wkt)
        # Only 3D geometries are round-tripped here, and only on newer GEOS.
        # NOTE(review): this is a lexicographic string compare of versions;
        # it would misorder a hypothetical '3.10.0' -- confirm acceptable.
        if geom.hasz and geos_version_info()['version'] >= '3.3.0':
            self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
    "Testing HEX output."
    # Each fixture's canonical hex must match the parsed geometry's hex.
    for tg in self.geometries.hex_wkt:
        parsed = fromstr(tg.wkt)
        self.assertEqual(tg.hex, parsed.hex.decode())
def test_hexewkb(self):
    "Testing (HEX)EWKB output."
    # For testing HEX(EWKB).
    ogc_hex = b'01010000000000000000000000000000000000F03F'
    ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
    # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
    hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
    # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
    hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'

    pnt_2d = Point(0, 1, srid=4326)
    pnt_3d = Point(0, 1, 2, srid=4326)

    # OGC-compliant HEX will not have SRID value.
    self.assertEqual(ogc_hex, pnt_2d.hex)
    self.assertEqual(ogc_hex_3d, pnt_3d.hex)

    # HEXEWKB should be appropriate for its dimension -- have to use an
    # a WKBWriter w/dimension set accordingly, else GEOS will insert
    # garbage into 3D coordinate if there is none.
    self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
    self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
    # The 3D EWKB must preserve the Z dimension on re-parse.
    self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)

    # Same for EWKB.
    self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
    self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)

    # Redundant sanity check.
    self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
    "Testing KML output."
    for tg in self.geometries.wkt_out:
        geom = fromstr(tg.wkt)
        # Only fixtures that carry an expected KML string are checked.
        expected = getattr(tg, 'kml', None)
        if expected:
            self.assertEqual(expected, geom.kml)
def test_errors(self):
    "Testing the Error handlers."
    # string-based
    for err in self.geometries.errors:
        with self.assertRaises((GEOSException, ValueError)):
            fromstr(err.wkt)

    # Bad WKB -- a one-byte buffer cannot be a valid WKB stream.
    self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))

    class NotAGeometry(object):
        pass

    # Some other object
    self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
    # None
    self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
    "Testing WKB output."
    for tg in self.geometries.hex_wkt:
        # The WKB, hex-encoded and uppercased, must match the fixture HEX.
        wkb_bytes = fromstr(tg.wkt).wkb
        self.assertEqual(b2a_hex(wkb_bytes).decode().upper(), tg.hex)
def test_create_hex(self):
    "Testing creation from HEX."
    for tg in self.geometries.hex_wkt:
        geom_from_hex = GEOSGeometry(tg.hex)
        # Compare via WKT so that decimal formatting is normalized.
        geom_from_wkt = fromstr(tg.wkt)
        self.assertEqual(geom_from_wkt.wkt, geom_from_hex.wkt)
def test_create_wkb(self):
    "Testing creation from WKB."
    for tg in self.geometries.hex_wkt:
        wkb_buf = six.memoryview(a2b_hex(tg.hex.encode()))
        geom_from_wkb = GEOSGeometry(wkb_buf)
        # Compare via WKT so that decimal formatting is normalized.
        geom_from_wkt = fromstr(tg.wkt)
        self.assertEqual(geom_from_wkt.wkt, geom_from_wkb.wkt)
def test_ewkt(self):
    "Testing EWKT."
    # Both a negative ("undefined") and a real SRID must round-trip.
    for srid in (-1, 32140):
        for p in self.geometries.polygons:
            poly = fromstr('SRID=%d;%s' % (srid, p.wkt))
            self.assertEqual(srid, poly.srid)
            self.assertEqual(srid, poly.shell.srid)
            self.assertEqual(srid, fromstr(poly.ewkt).srid)  # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
    "Testing GeoJSON input/output (via GDAL)."
    for g in self.geometries.json_geoms:
        geom = GEOSGeometry(g.wkt)
        if not hasattr(g, 'not_equal'):
            # Loading jsons to prevent decimal differences
            self.assertEqual(json.loads(g.json), json.loads(geom.json))
            self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
        # GeoJSON input should parse back to an equal geometry.
        self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
    "Testing the fromfile() factory."
    ref_pnt = GEOSGeometry('POINT(5 23)')

    # One file-like object holding WKT, one holding raw WKB.
    wkt_f = BytesIO(force_bytes(ref_pnt.wkt))
    wkb_f = BytesIO(bytes(ref_pnt.wkb))

    # Other tests use `fromfile()` on string filenames so those
    # aren't tested here.
    for fh in (wkt_f, wkb_f):
        fh.seek(0)
        self.assertEqual(ref_pnt, fromfile(fh))
def test_eq(self):
    "Testing equivalence."
    p = fromstr('POINT(5 23)')
    # A geometry compares equal to its own (equivalent) WKT string.
    self.assertEqual(p, p.wkt)
    self.assertNotEqual(p, 'foo')
    ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
    self.assertEqual(ls, ls.wkt)
    self.assertNotEqual(p, 'bar')
    # Error shouldn't be raised on equivalence testing with
    # an invalid type.
    for g in (p, ls):
        self.assertNotEqual(g, None)
        self.assertNotEqual(g, {'foo': 'bar'})
        self.assertNotEqual(g, False)
def test_points(self):
    "Testing Point objects."
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.points:
        # Creating the point from the WKT
        pnt = fromstr(p.wkt)
        self.assertEqual(pnt.geom_type, 'Point')
        self.assertEqual(pnt.geom_typeid, 0)
        self.assertEqual(p.x, pnt.x)
        self.assertEqual(p.y, pnt.y)
        self.assertEqual(pnt, fromstr(p.wkt))
        self.assertEqual(False, pnt == prev)  # Use assertEqual to test __eq__
        # Making sure that the point's X, Y components are what we expect
        self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
        self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
        # Testing the third dimension, and getting the tuple arguments
        if hasattr(p, 'z'):
            self.assertEqual(True, pnt.hasz)
            self.assertEqual(p.z, pnt.z)
            # Fixed: this previously used assertEqual, where the `9` was
            # silently consumed as the `msg` argument; assertAlmostEqual
            # with places=9 was intended (matching the X/Y checks above).
            self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
            tup_args = (p.x, p.y, p.z)
            set_tup1 = (2.71, 3.14, 5.23)
            set_tup2 = (5.23, 2.71, 3.14)
        else:
            self.assertEqual(False, pnt.hasz)
            self.assertIsNone(pnt.z)
            tup_args = (p.x, p.y)
            set_tup1 = (2.71, 3.14)
            set_tup2 = (3.14, 2.71)
        # Centroid operation on point should be point itself
        self.assertEqual(p.centroid, pnt.centroid.tuple)
        # Now testing the different constructors
        pnt2 = Point(tup_args)  # e.g., Point((1, 2))
        pnt3 = Point(*tup_args)  # e.g., Point(1, 2)
        self.assertEqual(pnt, pnt2)
        self.assertEqual(pnt, pnt3)
        # Now testing setting the x and y
        pnt.y = 3.14
        pnt.x = 2.71
        self.assertEqual(3.14, pnt.y)
        self.assertEqual(2.71, pnt.x)
        # Setting via the tuple/coords property
        pnt.tuple = set_tup1
        self.assertEqual(set_tup1, pnt.tuple)
        pnt.coords = set_tup2
        self.assertEqual(set_tup2, pnt.coords)
        prev = pnt  # setting the previous geometry
def test_multipoints(self):
    "Testing MultiPoint objects."
    for mp in self.geometries.multipoints:
        mpnt = fromstr(mp.wkt)
        self.assertEqual(mpnt.geom_type, 'MultiPoint')
        self.assertEqual(mpnt.geom_typeid, 4)

        self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
        self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)

        # Indexing past the end must raise, not crash.
        self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
        self.assertEqual(mp.centroid, mpnt.centroid.tuple)
        self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
        # Each member of the collection is a valid, non-empty Point.
        for p in mpnt:
            self.assertEqual(p.geom_type, 'Point')
            self.assertEqual(p.geom_typeid, 0)
            self.assertEqual(p.empty, False)
            self.assertEqual(p.valid, True)
def test_linestring(self):
    "Testing LineString objects."
    prev = fromstr('POINT(0 0)')
    for l in self.geometries.linestrings:
        ls = fromstr(l.wkt)
        self.assertEqual(ls.geom_type, 'LineString')
        self.assertEqual(ls.geom_typeid, 1)
        self.assertEqual(ls.empty, False)
        self.assertEqual(ls.ring, False)
        # Not all fixtures carry centroid/tuple expectations.
        if hasattr(l, 'centroid'):
            self.assertEqual(l.centroid, ls.centroid.tuple)
        if hasattr(l, 'tup'):
            self.assertEqual(l.tup, ls.tuple)

        self.assertEqual(ls, fromstr(l.wkt))
        self.assertEqual(False, ls == prev)  # Use assertEqual to test __eq__
        self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
        prev = ls

        # Creating a LineString from a tuple, list, and numpy array
        self.assertEqual(ls, LineString(ls.tuple))  # tuple
        self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
        self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
        # Point individual arguments
        self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
        if numpy:
            self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
def test_multilinestring(self):
    "Testing MultiLineString objects."
    prev = fromstr('POINT(0 0)')
    for l in self.geometries.multilinestrings:
        ml = fromstr(l.wkt)
        self.assertEqual(ml.geom_type, 'MultiLineString')
        self.assertEqual(ml.geom_typeid, 5)

        self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
        self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)

        self.assertEqual(ml, fromstr(l.wkt))
        self.assertEqual(False, ml == prev)  # Use assertEqual to test __eq__
        prev = ml

        # Each member is a non-empty LineString.
        for ls in ml:
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)

        self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
        # Reconstruction from cloned members and from raw tuples.
        self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
        self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
    "Testing LinearRing objects."
    for rr in self.geometries.linearrings:
        lr = fromstr(rr.wkt)
        self.assertEqual(lr.geom_type, 'LinearRing')
        self.assertEqual(lr.geom_typeid, 2)
        # Fixture records the expected number of points (n_p).
        self.assertEqual(rr.n_p, len(lr))
        self.assertEqual(True, lr.valid)
        self.assertEqual(False, lr.empty)

        # Creating a LinearRing from a tuple, list, and numpy array
        self.assertEqual(lr, LinearRing(lr.tuple))
        self.assertEqual(lr, LinearRing(*lr.tuple))
        self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
        if numpy:
            self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
    "Testing `from_bbox` class method."
    # A bbox round-trips through from_bbox() -> extent unchanged.
    bbox = (-180, -90, 180, 90)
    poly = Polygon.from_bbox(bbox)
    self.assertEqual(bbox, poly.extent)

    # Testing numerical precision: the ymax coordinate must survive
    # to at least 13 decimal places.
    x = 3.14159265358979323
    poly = Polygon.from_bbox((0, 0, 1, x))
    y = poly.extent[-1]
    self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
    "Testing Polygon objects."
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.polygons:
        # Creating the Polygon, testing its properties.
        poly = fromstr(p.wkt)
        self.assertEqual(poly.geom_type, 'Polygon')
        self.assertEqual(poly.geom_typeid, 3)
        self.assertEqual(poly.empty, False)
        self.assertEqual(poly.ring, False)
        # n_i = expected interior ring count; n_p = expected point count.
        self.assertEqual(p.n_i, poly.num_interior_rings)
        self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
        self.assertEqual(p.n_p, poly.num_points)

        # Area & Centroid
        self.assertAlmostEqual(p.area, poly.area, 9)
        self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
        self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)

        # Testing the geometry equivalence
        self.assertEqual(poly, fromstr(p.wkt))
        # Should not be equal to previous geometry
        self.assertEqual(False, poly == prev)  # Use assertEqual to test __eq__
        self.assertNotEqual(poly, prev)  # Use assertNotEqual to test __ne__

        # Testing the exterior ring
        ring = poly.exterior_ring
        self.assertEqual(ring.geom_type, 'LinearRing')
        self.assertEqual(ring.geom_typeid, 2)
        if p.ext_ring_cs:
            self.assertEqual(p.ext_ring_cs, ring.tuple)
            self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__

        # Testing __getitem__ and __setitem__ on invalid indices
        self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
        self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
        self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)

        # Testing __iter__
        for r in poly:
            self.assertEqual(r.geom_type, 'LinearRing')
            self.assertEqual(r.geom_typeid, 2)

        # Testing polygon construction.
        self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
        self.assertRaises(TypeError, Polygon, 'foo')

        # Polygon(shell, (hole1, ... holeN))
        rings = tuple(r for r in poly)
        self.assertEqual(poly, Polygon(rings[0], rings[1:]))

        # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
        ring_tuples = tuple(r.tuple for r in poly)
        self.assertEqual(poly, Polygon(*ring_tuples))

        # Constructing with tuples of LinearRings.
        self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
        self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
    "Testing MultiPolygon objects."
    fromstr('POINT (0 0)')
    for mp in self.geometries.multipolygons:
        mpoly = fromstr(mp.wkt)
        self.assertEqual(mpoly.geom_type, 'MultiPolygon')
        self.assertEqual(mpoly.geom_typeid, 6)
        self.assertEqual(mp.valid, mpoly.valid)

        # Deeper checks only make sense for valid multipolygons.
        if mp.valid:
            self.assertEqual(mp.num_geom, mpoly.num_geom)
            self.assertEqual(mp.n_p, mpoly.num_coords)
            self.assertEqual(mp.num_geom, len(mpoly))
            self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
            for p in mpoly:
                self.assertEqual(p.geom_type, 'Polygon')
                self.assertEqual(p.geom_typeid, 3)
                self.assertEqual(p.valid, True)
            # Rebuilding from cloned members must reproduce the WKT.
            self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
    "Testing Geometry __del__() on rings and polygons."
    # #### Memory issues with rings and poly
    # NOTE: the ordering of statements here (extract, delete, re-extract,
    # delete parent, then access) is itself the behavior under test.

    # These tests are needed to ensure sanity with writable geometries.

    # Getting a polygon with interior rings, and pulling out the interior rings
    poly = fromstr(self.geometries.polygons[1].wkt)
    ring1 = poly[0]
    ring2 = poly[1]

    # These deletes should be 'harmless' since they are done on child geometries
    del ring1
    del ring2
    ring1 = poly[0]
    ring2 = poly[1]

    # Deleting the polygon
    del poly

    # Access to these rings is OK since they are clones.
    str(ring1)
    str(ring2)
def test_coord_seq(self):
    "Testing Coordinate Sequence objects."
    for p in self.geometries.polygons:
        if p.ext_ring_cs:
            # Constructing the polygon and getting the coordinate sequence
            poly = fromstr(p.wkt)
            cs = poly.exterior_ring.coord_seq

            self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
            self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works

            # Checks __getitem__ and __setitem__
            for i in range(len(p.ext_ring_cs)):
                c1 = p.ext_ring_cs[i]  # Expected value
                c2 = cs[i]  # Value from coordseq
                self.assertEqual(c1, c2)

                # Constructing the test value to set the coordinate sequence with
                if len(c1) == 2:
                    tset = (5, 23)
                else:
                    tset = (5, 23, 8)
                cs[i] = tset

                # Making sure every set point matches what we expect.
                # (Fixed: `cs[i] = tset` was redundantly re-executed inside
                # this loop on every iteration; one assignment suffices.)
                for j in range(len(tset)):
                    self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
    "Testing relate() and relate_pattern()."
    g = fromstr('POINT (0 0)')
    # A malformed DE-9IM pattern string must raise.
    self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
    for rg in self.geometries.relate_geoms:
        a = fromstr(rg.wkt_a)
        b = fromstr(rg.wkt_b)
        # relate_pattern() checks a pattern; relate() returns the matrix.
        self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
        self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
    "Testing intersects() and intersection()."
    # Pair each topology fixture with its expected intersection result
    # (zip instead of parallel `range(len(...))` indexing).
    for tg, ig in zip(self.geometries.topology_geoms, self.geometries.intersect_geoms):
        a = fromstr(tg.wkt_a)
        b = fromstr(tg.wkt_b)
        i1 = fromstr(ig.wkt)
        self.assertEqual(True, a.intersects(b))
        i2 = a.intersection(b)
        self.assertEqual(i1, i2)
        self.assertEqual(i1, a & b)  # __and__ is intersection operator
        a &= b  # testing __iand__
        self.assertEqual(i1, a)
def test_union(self):
    "Testing union()."
    # Pair each topology fixture with its expected union result
    # (zip instead of parallel `range(len(...))` indexing).
    for tg, ug in zip(self.geometries.topology_geoms, self.geometries.union_geoms):
        a = fromstr(tg.wkt_a)
        b = fromstr(tg.wkt_b)
        u1 = fromstr(ug.wkt)
        u2 = a.union(b)
        self.assertEqual(u1, u2)
        self.assertEqual(u1, a | b)  # __or__ is union operator
        a |= b  # testing __ior__
        self.assertEqual(u1, a)
def test_difference(self):
    "Testing difference()."
    # Pair each topology fixture with its expected difference result
    # (zip instead of parallel `range(len(...))` indexing).
    for tg, dg in zip(self.geometries.topology_geoms, self.geometries.diff_geoms):
        a = fromstr(tg.wkt_a)
        b = fromstr(tg.wkt_b)
        d1 = fromstr(dg.wkt)
        d2 = a.difference(b)
        self.assertEqual(d1, d2)
        self.assertEqual(d1, a - b)  # __sub__ is difference operator
        a -= b  # testing __isub__
        self.assertEqual(d1, a)
def test_symdifference(self):
    "Testing sym_difference()."
    # Pair each topology fixture with its expected symmetric-difference
    # result (zip instead of parallel `range(len(...))` indexing).
    for tg, sg in zip(self.geometries.topology_geoms, self.geometries.sdiff_geoms):
        a = fromstr(tg.wkt_a)
        b = fromstr(tg.wkt_b)
        d1 = fromstr(sg.wkt)
        d2 = a.sym_difference(b)
        self.assertEqual(d1, d2)
        self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
        a ^= b  # testing __ixor__
        self.assertEqual(d1, a)
def test_buffer(self):
    "Testing buffer()."
    for bg in self.geometries.buffer_geoms:
        g = fromstr(bg.wkt)

        # The buffer we expect
        exp_buf = fromstr(bg.buffer_wkt)
        quadsegs = bg.quadsegs
        width = bg.width

        # Can't use a floating-point for the number of quadsegs.
        self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))

        # Constructing our buffer
        buf = g.buffer(width, quadsegs)
        self.assertEqual(exp_buf.num_coords, buf.num_coords)
        self.assertEqual(len(exp_buf), len(buf))

        # Now assuring that each point in the buffer is almost equal
        for j in range(len(exp_buf)):
            exp_ring = exp_buf[j]
            buf_ring = buf[j]
            self.assertEqual(len(exp_ring), len(buf_ring))
            for k in range(len(exp_ring)):
                # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
    "Testing the SRID property and keyword."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    # The SRID must be set as an integer, not a string.
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')

    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly:
        self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)

    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)):
        self.assertEqual(32021, gc[i].srid)

    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    # (Local renamed from `hex`, which shadowed the builtin.)
    pnt_hex = '0101000020E610000000000000000014400000000000003740'
    p1 = fromstr(pnt_hex)
    self.assertEqual(4326, p1.srid)

    # Exporting plain (OGC) hex drops the SRID.
    p2 = fromstr(p1.hex)
    self.assertIsNone(p2.srid)
    p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
    self.assertEqual(-1, p3.srid)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
    """ Test with a srid unknown from GDAL """
    pnt = Point(111200, 220900, srid=999999)
    # The unknown SRID still appears in EWKT output...
    self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
    self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
    # ...but no SpatialReference can be resolved for it.
    self.assertIsNone(pnt.srs)

    # Test conversion from custom to a known srid
    c2w = gdal.CoordTransform(
        gdal.SpatialReference(
            '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
            '+datum=WGS84 +units=m +no_defs'
        ),
        gdal.SpatialReference(4326))
    new_pnt = pnt.transform(c2w, clone=True)
    self.assertEqual(new_pnt.srid, 4326)
    self.assertAlmostEqual(new_pnt.x, 1, 3)
    self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    # NOTE: the in-place mutations and post-assignment accesses below
    # exercise GEOS pointer ownership; statement order matters.

    # ### Testing the mutability of Polygons ###
    for p in self.geometries.polygons:
        poly = fromstr(p.wkt)

        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))

        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup:
            new_coords.append((point[0] + 500., point[1] + 500.))
        new_shell = LinearRing(*tuple(new_coords))

        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        str(new_shell)  # new shell is still accessible
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)

    # ### Testing the mutability of Geometry Collections
    for tg in self.geometries.multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(21, 100), random.randint(21, 100))
            # Testing the assignment
            mp[i] = new
            str(new)  # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])

    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in self.geometries.multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in range(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting the each ring in the polygon by 500.
            for j in range(len(poly)):
                r = poly[j]
                for k in range(len(r)):
                    r[k] = (r[k][0] + 500., r[k][1] + 500.)
                poly[j] = r

            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            str(poly)  # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)

    # Extreme (!!) __setitem__ -- no longer works, have to detect
    # in the first object that __setitem__ is called in the subsequent
    # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
    # mpoly[0][0][0] = (3.14, 2.71)
    # self.assertEqual((3.14, 2.71), mpoly[0][0][0])
    # Doing it more slowly..
    # self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
    # del mpoly
def test_threed(self):
    "Testing three-dimensional geometries."
    # Testing a 3D Point
    pnt = Point(2, 3, 8)
    self.assertEqual((2., 3., 8.), pnt.coords)
    # Setting coords with the wrong dimension must raise.
    self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
    pnt.coords = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), pnt.coords)

    # Testing a 3D LineString
    ls = LineString((2., 3., 8.), (50., 250., -117.))
    self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
    # Likewise, a 2D vertex cannot be assigned into a 3D line.
    self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
    ls[0] = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
    "Testing the distance() function."
    origin = Point(0, 0)
    # Distance to self should be 0.
    self.assertEqual(0.0, origin.distance(Point(0, 0)))
    # Unit distance along the Y axis.
    self.assertEqual(1.0, origin.distance(Point(0, 1)))
    # Diagonal of the unit square is ~ sqrt(2).
    self.assertAlmostEqual(1.41421356237, origin.distance(Point(1, 1)), 11)

    # Distances are from the closest vertex in each geometry --
    # should be 3 (distance from (2, 2) to (5, 2)).
    ls1 = LineString((0, 0), (1, 1), (2, 2))
    ls2 = LineString((5, 2), (6, 1), (7, 0))
    self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
    "Testing the length property."
    # Points have 0 length.
    self.assertEqual(0.0, Point(0, 0).length)

    # A unit diagonal should be ~ sqrt(2).
    diag = LineString((0, 0), (1, 1))
    self.assertAlmostEqual(1.41421356237, diag.length, 11)

    # A polygon's length is the circumference of its rings.
    unit_sq = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    self.assertEqual(4.0, unit_sq.length)

    # A collection's length is the sum of each element's length.
    pair = MultiPolygon(unit_sq.clone(), unit_sq)
    self.assertEqual(8.0, pair.length)
def test_emptyCollections(self):
    "Testing empty geometries and collections."
    gc1 = GeometryCollection([])
    gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
    pnt = fromstr('POINT EMPTY')
    ls = fromstr('LINESTRING EMPTY')
    poly = fromstr('POLYGON EMPTY')
    mls = fromstr('MULTILINESTRING EMPTY')
    mpoly1 = fromstr('MULTIPOLYGON EMPTY')
    mpoly2 = MultiPolygon(())

    for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
        self.assertEqual(True, g.empty)

        # Testing len() and num_geom.
        if isinstance(g, Polygon):
            self.assertEqual(1, len(g))  # Has one empty linear ring
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g[0]))
        elif isinstance(g, (Point, LineString)):
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g))
        else:
            self.assertEqual(0, g.num_geom)
            self.assertEqual(0, len(g))

        # Testing __getitem__ (doesn't work on Point or Polygon)
        if isinstance(g, Point):
            # An empty point has no X coordinate to get.
            self.assertRaises(GEOSIndexError, g.get_x)
        elif isinstance(g, Polygon):
            lr = g.shell
            self.assertEqual('LINEARRING EMPTY', lr.wkt)
            self.assertEqual(0, len(lr))
            self.assertEqual(True, lr.empty)
            self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
        else:
            self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
    "Testing GeometryCollection handling of other collections."
    # Creating a GeometryCollection WKT string composed of other
    # collections and polygons.
    coll = []
    coll.extend(mp.wkt for mp in self.geometries.multipolygons if mp.valid)
    coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
    coll.extend(p.wkt for p in self.geometries.polygons)
    coll.extend(mp.wkt for mp in self.geometries.multipoints)
    gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)

    # Should construct ok from WKT
    gc1 = GEOSGeometry(gc_wkt)

    # Should also construct ok from individual geometry arguments,
    # and the two collections should be equal.
    gc2 = GeometryCollection(*tuple(g for g in gc1))
    self.assertEqual(gc1, gc2)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_gdal(self):
    "Testing `ogr` and `srs` properties."
    g1 = fromstr('POINT(5 23)')
    self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
    # No SRID was given, so no SpatialReference is available.
    self.assertIsNone(g1.srs)

    g1_3d = fromstr('POINT(5 23 8)')
    self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
    # The Z coordinate must survive the conversion to OGR.
    self.assertEqual(g1_3d.ogr.z, 8)

    g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
    self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
    self.assertIsInstance(g2.srs, gdal.SpatialReference)
    self.assertEqual(g2.hex, g2.ogr.hex)
    self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
    "Testing use with the Python `copy` module."
    import copy
    poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
    shallow = copy.copy(poly)
    deep = copy.deepcopy(poly)
    # Each copy must wrap its own GEOS pointer, never alias the original's.
    self.assertNotEqual(poly._ptr, shallow._ptr)
    self.assertNotEqual(poly._ptr, deep._ptr)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform(self):
    "Testing `transform` method."
    orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

    # Using a srid, a SpatialReference object, and a CoordTransform object
    # for transformations.
    t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
    t1.transform(trans.srid)
    t2.transform(gdal.SpatialReference('EPSG:2774'))
    ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
    t3.transform(ct)

    # Testing use of the `clone` keyword: the original stays untouched.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)
    self.assertNotEqual(k1, k2)

    # All four transformed points agree with the reference to 3 places.
    prec = 3
    for p in (t1, t2, t3, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform_3d(self):
    p3d = GEOSGeometry('POINT (5 23 100)', 4326)
    p3d.transform(2774)
    # The 2D transform must leave the Z coordinate untouched.
    self.assertEqual(p3d.z, 100)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
    """ Testing `transform` method (SRID match) """
    # transform() should no-op if source & dest SRIDs match,
    # regardless of whether GDAL is available.
    if gdal.HAS_GDAL:
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertIsNot(g1, g, "Clone didn't happen")

    # Re-run the same checks with the GDAL flag monkey-patched off;
    # the try/finally guarantees the flag is restored either way.
    old_has_gdal = gdal.HAS_GDAL
    try:
        gdal.HAS_GDAL = False

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertIsNot(g1, g, "Clone didn't happen")
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
    """ Testing `transform` method (no SRID or negative SRID) """
    # Without a usable source SRID, transform() must raise whether or
    # not a clone is requested.
    for srid in (None, -1):
        for clone in (False, True):
            g = GEOSGeometry('POINT (-104.609 38.255)', srid=srid)
            self.assertRaises(GEOSException, g.transform, 2774, clone=clone)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_nogdal(self):
    """ Testing `transform` method (GDAL not available) """
    # Monkey-patch the GDAL flag off; try/finally restores it.
    old_has_gdal = gdal.HAS_GDAL
    try:
        gdal.HAS_GDAL = False

        # A real transform (different SRID) must fail without GDAL.
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        self.assertRaises(GEOSException, g.transform, 2774)

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
    "Testing `extent` method."
    # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
    mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)

    # Extent of points is just the point itself repeated.
    pnt = Point(5.23, 17.8)
    self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)

    # Testing on the 'real world' Polygon.
    poly = fromstr(self.geometries.polygons[3].wkt)
    shell = poly.shell
    xs, ys = shell.x, shell.y
    self.assertEqual((min(xs), min(ys), max(xs), max(ys)), poly.extent)
def test_pickle(self):
    "Testing pickling and unpickling support."
    # Using both pickle and cPickle -- just 'cause.
    from django.utils.six.moves import cPickle
    import pickle

    # Creating a list of test geometries for pickling,
    # and setting the SRID on some of them.
    def get_geoms(lst, srid=None):
        return [GEOSGeometry(tg.wkt, srid) for tg in lst]
    tgeoms = get_geoms(self.geometries.points)
    tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
    tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
    tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))

    for geom in tgeoms:
        s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
        g1, g2 = cPickle.loads(s1), pickle.loads(s2)
        for tmpg in (g1, g2):
            # Both the geometry and its SRID must survive the round trip.
            self.assertEqual(geom, tmpg)
            self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
    "Testing PreparedGeometry support."
    # Creating a simple multipolygon and getting a prepared version.
    mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
    prep = mpoly.prepared

    # A set of test points.
    pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
    covers = [True, True, False]  # No `covers` op for regular GEOS geoms.
    for pnt, c in zip(pnts, covers):
        # Results should be the same (but faster)
        self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
        self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
        self.assertEqual(c, prep.covers(pnt))

    # NOTE(review): lexicographic version compare; would misorder a
    # hypothetical '3.10.0' -- confirm acceptable.
    if geos_version_info()['version'] > '3.3.0':
        self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
        self.assertTrue(prep.disjoint(Point(-5, -5)))
        poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
        self.assertTrue(prep.overlaps(poly))
        poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
        self.assertTrue(prep.touches(poly))
        poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
        self.assertTrue(prep.within(poly))

    # Original geometry deletion should not crash the prepared one (#21662)
    del mpoly
    self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    @skipUnless(HAS_GEOS, "Geos is required.")
    def test_linearref(self):
        "Testing linear referencing"
        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
        # project(): distance along the line to the point nearest the argument.
        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        # project_normalized(): same distance as a fraction of total length
        # (this linestring is 30 units long).
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
        # interpolate(): inverse of project() -- the point at a given distance.
        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
        # Multi-linestrings measure cumulatively across component lines.
        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)
        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
| bsd-3-clause |
ArcherSys/ArcherSys | Lib/idlelib/MultiCall.py | 1 | 55853 | <<<<<<< HEAD
<<<<<<< HEAD
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import re
import tkinter
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
# (bit positions 1 and 4 are intentionally unused; masks are grouped
# per platform below)
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
if sys.platform == "darwin":
    _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
    _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
    _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
    _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
# (a modifier may have several aliases, e.g. "Meta" and "M")
_modifier_names = dict([(name, number)
                        for number in range(len(_modifiers))
                        for name in _modifiers[number]])
# In 3.4, if no shell window is ever open, the underlying Tk widget is
# destroyed before .__del__ methods here are called. The following
# is used to selectively ignore shutdown exceptions to avoid
# 'Exception ignored' messages. See http://bugs.python.org/issue20167
APPLICATION_GONE = "application has been destroyed"
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are binded.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
    """Binder for event types that take no modifiers and no detail.

    The single Tk handler is installed lazily on the first bind() and
    removed again when the last bound function is unbound, so no Python
    code runs for events nobody listens to.
    """
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.sequence = '<'+_types[type][0]+'>'
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = []
        self.handlerid = None
    def bind(self, triplet, func):
        if not self.handlerid:
            def handler(event, l = self.bindedfuncs, mc_type = self.type):
                event.mc_type = mc_type
                wascalled = {}
                # Most recently bound functions run first; the first truthy
                # return value stops propagation.
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = func(event)
                        if r:
                            return r
            self.handlerid = self.widget.bind(self.widgetinst,
                                              self.sequence, handler)
        self.bindedfuncs.append(func)
    def unbind(self, triplet, func):
        self.bindedfuncs.remove(func)
        if not self.bindedfuncs:
            # Last function gone: remove the Tk handler entirely.
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
            self.handlerid = None
    def __del__(self):
        if self.handlerid:
            try:
                self.widget.unbind(self.widgetinst, self.sequence,
                                   self.handlerid)
            except tkinter.TclError as e:
                # Ignore teardown errors once the Tk application itself is
                # gone (issue20167); re-raise anything else.
                # (idiom fix: "X not in Y" instead of "not X in Y")
                if APPLICATION_GONE not in e.args[0]:
                    raise
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significant bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers in the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
# Tk sequence prefix for each state, e.g. "Control-Shift-" (order follows
# _modifiers; empty string for state 0).
_state_names = [''.join(m[0]+'-'
                        for i, m in enumerate(_modifiers)
                        if (1 << i) & s)
                for s in _states]
def expand_substates(states):
    '''For each item of states return a list containing all combinations of
    that item with individual bits reset, sorted by the number of set bits.
    '''
    def popcount(value):
        "Count the bits set in value."
        count = 0
        while value:
            value, low_bit = divmod(value, 2)
            count += low_bit
        return count
    expanded = []
    for state in states:
        # Intersecting with every known state yields exactly the subsets of
        # this state's bit pattern; most specific (most bits) comes first.
        subsets = sorted({state & other for other in states},
                         key=popcount, reverse=True)
        expanded.append(subsets)
    return expanded
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
# (i.e. the internal bitmask translated into a combination of MC_* masks).
_state_codes = []
for s in _states:
    r = 0
    for i in range(len(_modifiers)):
        if (1 << i) & s:
            r |= _modifier_masks[i]
    _state_codes.append(r)
class _ComplexBinder:
    """Binder for event types that accept modifiers and a detail.

    Handlers for all possible states are bound up front (and again for each
    newly discovered detail) and are only unbound when the binder is deleted.
    self.handlerids is the list of (sequence, funcid) pairs of bound Tk
    handlers.  The bound Python functions sit in self.bindedfuncs, a dict
    mapping a detail (or None) to a per-state list of function lists.
    """
    def __create_handler(self, lists, mc_type, mc_state):
        # Build the Tk callback for one (detail, state) combination.
        # `lists` is ordered most-specific-first; within each list the most
        # recently bound function runs first, and the first truthy return
        # value stops propagation.
        def handler(event, lists = lists,
                    mc_type = mc_type, mc_state = mc_state,
                    ishandlerrunning = self.ishandlerrunning,
                    doafterhandler = self.doafterhandler):
            ishandlerrunning[:] = [True]
            event.mc_type = mc_type
            event.mc_state = mc_state
            wascalled = {}
            r = None
            for l in lists:
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = l[i](event)
                        if r:
                            break
                if r:
                    break
            ishandlerrunning[:] = []
            # Call all functions in doafterhandler and remove them from list
            for f in doafterhandler:
                f()
            doafterhandler[:] = []
            if r:
                return r
        return handler
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.typename = _types[type][0]
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = {None: [[] for s in _states]}
        self.handlerids = []
        # we don't want to change the lists of functions while a handler is
        # running - it will mess up the loop and anyway, we usually want the
        # change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the binded functions.
        # It calls them only once.
        # ishandlerrunning is a list. An empty one means no, otherwise - yes.
        # this is done so that it would be mutable.
        self.ishandlerrunning = []
        self.doafterhandler = []
        for s in _states:
            lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
            handler = self.__create_handler(lists, type, _state_codes[s])
            seq = '<'+_state_names[s]+self.typename+'>'
            self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                          seq, handler)))
    def bind(self, triplet, func):
        if triplet[2] not in self.bindedfuncs:
            # First time this detail is seen: bind a handler for every state.
            self.bindedfuncs[triplet[2]] = [[] for s in _states]
            for s in _states:
                lists = [ self.bindedfuncs[detail][i]
                          for detail in (triplet[2], None)
                          for i in _state_subsets[s] ]
                handler = self.__create_handler(lists, self.type,
                                                _state_codes[s])
                seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
                self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                              seq, handler)))
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
        if not self.ishandlerrunning:
            doit()
        else:
            # Mid-dispatch: defer the mutation until the handler finishes.
            self.doafterhandler.append(doit)
    def unbind(self, triplet, func):
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
        if not self.ishandlerrunning:
            doit()
        else:
            self.doafterhandler.append(doit)
    def __del__(self):
        for seq, id in self.handlerids:
            try:
                self.widget.unbind(self.widgetinst, seq, id)
            except tkinter.TclError as e:
                # Ignore teardown errors once the Tk application itself is
                # gone (issue20167); re-raise anything else.
                # (idiom fix: "X not in Y" instead of "not X in Y")
                if APPLICATION_GONE not in e.args[0]:
                    raise
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants.
_types = (
    ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
    ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
    ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
    ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
    ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
    ("Visibility",),
)
# which binder should be used for every event type?
# (only the first four -- Key/Button Press/Release -- take modifiers/detail)
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
                    for number in range(len(_types))
                    for name in _types[number]])
# Valid details: a keysym word for Key events, a button number 1-5 otherwise.
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    parts = sequence[1:-1].split('-')
    # Fold any leading modifier words into a state bitmask.
    state = 0
    while parts and parts[0] in _modifier_names:
        state |= 1 << _modifier_names[parts[0]]
        parts.pop(0)
    # The next word must name a recognized event type.
    if not parts or parts[0] not in _type_names:
        return None
    type = _type_names[parts.pop(0)]
    if _binder_classes[type] is _SimpleBinder:
        # Simple event types accept neither modifiers nor a detail.
        if state or parts:
            return None
        return state, type, None
    # _ComplexBinder: Key events take a keysym detail, Button events a
    # button-number detail.  (_type_names maps KeyPress/KeyRelease to the
    # MC_KEYPRESS/MC_KEYRELEASE indices.)
    detail_re = _keysym_re if type in (MC_KEYPRESS, MC_KEYRELEASE) else _button_re
    if not parts:
        detail = None
    elif len(parts) == 1 and detail_re.match(parts[0]):
        detail = parts[0]
    else:
        return None
    return state, type, detail
def _triplet_to_sequence(triplet):
    """Render a parsed (state, type, detail) triplet back into a Tk
    event-sequence string."""
    state, type, detail = triplet
    prefix = '<' + _state_names[state] + _types[type][0]
    if detail:
        return prefix + '-' + detail + '>'
    return prefix + '>'
# Cache of generated MultiCall classes, keyed by the wrapped widget class.
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall class which inherits its methods from the
    given widget class (for example, Tkinter.Text). This is used
    instead of a templating mechanism.
    """
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a tuple with:
            #  0. the function binded
            #  1. a list of triplets - the sequences it is binded to
            self.__eventinfo = {}
            # One binder per event type, shared by all virtual events.
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            #print("bind(%s, %s, %s)" % (sequence, func, add),
            #      file=sys.__stderr__)
            # Only virtual events ("<<...>>") get the MultiCall treatment;
            # everything else goes straight to the widget's own bind().
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        # Rebinding: detach the previous function first.
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
            #      file=sys.__stderr__)
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # Sequence not recognized by MultiCall: fall back to the
                    # original Tk handling mechanism.
                    #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                # Report MultiCall-managed sequences first, then Tk's own.
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        try:
                            self.__binders[triplet[1]].unbind(triplet, func)
                        except tkinter.TclError as e:
                            # Tk may already be torn down at interpreter exit
                            # (issue20167); only that error is ignored.
                            if not APPLICATION_GONE in e.args[0]:
                                raise
    _multicall_dict[widget] = MultiCall
    return MultiCall
def _multi_call(parent):
    # Manual test (run via htest): a Text widget with many overlapping
    # MultiCall bindings, each printing its sequence, so dispatch order and
    # specificity can be observed interactively.
    root = tkinter.Tk()
    root.title("Test MultiCall")
    # Place the test window just below the parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    text = MultiCallCreator(tkinter.Text)(root)
    text.pack()
    def bindseq(seq, n=[0]):
        # n is a deliberate mutable default: a counter shared across calls,
        # used to generate a unique virtual event name per binding.
        def handler(event):
            print(seq)
        text.bind("<<handler%d>>"%n[0], handler)
        text.event_add("<<handler%d>>"%n[0], seq)
        n[0] += 1
    bindseq("<Key>")
    bindseq("<Control-Key>")
    bindseq("<Alt-Key-a>")
    bindseq("<Control-Key-a>")
    bindseq("<Alt-Control-Key-a>")
    bindseq("<Key-b>")
    bindseq("<Control-Button-1>")
    bindseq("<Button-2>")
    bindseq("<Alt-Button-1>")
    bindseq("<FocusOut>")
    bindseq("<Enter>")
    bindseq("<Leave>")
    root.mainloop()
if __name__ == "__main__":
    # Launch the manual smoke test through IDLE's human-test runner.
    from idlelib.idle_test.htest import run
    run(_multi_call)
=======
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import re
import tkinter
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
# (bit positions 1 and 4 are intentionally unused; masks are grouped
# per platform below)
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
if sys.platform == "darwin":
    _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
    _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
    _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
    _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
# (a modifier may have several aliases, e.g. "Meta" and "M")
_modifier_names = dict([(name, number)
                        for number in range(len(_modifiers))
                        for name in _modifiers[number]])
# In 3.4, if no shell window is ever open, the underlying Tk widget is
# destroyed before .__del__ methods here are called. The following
# is used to selectively ignore shutdown exceptions to avoid
# 'Exception ignored' messages. See http://bugs.python.org/issue20167
APPLICATION_GONE = "application has been destroyed"
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are binded.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
    """Binder for event types that take no modifiers and no detail.

    The single Tk handler is installed lazily on the first bind() and
    removed again when the last bound function is unbound, so no Python
    code runs for events nobody listens to.
    """
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.sequence = '<'+_types[type][0]+'>'
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = []
        self.handlerid = None
    def bind(self, triplet, func):
        if not self.handlerid:
            def handler(event, l = self.bindedfuncs, mc_type = self.type):
                event.mc_type = mc_type
                wascalled = {}
                # Most recently bound functions run first; the first truthy
                # return value stops propagation.
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = func(event)
                        if r:
                            return r
            self.handlerid = self.widget.bind(self.widgetinst,
                                              self.sequence, handler)
        self.bindedfuncs.append(func)
    def unbind(self, triplet, func):
        self.bindedfuncs.remove(func)
        if not self.bindedfuncs:
            # Last function gone: remove the Tk handler entirely.
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
            self.handlerid = None
    def __del__(self):
        if self.handlerid:
            try:
                self.widget.unbind(self.widgetinst, self.sequence,
                                   self.handlerid)
            except tkinter.TclError as e:
                # Ignore teardown errors once the Tk application itself is
                # gone (issue20167); re-raise anything else.
                # (idiom fix: "X not in Y" instead of "not X in Y")
                if APPLICATION_GONE not in e.args[0]:
                    raise
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significant bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers in the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
# Tk sequence prefix for each state, e.g. "Control-Shift-" (order follows
# _modifiers; empty string for state 0).
_state_names = [''.join(m[0]+'-'
                        for i, m in enumerate(_modifiers)
                        if (1 << i) & s)
                for s in _states]
def expand_substates(states):
    '''For each item of states return a list containing all combinations of
    that item with individual bits reset, sorted by the number of set bits.
    '''
    def popcount(value):
        "Count the bits set in value."
        count = 0
        while value:
            value, low_bit = divmod(value, 2)
            count += low_bit
        return count
    expanded = []
    for state in states:
        # Intersecting with every known state yields exactly the subsets of
        # this state's bit pattern; most specific (most bits) comes first.
        subsets = sorted({state & other for other in states},
                         key=popcount, reverse=True)
        expanded.append(subsets)
    return expanded
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
# (i.e. the internal bitmask translated into a combination of MC_* masks).
_state_codes = []
for s in _states:
    r = 0
    for i in range(len(_modifiers)):
        if (1 << i) & s:
            r |= _modifier_masks[i]
    _state_codes.append(r)
class _ComplexBinder:
    """Binder for event types that accept modifiers and a detail.

    Handlers for all possible states are bound up front (and again for each
    newly discovered detail) and are only unbound when the binder is deleted.
    self.handlerids is the list of (sequence, funcid) pairs of bound Tk
    handlers.  The bound Python functions sit in self.bindedfuncs, a dict
    mapping a detail (or None) to a per-state list of function lists.
    """
    def __create_handler(self, lists, mc_type, mc_state):
        # Build the Tk callback for one (detail, state) combination.
        # `lists` is ordered most-specific-first; within each list the most
        # recently bound function runs first, and the first truthy return
        # value stops propagation.
        def handler(event, lists = lists,
                    mc_type = mc_type, mc_state = mc_state,
                    ishandlerrunning = self.ishandlerrunning,
                    doafterhandler = self.doafterhandler):
            ishandlerrunning[:] = [True]
            event.mc_type = mc_type
            event.mc_state = mc_state
            wascalled = {}
            r = None
            for l in lists:
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = l[i](event)
                        if r:
                            break
                if r:
                    break
            ishandlerrunning[:] = []
            # Call all functions in doafterhandler and remove them from list
            for f in doafterhandler:
                f()
            doafterhandler[:] = []
            if r:
                return r
        return handler
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.typename = _types[type][0]
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = {None: [[] for s in _states]}
        self.handlerids = []
        # we don't want to change the lists of functions while a handler is
        # running - it will mess up the loop and anyway, we usually want the
        # change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the binded functions.
        # It calls them only once.
        # ishandlerrunning is a list. An empty one means no, otherwise - yes.
        # this is done so that it would be mutable.
        self.ishandlerrunning = []
        self.doafterhandler = []
        for s in _states:
            lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
            handler = self.__create_handler(lists, type, _state_codes[s])
            seq = '<'+_state_names[s]+self.typename+'>'
            self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                          seq, handler)))
    def bind(self, triplet, func):
        if triplet[2] not in self.bindedfuncs:
            # First time this detail is seen: bind a handler for every state.
            self.bindedfuncs[triplet[2]] = [[] for s in _states]
            for s in _states:
                lists = [ self.bindedfuncs[detail][i]
                          for detail in (triplet[2], None)
                          for i in _state_subsets[s] ]
                handler = self.__create_handler(lists, self.type,
                                                _state_codes[s])
                seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
                self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                              seq, handler)))
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
        if not self.ishandlerrunning:
            doit()
        else:
            # Mid-dispatch: defer the mutation until the handler finishes.
            self.doafterhandler.append(doit)
    def unbind(self, triplet, func):
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
        if not self.ishandlerrunning:
            doit()
        else:
            self.doafterhandler.append(doit)
    def __del__(self):
        for seq, id in self.handlerids:
            try:
                self.widget.unbind(self.widgetinst, seq, id)
            except tkinter.TclError as e:
                # Ignore teardown errors once the Tk application itself is
                # gone (issue20167); re-raise anything else.
                # (idiom fix: "X not in Y" instead of "not X in Y")
                if APPLICATION_GONE not in e.args[0]:
                    raise
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants.
_types = (
    ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
    ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
    ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
    ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
    ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
    ("Visibility",),
)
# which binder should be used for every event type?
# (only the first four -- Key/Button Press/Release -- take modifiers/detail)
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
                    for number in range(len(_types))
                    for name in _types[number]])
# Valid details: a keysym word for Key events, a button number 1-5 otherwise.
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    parts = sequence[1:-1].split('-')
    # Fold any leading modifier words into a state bitmask.
    state = 0
    while parts and parts[0] in _modifier_names:
        state |= 1 << _modifier_names[parts[0]]
        parts.pop(0)
    # The next word must name a recognized event type.
    if not parts or parts[0] not in _type_names:
        return None
    type = _type_names[parts.pop(0)]
    if _binder_classes[type] is _SimpleBinder:
        # Simple event types accept neither modifiers nor a detail.
        if state or parts:
            return None
        return state, type, None
    # _ComplexBinder: Key events take a keysym detail, Button events a
    # button-number detail.  (_type_names maps KeyPress/KeyRelease to the
    # MC_KEYPRESS/MC_KEYRELEASE indices.)
    detail_re = _keysym_re if type in (MC_KEYPRESS, MC_KEYRELEASE) else _button_re
    if not parts:
        detail = None
    elif len(parts) == 1 and detail_re.match(parts[0]):
        detail = parts[0]
    else:
        return None
    return state, type, detail
def _triplet_to_sequence(triplet):
    """Render a parsed (state, type, detail) triplet back into a Tk
    event-sequence string."""
    state, type, detail = triplet
    prefix = '<' + _state_names[state] + _types[type][0]
    if detail:
        return prefix + '-' + detail + '>'
    return prefix + '>'
# Cache of generated MultiCall classes, keyed by the wrapped widget class.
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall class which inherits its methods from the
    given widget class (for example, Tkinter.Text). This is used
    instead of a templating mechanism.
    """
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a tuple with:
            #  0. the function binded
            #  1. a list of triplets - the sequences it is binded to
            self.__eventinfo = {}
            # One binder per event type, shared by all virtual events.
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            #print("bind(%s, %s, %s)" % (sequence, func, add),
            #      file=sys.__stderr__)
            # Only virtual events ("<<...>>") get the MultiCall treatment;
            # everything else goes straight to the widget's own bind().
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        # Rebinding: detach the previous function first.
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
            #      file=sys.__stderr__)
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # Sequence not recognized by MultiCall: fall back to the
                    # original Tk handling mechanism.
                    #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                # Report MultiCall-managed sequences first, then Tk's own.
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        try:
                            self.__binders[triplet[1]].unbind(triplet, func)
                        except tkinter.TclError as e:
                            # Tk may already be torn down at interpreter exit
                            # (issue20167); only that error is ignored.
                            if not APPLICATION_GONE in e.args[0]:
                                raise
    _multicall_dict[widget] = MultiCall
    return MultiCall
def _multi_call(parent):
    # Manual test (run via htest): a Text widget with many overlapping
    # MultiCall bindings, each printing its sequence, so dispatch order and
    # specificity can be observed interactively.
    root = tkinter.Tk()
    root.title("Test MultiCall")
    # Place the test window just below the parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    text = MultiCallCreator(tkinter.Text)(root)
    text.pack()
    def bindseq(seq, n=[0]):
        # n is a deliberate mutable default: a counter shared across calls,
        # used to generate a unique virtual event name per binding.
        def handler(event):
            print(seq)
        text.bind("<<handler%d>>"%n[0], handler)
        text.event_add("<<handler%d>>"%n[0], seq)
        n[0] += 1
    bindseq("<Key>")
    bindseq("<Control-Key>")
    bindseq("<Alt-Key-a>")
    bindseq("<Control-Key-a>")
    bindseq("<Alt-Control-Key-a>")
    bindseq("<Key-b>")
    bindseq("<Control-Button-1>")
    bindseq("<Button-2>")
    bindseq("<Alt-Button-1>")
    bindseq("<FocusOut>")
    bindseq("<Enter>")
    bindseq("<Leave>")
    root.mainloop()
if __name__ == "__main__":
    # Launch the manual smoke test through IDLE's human-test runner.
    from idlelib.idle_test.htest import run
    run(_multi_call)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import re
import tkinter
# The event type constants, which define the meaning of mc_type.
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# The modifier state constants, which define the meaning of mc_state.
# NOTE(review): the masks skip 1<<1 and 1<<4 — presumably matching Tk's X11
# state bit layout (Lock, Mod-gaps); confirm before reusing these values.
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# Define the list of modifiers, to be used in complex event types.
# Each entry is a tuple of accepted spellings for that modifier.
if sys.platform == "darwin":
    _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
    _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
    _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
    _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# A dictionary to map a modifier name into its index in _modifiers.
_modifier_names = dict([(name, number)
                        for number in range(len(_modifiers))
                        for name in _modifiers[number]])
# In 3.4, if no shell window is ever open, the underlying Tk widget is
# destroyed before .__del__ methods here are called.  The following
# is used to selectively ignore shutdown exceptions to avoid
# 'Exception ignored' messages. See http://bugs.python.org/issue20167
APPLICATION_GONE = "application has been destroyed"
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are bound.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
    """Binder for event types that take no modifiers and no detail.

    A single Tk handler is installed lazily on the first bind() and removed
    when the last function is unbound, so no Python code runs for unbound
    event types.
    """
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.sequence = '<'+_types[type][0]+'>'
        self.widget = widget
        self.widgetinst = widgetinst
        # Functions currently bound; called most-recently-bound first.
        self.bindedfuncs = []
        # Tk binding id, or None while nothing is bound.
        self.handlerid = None
    def bind(self, triplet, func):
        if not self.handlerid:
            # Install the real Tk handler only when the first function binds.
            def handler(event, l = self.bindedfuncs, mc_type = self.type):
                event.mc_type = mc_type
                wascalled = {}
                # Iterate newest-to-oldest; each function runs at most once.
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = func(event)
                        if r:
                            # A truthy return stops propagation, as in Tk.
                            return r
            self.handlerid = self.widget.bind(self.widgetinst,
                                              self.sequence, handler)
        self.bindedfuncs.append(func)
    def unbind(self, triplet, func):
        self.bindedfuncs.remove(func)
        if not self.bindedfuncs:
            # Last function gone: remove the Tk handler entirely.
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
            self.handlerid = None
    def __del__(self):
        if self.handlerid:
            try:
                self.widget.unbind(self.widgetinst, self.sequence,
                                   self.handlerid)
            except tkinter.TclError as e:
                # Ignore only Tk-already-destroyed errors (see bpo-20167).
                if not APPLICATION_GONE in e.args[0]:
                    raise
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significant bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers in the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
# _state_names maps each state to its sequence prefix, e.g. "Control-Alt-".
_state_names = [''.join(m[0]+'-'
                        for i, m in enumerate(_modifiers)
                        if (1 << i) & s)
                for s in _states]
def expand_substates(states):
    """For each item of states return a list containing all combinations of
    that item with individual bits reset, sorted by the number of set bits.
    """
    def nbits(n):
        """Return the number of set bits in the binary representation of n."""
        count = 0
        while n:
            n, low_bit = divmod(n, 2)
            count += low_bit
        return count

    # For every state, intersect it with each known state, deduplicate, and
    # order the survivors from most to fewest set bits (stable sort).
    return [sorted(set(state & other for other in states),
                   key=nbits, reverse=True)
            for state in states]
# Precompute, for every state, its ordered list of sub-states (most specific
# first); used by the binders to decide handler call order.
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
_state_codes = []
for s in _states:
    r = 0
    for i in range(len(_modifiers)):
        if (1 << i) & s:
            # Translate the internal bit position into its public MC_* mask.
            r |= _modifier_masks[i]
    _state_codes.append(r)
class _ComplexBinder:
    """Binder for event types that take modifiers and a detail.

    Tk handlers for every modifier state are installed eagerly (and for every
    newly seen detail), and stay installed until the binder is deleted.
    """
    # This class binds many functions, and only unbinds them when it is deleted.
    # self.handlerids is the list of seqs and ids of bound handler functions.
    # The bound functions sit in a dictionary of lists of lists, which maps
    # a detail (or None) and a state into a list of functions.
    # When a new detail is discovered, handlers for all the possible states
    # are bound.
    def __create_handler(self, lists, mc_type, mc_state):
        # 'lists' are live references into self.bindedfuncs, ordered most
        # specific state first, so mutation through bind() is seen here.
        def handler(event, lists = lists,
                    mc_type = mc_type, mc_state = mc_state,
                    ishandlerrunning = self.ishandlerrunning,
                    doafterhandler = self.doafterhandler):
            # Mark "a handler is running" so bind/unbind defer their mutation.
            ishandlerrunning[:] = [True]
            event.mc_type = mc_type
            event.mc_state = mc_state
            wascalled = {}
            r = None
            for l in lists:
                # Newest-to-oldest within each list; each function at most once.
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = l[i](event)
                        if r:
                            break
                if r:
                    break
            ishandlerrunning[:] = []
            # Call all functions in doafterhandler and remove them from list
            for f in doafterhandler:
                f()
            doafterhandler[:] = []
            if r:
                return r
        return handler
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.typename = _types[type][0]
        self.widget = widget
        self.widgetinst = widgetinst
        # detail (or None) -> per-state list of bound functions.
        self.bindedfuncs = {None: [[] for s in _states]}
        self.handlerids = []
        # we don't want to change the lists of functions while a handler is
        # running - it will mess up the loop and anyway, we usually want the
        # change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the bound functions.
        # It calls them only once.
        # ishandlerrunning is a list. An empty one means no, otherwise - yes.
        # this is done so that it would be mutable.
        self.ishandlerrunning = []
        self.doafterhandler = []
        for s in _states:
            lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
            handler = self.__create_handler(lists, type, _state_codes[s])
            seq = '<'+_state_names[s]+self.typename+'>'
            self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                          seq, handler)))
    def bind(self, triplet, func):
        if triplet[2] not in self.bindedfuncs:
            # First time we see this detail: install handlers for all states.
            self.bindedfuncs[triplet[2]] = [[] for s in _states]
            for s in _states:
                lists = [ self.bindedfuncs[detail][i]
                          for detail in (triplet[2], None)
                          for i in _state_subsets[s] ]
                handler = self.__create_handler(lists, self.type,
                                                _state_codes[s])
                seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
                self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                              seq, handler)))
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
        if not self.ishandlerrunning:
            doit()
        else:
            # Defer the mutation until the running handler finishes.
            self.doafterhandler.append(doit)
    def unbind(self, triplet, func):
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
        if not self.ishandlerrunning:
            doit()
        else:
            self.doafterhandler.append(doit)
    def __del__(self):
        for seq, id in self.handlerids:
            try:
                self.widget.unbind(self.widgetinst, seq, id)
            except tkinter.TclError as e:
                # Ignore only Tk-already-destroyed errors (see bpo-20167).
                if not APPLICATION_GONE in e.args[0]:
                    raise
# Define the list of event types to be handled by MultiCall. The order is
# compatible with the definition of the MC_* event type constants; each entry
# lists the accepted spellings for that type.
_types = (
    ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
    ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
    ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
    ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
    ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
    ("Visibility",),
)
# Which binder should be used for every event type?  The first four (key and
# button press/release) take modifiers and a detail; the rest are simple.
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
                    for number in range(len(_types))
                    for name in _types[number]])
# Valid detail strings: a keysym word for key events, a digit 1-5 for buttons.
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Parse an event-sequence string.

    On success return a triplet: the modifier state (an int), the event type
    (an index into _types), and the detail (a string, or None).  Return None
    if the sequence is not one MultiCall recognizes.
    """
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    words = sequence[1:-1].split('-')

    # Fold any leading modifier words into a bitmask.
    modifiers = 0
    while words and words[0] in _modifier_names:
        modifiers |= 1 << _modifier_names[words[0]]
        del words[0]

    # The next word must name a known event type.
    if not words or words[0] not in _type_names:
        return None
    type = _type_names[words[0]]
    del words[0]

    if _binder_classes[type] is _SimpleBinder:
        # Simple event types accept neither modifiers nor a detail.
        if modifiers or words:
            return None
        return modifiers, type, None

    # _ComplexBinder: key events take a keysym detail, button events a digit.
    if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
        detail_re = _keysym_re
    else:
        detail_re = _button_re
    if not words:
        detail = None
    elif len(words) == 1 and detail_re.match(words[0]):
        detail = words[0]
    else:
        return None
    return modifiers, type, detail
def _triplet_to_sequence(triplet):
    """Convert a (state, type, detail) triplet back into a sequence string."""
    state, type_index, detail = triplet
    base = _state_names[state] + _types[type_index][0]
    if detail:
        return '<%s-%s>' % (base, detail)
    return '<' + base + '>'
# Cache of generated MultiCall classes, keyed by the wrapped widget class.
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall class which inherits its methods from the
    given widget class (for example, Tkinter.Text). This is used
    instead of a templating mechanism.

    Classes are cached, so repeated calls with the same widget class
    return the same generated class.
    """
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a list with:
            # 0. the function bound
            # 1. a list of triplets - the sequences it is bound to
            self.__eventinfo = {}
            # One binder instance per event type, indexed like _types.
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            """Bind func to a virtual event; non-virtual sequences go to Tk.

            Rebinding a virtual event replaces the previous function on
            every sequence already added to it.
            """
            #print("bind(%s, %s, %s)" % (sequence, func, add),
            #      file=sys.__stderr__)
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        # Detach the old function before installing the new.
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            # Always delegate to Tk as well, so the native binding exists.
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            """Unbind the function of a virtual event (sequences are kept)."""
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            """Add sequences to a virtual event; unrecognized ones go to Tk."""
            #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
            #      file=sys.__stderr__)
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # Not a sequence we handle; let Tk deal with it.
                    #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            """Remove sequences from a virtual event, undoing event_add."""
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            """Return the sequences bound to a virtual event (ours + Tk's)."""
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            # Unbind everything; tolerate Tk being gone already (bpo-20167).
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        try:
                            self.__binders[triplet[1]].unbind(triplet, func)
                        except tkinter.TclError as e:
                            if not APPLICATION_GONE in e.args[0]:
                                raise
    _multicall_dict[widget] = MultiCall
    return MultiCall
def _multi_call(parent):
    """Semi-automatic test: open a Text widget that echoes fired sequences."""
    root = tkinter.Tk()
    root.title("Test MultiCall")
    # Place the test window just below the parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    text = MultiCallCreator(tkinter.Text)(root)
    text.pack()

    def make_handler(sequence):
        # Each handler simply reports which sequence fired.
        def handler(event):
            print(sequence)
        return handler

    test_sequences = (
        "<Key>", "<Control-Key>", "<Alt-Key-a>", "<Control-Key-a>",
        "<Alt-Control-Key-a>", "<Key-b>", "<Control-Button-1>",
        "<Button-2>", "<Alt-Button-1>", "<FocusOut>", "<Enter>", "<Leave>",
    )
    for index, sequence in enumerate(test_sequences):
        virtual = "<<handler%d>>" % index
        text.bind(virtual, make_handler(sequence))
        text.event_add(virtual, sequence)
    root.mainloop()
if __name__ == "__main__":
    # Manual test: run through IDLE's human-verified test framework.
    from idlelib.idle_test.htest import run
    run(_multi_call)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit |
atosorigin/ansible | test/units/modules/test_known_hosts.py | 35 | 4373 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from ansible.module_utils import basic
from units.compat import unittest
from ansible.module_utils._text import to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.modules.known_hosts import compute_diff, sanity_check
class KnownHostsDiffTestCase(unittest.TestCase):
    """Unit tests for known_hosts.compute_diff and sanity_check."""
    def _create_file(self, content):
        """Write content to a temp known_hosts file; return its path.

        The file is removed automatically via addCleanup.
        """
        tmp_file = tempfile.NamedTemporaryFile(prefix='ansible-test-', suffix='-known_hosts', delete=False)
        tmp_file.write(to_bytes(content))
        tmp_file.close()
        self.addCleanup(os.unlink, tmp_file.name)
        return tmp_file.name
    def test_no_existing_file(self):
        """Adding a key to a missing file diffs against /dev/null."""
        path = "/tmp/this_file_does_not_exists_known_hosts"
        key = 'example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
        self.assertEqual(diff, {
            'before_header': '/dev/null',
            'after_header': path,
            'before': '',
            'after': 'example.com ssh-rsa AAAAetc\n',
        })
    def test_key_addition(self):
        """A new host's key is appended after the existing entries."""
        path = self._create_file(
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
        self.assertEqual(diff, {
            'before_header': path,
            'after_header': path,
            'before': 'two.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
        })
    def test_no_change(self):
        """A key already present on the found line leaves the file as-is."""
        path = self._create_file(
            'one.example.com ssh-rsa AAAAetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=1, replace_or_add=False, state='present', key=key)
        self.assertEqual(diff, {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
        })
    def test_key_change(self):
        """Replacing a key removes the old line and appends the new one."""
        path = self._create_file(
            'one.example.com ssh-rsa AAAaetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=1, replace_or_add=True, state='present', key=key)
        self.assertEqual(diff, {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAaetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
        })
    def test_key_removal(self):
        """state='absent' with a found line drops that line from the file."""
        path = self._create_file(
            'one.example.com ssh-rsa AAAAetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=1, replace_or_add=False, state='absent', key=key)
        self.assertEqual(diff, {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\n',
        })
    def test_key_removal_no_change(self):
        """Removing a key that is not present leaves the file untouched."""
        path = self._create_file(
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'
        diff = compute_diff(path, found_line=None, replace_or_add=False, state='absent', key=key)
        self.assertEqual(diff, {
            'before_header': path,
            'after_header': path,
            'before': 'two.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\n',
        })
    def test_sanity_check(self):
        """Smoke-test sanity_check against the system ssh-keygen."""
        basic._load_params = lambda: {}
        # Module used internally to execute ssh-keygen system executable
        module = AnsibleModule(argument_spec={})
        host = '10.0.0.1'
        key = '%s ssh-rsa ASDF foo@bar' % (host,)
        keygen = module.get_bin_path('ssh-keygen')
        sanity_check(module, host, key, keygen)
| gpl-3.0 |
amyvmiwei/chromium | tools/grit/grit/grd_reader.py | 3 | 4219 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Class for reading GRD files into memory, without processing them.
'''
import os.path
import types
import xml.sax
import xml.sax.handler
from grit import exception
from grit.node import base
from grit.node import mapping
from grit import util
class StopParsingException(Exception):
  """Raised internally to abort SAX parsing once 'stop_after' is reached."""
class GrdContentHandler(xml.sax.handler.ContentHandler):
  '''SAX handler that builds a tree of grit.node objects from a GRD file.

  If 'stop_after' names an element, StopParsingException is raised as soon
  as that element has been fully parsed.  'debug' enables verbose tracing.
  '''
  def __init__(self, stop_after=None, debug=False):
    # Invariant of data:
    # 'root' is the root of the parse tree being created, or None if we haven't
    # parsed out any elements.
    # 'stack' is the stack of elements that we push new nodes onto and
    # pop from when they finish parsing, or [] if we are not currently parsing.
    # 'stack[-1]' is the top of the stack.
    self.root = None
    self.stack = []
    self.stop_after = stop_after
    self.debug = debug
  def startElement(self, name, attrs):
    assert not self.root or len(self.stack) > 0
    if self.debug:
      attr_list = []
      for attr in attrs.getNames():
        attr_list.append('%s="%s"' % (attr, attrs.getValue(attr)))
      if len(attr_list) == 0: attr_list = ['(none)']
      attr_list = ' '.join(attr_list)
      print "Starting parsing of element %s with attributes %r" % (name, attr_list)
    # The 'type' attribute selects among node classes for the same element.
    typeattr = None
    if 'type' in attrs.getNames():
      typeattr = attrs.getValue('type')
    node = mapping.ElementToClass(name, typeattr)()
    if not self.root:
      self.root = node
    if len(self.stack) > 0:
      self.stack[-1].AddChild(node)
      node.StartParsing(name, self.stack[-1])
    else:
      node.StartParsing(name, None)
    # Push
    self.stack.append(node)
    for attr in attrs.getNames():
      node.HandleAttribute(attr, attrs.getValue(attr))
  def endElement(self, name):
    if self.debug:
      print "End parsing of element %s" % name
    # Pop
    # NOTE(review): stack[-1] is used before the non-empty assert below;
    # an unbalanced document would raise IndexError first.  Confirm intent.
    self.stack[-1].EndParsing()
    assert len(self.stack) > 0
    self.stack = self.stack[:-1]
    if self.stop_after and name == self.stop_after:
      raise StopParsingException()
  def characters(self, content):
    # Accumulate text content on the currently open node.
    if self.stack[-1]:
      self.stack[-1].AppendContent(content)
  def ignorableWhitespace(self, whitespace):
    # TODO(joi) This is not supported by expat. Should use a different XML parser?
    pass
def Parse(filename_or_stream, dir = None, flexible_root = False,
          stop_after=None, debug=False):
  '''Parses a GRD file into a tree of nodes (from grit.node).

  If flexible_root is False, the root node must be a <grit> element. Otherwise
  it can be any element. The "own" directory of the file will only be fixed up
  if the root node is a <grit> element.

  'dir' should point to the directory of the input file, or be the full path
  to the input file (the filename will be stripped).

  If 'stop_after' is provided, the parsing will stop once the first node
  with this name has been fully parsed (including all its contents).

  If 'debug' is true, lots of information about the parsing events will be
  printed out during parsing of the file.

  Args:
    filename_or_stream: './bla.xml' (must be filename if dir is None)
    dir: '.' or None (only if filename_or_stream is a filename)
    flexible_root: True | False
    stop_after: 'inputs'
    debug: False

  Return:
    Subclass of grit.node.base.Node

  Throws:
    grit.exception.Parsing
  '''
  handler = GrdContentHandler(stop_after=stop_after, debug=debug)
  try:
    xml.sax.parse(filename_or_stream, handler)
  except StopParsingException:
    # Expected when 'stop_after' was supplied: parsing was cut short on
    # purpose and the partial tree in handler.root is the desired result.
    # (The old trailing "except: raise" clause was a no-op and was removed.)
    assert stop_after
  if not flexible_root or hasattr(handler.root, 'SetOwnDir'):
    assert isinstance(filename_or_stream, types.StringType) or dir is not None
    if not dir:
      dir = util.dirname(filename_or_stream)
      if len(dir) == 0:
        dir = '.'
    # Fix up the base_dir so it is relative to the input file.
    handler.root.SetOwnDir(dir)
  return handler.root
if __name__ == '__main__':
util.ChangeStdoutEncoding()
print unicode(Parse(sys.argv[1]))
| bsd-3-clause |
moritzschaefer/the-search-engine | tsg/indexer/page_rank.py | 1 | 4665 | import re
from collections import OrderedDict
from lxml import etree
import os
from os.path import splitext
from tsg.config import RAW_DIR
import operator
import logging
def parse_link(doc_link):
    """Derive the local document filename for a link.

    The result encodes the document category (author/conference/journal/
    other, inferred from the URL path) plus the last two path components,
    and always carries a '.html' suffix.
    """
    # Last two path components; anything after '#' in the final component
    # is a fragment and is discarded.
    parent, leaf = re.search('([^/]*)/([^/]*)$', doc_link).groups()
    leaf = leaf.split("#")[0]

    if "/pers/" in doc_link:
        category = "author"
    elif "/conf/" in doc_link:
        category = "conference"
    elif "/journals/" in doc_link:
        category = "journal"
    else:
        category = "other"

    suffix = '' if leaf.endswith('.html') else '.html'
    return '{}_{}_{}{}'.format(category, parent, leaf, suffix)
def get_page_outlinks(doc_path):
    """Return the outgoing links of one HTML page, as document filenames.

    Extracts every href inside <div class='data'> elements and maps each
    through parse_link().  A missing file simply yields an empty list.
    """
    hrefs = []
    if os.path.exists(doc_path):
        with open(doc_path) as doc_f:
            tree = etree.parse(doc_f, etree.HTMLParser())
            hrefs = tree.xpath("//div[@class='data']//a/@href")
    return [parse_link(href) for href in hrefs]
def build_link_database(html_files_path=RAW_DIR):
    """Scan all .html files under html_files_path and build the link graph.

    Returns a pair (inlinks, outlinks):
      inlinks  -- OrderedDict mapping a document id to the deduplicated list
                  of document ids that link to it, alphabetical by key.
      outlinks -- dict mapping a document id to the list of ids it links to.
    Document ids are filenames with the extension stripped.
    """
    logging.info('Start building link database')
    log_cnt = 0
    doc_dict = {}
    doc_outlinks = {}
    for doc_filename in os.listdir(html_files_path):
        log_cnt += 1
        if log_cnt % 100000 == 0:
            logging.info(
                'Building Link database. Now at file {}'.format(log_cnt))
        if doc_filename.endswith(".html"):
            doc_path = html_files_path + doc_filename
            doc_outlinks[doc_filename] = get_page_outlinks(doc_path)
            for target_doc in doc_outlinks[doc_filename]:
                # setdefault replaces the previous try/except KeyError dance.
                doc_dict.setdefault(target_doc, []).append(doc_filename)
            # Every scanned document gets an entry, even with no inlinks.
            doc_dict.setdefault(doc_filename, [])
    # Deduplicate inlink lists and strip extensions to get document ids.
    doc_dict = {splitext(doc)[0]: [splitext(d)[0] for d in (set(doc_dict[doc]))]
                for doc in doc_dict}
    doc_outlinks = {splitext(doc)[0]: [splitext(d)[0] for d in doc_outlinks[doc]]
                    for doc in doc_outlinks}
    # Sort alphabetically by document id.
    ordered_db = OrderedDict(sorted(doc_dict.items(),
                                    key=operator.itemgetter(0)))
    logging.info('Finished building link database')
    return ordered_db, doc_outlinks
def calc_page_rank(html_files_path=RAW_DIR):
    """Iteratively compute PageRank for the documents under html_files_path.

    Returns an OrderedDict of document id -> rank, ordered by rank (then id)
    descending.  Iteration stops once no document's rank drops by more than
    'threshold' between rounds.
    """
    logging.info('Starting calc_page_rank')
    d = 0.85 # Damping in PageRank Algorithm
    threshold = 0.0001 # 1x 10^-3
    iteration_flag = True # Keep page rank iteration until threshold is met
    log_cnt = 0
    docs_links_db, doc_outlinks = build_link_database(html_files_path)
    # Every document starts with rank 1.0.
    pagerank_per_doc = {doc: 1.0 for doc in docs_links_db}
    while iteration_flag:
        log_cnt = 0
        logging.info('Starting new iteration in calculating the page rank')
        tmp_pagerank_per_doc = {}
        for doc, doc_inlinks in docs_links_db.items():
            # Standard PageRank update: (1-d) + d * sum(rank/outdegree).
            tmp_pagerank_per_doc[doc] = (1 - d)
            for inlink in doc_inlinks:
                num_outlinks_per_inlink = 0
                if inlink in doc_outlinks:
                    num_outlinks_per_inlink = len(doc_outlinks[inlink])
                    tmp_pagerank_per_doc[doc] += \
                        d * (pagerank_per_doc[inlink] /
                             num_outlinks_per_inlink)
                else:
                    # NOTE(review): this resets the whole accumulated rank to
                    # 0 when any inlink lacks an outlink record — presumably
                    # the intent was to skip that inlink instead.  By
                    # construction every inlink should have a record, so this
                    # branch looks unreachable; confirm before relying on it.
                    tmp_pagerank_per_doc[doc] = 0
            log_cnt += 1
            if log_cnt % 100000 == 0:
                logging.info('at doc_link {}'.format(log_cnt))
        logging.info('Now investigating stop condition for caculating the'
                     'page rank')
        # Converged when no rank decreased by more than the threshold.
        iteration_flag = False
        for doc in tmp_pagerank_per_doc:
            if (pagerank_per_doc[doc] - tmp_pagerank_per_doc[doc] > threshold):
                iteration_flag = True
        pagerank_per_doc = tmp_pagerank_per_doc
    sorted_pagerank_per_docs = OrderedDict(sorted(pagerank_per_doc.items(),
                                                  key=operator.itemgetter(1, 0),
                                                  reverse=True))
    return sorted_pagerank_per_docs
| mit |
ajayuranakar/django-blog | lib/python2.7/site-packages/django/db/backends/mysql/creation.py | 74 | 2231 | import subprocess
import sys
from django.db.backends.base.creation import BaseDatabaseCreation
from .client import DatabaseClient
class DatabaseCreation(BaseDatabaseCreation):
    """MySQL-specific test-database creation behavior."""
    def sql_table_creation_suffix(self):
        """Return the CREATE TABLE suffix from TEST charset/collation settings."""
        suffix = []
        test_settings = self.connection.settings_dict['TEST']
        if test_settings['CHARSET']:
            suffix.append('CHARACTER SET %s' % test_settings['CHARSET'])
        if test_settings['COLLATION']:
            suffix.append('COLLATE %s' % test_settings['COLLATION'])
        return ' '.join(suffix)
    def _clone_test_db(self, number, verbosity, keepdb=False):
        """Clone the test database for parallel test runner 'number'.

        Creates the target database (dropping a stale one unless keepdb),
        then copies content by piping mysqldump into a mysql client.
        """
        qn = self.connection.ops.quote_name
        source_database_name = self.connection.settings_dict['NAME']
        target_database_name = self.get_test_db_clone_settings(number)['NAME']
        with self._nodb_connection.cursor() as cursor:
            try:
                cursor.execute("CREATE DATABASE %s" % qn(target_database_name))
            except Exception as e:
                # Creation failed, most likely because the database exists.
                if keepdb:
                    return
                try:
                    if verbosity >= 1:
                        print("Destroying old test database for alias %s..." % (
                            self._get_database_display_str(target_database_name, verbosity),
                        ))
                    cursor.execute("DROP DATABASE %s" % qn(target_database_name))
                    cursor.execute("CREATE DATABASE %s" % qn(target_database_name))
                except Exception as e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
        # Reuse the client's connection arguments; swap the executable and
        # database name to form the dump and load halves of the pipeline.
        dump_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        dump_cmd[0] = 'mysqldump'
        dump_cmd[-1] = source_database_name
        load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        load_cmd[-1] = target_database_name
        dump_proc = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE)
        load_proc = subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.PIPE)
        dump_proc.stdout.close() # allow dump_proc to receive a SIGPIPE if load_proc exits.
        load_proc.communicate()
| gpl-3.0 |
nimbusproject/pidantic | setup.py | 1 | 1649 | #!/usr/bin/env python
# Copyright 2013 University of Chicago
# Prefer setuptools; fall back to distutils on older installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
import sys
VERSION = "0.1.3"
# Bug fix: compare the version tuple directly.  The old
# float("%d.%d" % sys.version_info[:2]) approach misorders two-digit minor
# versions (e.g. 2.10 formats as the float 2.1, which is < 2.5).
if sys.version_info[:2] < (2, 5):
    sys.stderr.write("Your Python version %d.%d.%d is not supported.\n" % sys.version_info[:3])
    sys.stderr.write("pidantic requires Python 2.5 or newer.\n")
    sys.exit(1)
setup(
    name='pidantic',
    version=VERSION,
    description='An abstraction to process management for OOI',
    author='Nimbus Development Team',
    author_email='workspace-user@globus.org',
    url='https://github.com/nimbusproject/pidantic',
    download_url="http://www.nimbusproject.org/downloads/pidantic-%s.tar.gz" % VERSION,
    packages=['pidantic', 'pidantic.fork', 'pidantic.supd', 'pidantic.pyon'],
    keywords="OOI PID process fork supervisord ION",
    long_description="""Some other time""",
    license="Apache2",
    install_requires=['supervisor', 'sqlalchemy==0.7.6', 'gevent'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Topic :: System :: Clustering',
        'Topic :: System :: Distributed Computing',
    ],
)
| apache-2.0 |
yu-peng/english-pddl-translator | launchTkinterGUI.py | 1 | 38281 | '''
Created on Dec 6, 2012
@author: yupeng
'''
#from PIL import Image, ImageTk
from Tkinter import *
from ttk import *
from notebook import *
from parseSentence import *
from getElement import *
from getElementSubtree import *
from GraphPlanWrapper import *
class UhuraGUI(Frame):
    def __init__(self, parent):
        """Create the main frame inside 'parent' and build all widgets."""
        Frame.__init__(self, parent, background="azure2")
        self.parent = parent
        self.initUI()
    def initUI(self):
        """Build the main window: centered 940x1000 frame with three tabs."""
        self.parent.title("Uhura V0.2: An English -> RMPL/PDDL Translator by Peng Yu and Jonathan Trevino")
        self.style = Style()
        self.style.theme_use("default")
        self.pack(fill=BOTH, expand=1)
        # Center the fixed-size window on the screen.
        w = 940
        h = 1000
        sw = self.parent.winfo_screenwidth()
        sh = self.parent.winfo_screenheight()
        x = (sw - w)/2
        y = (sh - h)/2
        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
        #TranslatorFrame = Frame(self,width=980, height=880,borderwidth=1, background="azure2")
        # One tab per mode; each build* method populates its frame.
        Tabs = notebook(self, TOP)
        TranslatorFrame = Frame(Tabs(),width=930, height=980,borderwidth=1, background="azure2")
        self.buildTranslatorFrame(TranslatorFrame)
        RMPLTestFrame = Frame(Tabs(),width=930, height=980,borderwidth=1, background="azure2")
        self.buildRMPLFrame(RMPLTestFrame)
        PDDLTestFrame = Frame(Tabs(),width=930, height=980,borderwidth=1, background="azure2")
        self.buildPDDLFrame(PDDLTestFrame)
        Tabs.add_screen(TranslatorFrame, " Translator ")
        Tabs.add_screen(RMPLTestFrame, " RMPL Test ")
        Tabs.add_screen(PDDLTestFrame, " PDDL Test ")
        print 'Done making GUI'
    def buildPDDLFrame(self,PDDLTestFrame):
        """Lay out the PDDL Test tab: scenario picker, goal entry boxes,
        received-goal views, and the plan output area.

        PDDLTestFrame -- the tab frame created by initUI to place widgets in.
        """
        # --- Header and scenario selection ---
        NameLabel = Label(PDDLTestFrame, text="Specify the goals of a planning problem using English sentences. \n "+
            "The translator will generate the PDDL expressions and ask a planner to find solutions. \n"+
            "This test focuses on the spatial relations of the goal states.", background="azure2")
        NameLabel.place(x=10,y=10)
        ProblemDesLabel = Label(PDDLTestFrame, text="Choose a sample scenario:", background="azure2")
        ProblemDesLabel.place(x=10,y=70)
        self.PDDLscenario = IntVar()
        self.PDDLscenario.set(1)
        Radiobutton(PDDLTestFrame, text="Block world", variable=self.PDDLscenario, value=1, background="azure2",command=self.changePDDLDomain).place(x=200,y=70)
        Radiobutton(PDDLTestFrame, text="Lunar exploration", variable=self.PDDLscenario, value=2, background="azure2",command=self.changePDDLDomain).place(x=360,y=70)
        ProblemDesFrame = Frame(PDDLTestFrame,width=580, height=170,borderwidth=2,relief=RIDGE)
        ProblemDesFrame.place(x=10,y=90)
        self.PDDLProblemDes = Label(ProblemDesFrame,text='', justify=LEFT)
        self.PDDLProblemDes.place(x=0,y=0)
        # --- Goal input box and add/remove/plan buttons ---
        GiveInput = Label(PDDLTestFrame, text="Describe your goals in English:", background="azure2")
        GiveInput.place(x=10,y=270)
        InputPDDLGoalFrame = Frame(PDDLTestFrame, height=50, width=290)
        InputPDDLGoalFrame.pack_propagate(0) # don't shrink
        InputPDDLGoalFrame.place(x=10,y=290)
        self.InputPDDLGoal = Text(InputPDDLGoalFrame)
        self.InputPDDLGoal.pack(fill=BOTH, expand=1)
        self.InputPDDLGoal.insert(1.0, '')
        AddPDDLButtonFrame = Frame(PDDLTestFrame, height=40, width=120)
        AddPDDLButtonFrame.pack_propagate(0) # don't shrink
        AddPDDLButtonFrame.place(x=15,y=350)
        AddPDDLButton = Button(AddPDDLButtonFrame, text="\nAdd to PDDL\n",command=self.AddPDDL)
        AddPDDLButton.pack(fill=BOTH, expand=1)
        RemoveLastPDDLButtonFrame = Frame(PDDLTestFrame, height=40, width=120)
        RemoveLastPDDLButtonFrame.pack_propagate(0) # don't shrink
        RemoveLastPDDLButtonFrame.place(x=170,y=350)
        RemoveLastPDDLButton = Button(RemoveLastPDDLButtonFrame, text="\nRemove last input\n",command=self.RemovePDDL)
        RemoveLastPDDLButton.pack(fill=BOTH, expand=1)
        PlanPDDLButtonFrame = Frame(PDDLTestFrame, height=50, width=290)
        PlanPDDLButtonFrame.pack_propagate(0) # don't shrink
        PlanPDDLButtonFrame.place(x=10,y=400)
        PlanPDDLButton = Button(PlanPDDLButtonFrame, text="\nPlan!\n",command=self.PlanPDDL)
        PlanPDDLButton.pack(fill=BOTH, expand=1)
        # --- Read-only views of the parsed goals and recorded actions ---
        ReceivedPDDLLabel = Label(PDDLTestFrame, text="Received PDDL goals:", background="azure2")
        ReceivedPDDLLabel.place(x=310,y=270)
        ReceivedPDDLFrame = Frame(PDDLTestFrame, height=170, width=290)
        ReceivedPDDLFrame.pack_propagate(0) # don't shrink
        ReceivedPDDLFrame.place(x=310,y=290)
        self.ReceivedPDDL = Text(ReceivedPDDLFrame)
        self.ReceivedPDDL.pack(fill=BOTH, expand=1)
        self.ReceivedPDDL.insert(1.0, '')
        ReceivedPDDLActionsLabel = Label(PDDLTestFrame, text="Received Actions (will not be used in the planner):", background="azure2")
        ReceivedPDDLActionsLabel.place(x=610,y=270)
        ReceivedPDDLActionsFrame = Frame(PDDLTestFrame, height=170, width=290)
        ReceivedPDDLActionsFrame.pack_propagate(0) # don't shrink
        ReceivedPDDLActionsFrame.place(x=610,y=290)
        self.ReceivedPDDLActions = Text(ReceivedPDDLActionsFrame)
        self.ReceivedPDDLActions.pack(fill=BOTH, expand=1)
        self.ReceivedPDDLActions.insert(1.0, '')
        # --- Scenario data panes: initial condition, operators, plan output ---
        InitialPDDLConditionLabel = Label(PDDLTestFrame, text="Initial Condition:", background="azure2")
        InitialPDDLConditionLabel.place(x=610,y=70)
        InitialPDDLConditionFrame = Frame(PDDLTestFrame,width=290, height=170,borderwidth=2,relief=RIDGE, background="azure2")
        InitialPDDLConditionFrame.place(x=610,y=90)
        self.InitialConditionPDDL = Label(InitialPDDLConditionFrame,text='', justify=LEFT, background="azure2")
        self.InitialConditionPDDL.place(x=0,y=0)
        InitialPDDLActionLabel = Label(PDDLTestFrame, text="Available Actions:", background="azure2")
        InitialPDDLActionLabel.place(x=10,y=480)
        InitialPDDLActionFrame = Frame(PDDLTestFrame,width=350, height=400,borderwidth=2,relief=RIDGE, background="azure2")
        InitialPDDLActionFrame.place(x=10,y=500)
        self.DomainConditionPDDL = Label(InitialPDDLActionFrame,text='', justify=LEFT, background="azure2")
        self.DomainConditionPDDL.place(x=0,y=0)
        PDDLPlanLabel = Label(PDDLTestFrame, text="Generated Plan:", background="azure2")
        PDDLPlanLabel.place(x=380,y=480)
        PDDLPlanFrame = Frame(PDDLTestFrame,width=520, height=400,borderwidth=2,relief=RIDGE, background="azure2")
        PDDLPlanFrame.place(x=380,y=500)
        self.PDDLPlan = Label(PDDLPlanFrame,text='', justify=LEFT, background="azure2")
        self.PDDLPlan.place(x=0,y=0)
        # Default to the block-world scenario with empty goal/action lists
        self.SetPDDLBlockWorld()
        self.ReceivedPDDLStrings = []
        self.ReceivedPDDLActionStrings = []
def changePDDLDomain(self):
if self.PDDLscenario.get() == 1:
# Blocksworld domain
self.SetPDDLBlockWorld()
elif self.PDDLscenario.get() == 2:
# Lunar domain
self.SetPDDLLunarMission()
    def buildRMPLFrame(self,RMPLTestFrame):
        """Lay out the RMPL Test tab: scenario picker, goal entry, received
        RMPL view, initial-condition pane, and plan output area.

        RMPLTestFrame -- the tab frame created by initUI to place widgets in.
        """
        # --- Header and scenario selection ---
        NameLabel = Label(RMPLTestFrame, text="Specify the goals of a planning problem using English sentences. \n "+
            "The translator will generate the RMPL expressions and ask a planner to find solutions. \n"+
            "This test focuses on the temporal relations of the goal states.", background="azure2")
        NameLabel.place(x=10,y=10)
        ProblemDesLabel = Label(RMPLTestFrame, text="Choose a sample scenario:", background="azure2")
        ProblemDesLabel.place(x=10,y=70)
        self.RMPLscenario = IntVar()
        self.RMPLscenario.set(2)
        # Only the shopping scenario is enabled; the others are placeholders.
#        Radiobutton(RMPLTestFrame, text="Around the campus", variable=self.RMPLscenario, value=1, background="azure2").place(x=200,y=70)
        Radiobutton(RMPLTestFrame, text="Weekend shopping", variable=self.RMPLscenario, value=2, background="azure2").place(x=360,y=70)
#        Radiobutton(RMPLTestFrame, text="Christmas trip", variable=self.RMPLscenario, value=3, background="azure2").place(x=520,y=70)
#        Radiobutton(RMPLTestFrame, text="Around the world", variable=self.RMPLscenario, value=4, background="azure2").place(x=680,y=70)
        ProblemDesFrame = Frame(RMPLTestFrame,width=900, height=150,borderwidth=2,relief=RIDGE)
        ProblemDesFrame.place(x=10,y=100)
        self.RMPLProblemDes = Label(ProblemDesFrame,text='It is a sunny Sunday morning and you are about to start a day trip around Cambridge.\n'+
            'You are going to pick up your clothes from a laundry store, have lunch in Chinatown and shop for grocery.\n'+
            'In addition, you have a smart car that can understand your requests and will generate the best travel plan that satisfies all your goals.\n'+
            'Please describe your requirements in the following text box, such as "go to the lunch place in 30 minutes" \n'+
            'or "visit the laundry store before the grocery store".', justify=LEFT)
        self.RMPLProblemDes.place(x=0,y=0)
        # --- Goal input box and add/remove/plan buttons ---
        GiveInput = Label(RMPLTestFrame, text="Describe your goals in English:", background="azure2")
        GiveInput.place(x=10,y=270)
        InputRMPLGoalFrame = Frame(RMPLTestFrame, height=35, width=400)
        InputRMPLGoalFrame.pack_propagate(0) # don't shrink
        InputRMPLGoalFrame.place(x=10,y=290)
        self.InputRMPLGoal = Text(InputRMPLGoalFrame)
        self.InputRMPLGoal.pack(fill=BOTH, expand=1)
        self.InputRMPLGoal.insert(1.0, 'The man with a coat wants to eat and drink a cake and two wings.')
        AddRMPLButtonFrame = Frame(RMPLTestFrame, height=50, width=160)
        AddRMPLButtonFrame.pack_propagate(0) # don't shrink
        AddRMPLButtonFrame.place(x=35,y=335)
        AddRMPLButton = Button(AddRMPLButtonFrame, text="\nAdd to RMPL\n",command=self.AddRMPL)
        AddRMPLButton.pack(fill=BOTH, expand=1)
        RemoveLastRMPLButtonFrame = Frame(RMPLTestFrame, height=50, width=160)
        RemoveLastRMPLButtonFrame.pack_propagate(0) # don't shrink
        RemoveLastRMPLButtonFrame.place(x=220,y=335)
        RemoveLastRMPLButton = Button(RemoveLastRMPLButtonFrame, text="\nRemove last input\n",command=self.RemoveRMPL)
        RemoveLastRMPLButton.pack(fill=BOTH, expand=1)
        PlanRMPLButtonFrame = Frame(RMPLTestFrame, height=50, width=380)
        PlanRMPLButtonFrame.pack_propagate(0) # don't shrink
        PlanRMPLButtonFrame.place(x=20,y=400)
        PlanRMPLButton = Button(PlanRMPLButtonFrame, text="\nPlan!\n",command=self.PlanRMPL)
        PlanRMPLButton.pack(fill=BOTH, expand=1)
        # --- Read-only views: received RMPL, initial condition, plan ---
        ReceivedRMPL = Label(RMPLTestFrame, text="Received RMPL goals:", background="azure2")
        ReceivedRMPL.place(x=430,y=270)
        ReceivedRMPLFrame = Frame(RMPLTestFrame, height=170, width=470)
        ReceivedRMPLFrame.pack_propagate(0) # don't shrink
        ReceivedRMPLFrame.place(x=430,y=290)
        self.ReceivedRMPL = Text(ReceivedRMPLFrame)
        self.ReceivedRMPL.pack(fill=BOTH, expand=1)
        self.ReceivedRMPL.insert(1.0, '')
        InitialRMPLConditionLabel = Label(RMPLTestFrame, text="Initial Condition:", background="azure2")
        InitialRMPLConditionLabel.place(x=10,y=480)
        InitialConditionRMPLFrame = Frame(RMPLTestFrame,width=250, height=400,borderwidth=2,relief=RIDGE, background="azure2")
        InitialConditionRMPLFrame.place(x=10,y=500)
        self.InitialConditionRMPL = Label(InitialConditionRMPLFrame,text='(t = 0) At home', justify=LEFT, background="azure2")
        self.InitialConditionRMPL.place(x=0,y=0)
        RMPLPlanLabel = Label(RMPLTestFrame, text="Generated Plan:", background="azure2")
        RMPLPlanLabel.place(x=300,y=480)
        RMPLPlanFrame = Frame(RMPLTestFrame,width=600, height=400,borderwidth=2,relief=RIDGE, background="azure2")
        RMPLPlanFrame.place(x=300,y=500)
        self.RMPLPlan = Label(RMPLPlanFrame,text='', justify=LEFT, background="azure2")
        self.RMPLPlan.place(x=0,y=0)
        # Accumulates [definitions, actions, classes] triples from GenerateRMPL
        self.ReceivedRMPLStrings = []
    def AddRMPL(self):
        """Parse the English goal in the RMPL input box, display the parse
        tree and extracted elements, and append the generated RMPL fragment
        to the accumulated program.
        """
        sent = self.InputRMPLGoal.get(1.0,END)
        print sent
        # show parse tree
        tree = parseEnglish(sent,self.parser.get());
        self.showTree(tree)
        # Split the parse into the agent (NP) and action (VP) subtrees
        AgentSubtree = ExtractAgentSubtree(tree);
        ActionSubtree = ExtractActionSubtree(tree);
        print 'Agent: ',AgentSubtree
        print 'Actions: ',ActionSubtree
        agents = []
        actions = []
        # Given a NP subtree, get a list of agents inside
        if AgentSubtree != None:
            agents = getAgents(AgentSubtree)
        # Given a VP subtree, get a list of actions inside
        if ActionSubtree != None:
            actions = getAction(ActionSubtree)
        self.ClearOutputs()
        self.ShowResult(agents,actions)
        # Result is a [definitions, actions, classes] triple of RMPL strings
        Result = self.GenerateRMPL(agents,actions)
        self.ReceivedRMPLStrings.append(Result)
        self.showReceivedRMPL()
def showReceivedRMPL(self):
StringRMPL = ''
StringMain = 'class Main {\n'
StringRun = 'method run () {\n'
StringRun = StringRun + 'parallel {\n'
for input in self.ReceivedRMPLStrings:
NewDefs = input[0]
NewActions = input[1]
NewClasses = input[2]
for NewDef in NewDefs:
StringMain = StringMain + NewDef
for NewAction in NewActions:
StringRun = StringRun + NewAction
for NewClass in NewClasses:
StringRMPL = StringRMPL + NewClass
StringRun = StringRun + '}\n}\n'
StringMain = StringMain + StringRun + '}\n'
StringRMPL = StringMain + StringRMPL
self.ReceivedRMPL.delete(1.0,END)
self.ReceivedRMPL.insert(END, StringRMPL+'\n')
def RemoveRMPL(self):
if len(self.ReceivedRMPLStrings) > 0:
self.ReceivedRMPLStrings.pop()
self.showReceivedRMPL()
    def PlanRMPL(self):
        """Placeholder: the RMPL temporal planner is not bundled, so just
        report that planning is unavailable."""
        print 'Sorry, the RMPL temporal planner is not included in this distribution.'
    def SetPDDLBlockWorld(self):
        """Load the block-world scenario: reset the input/received boxes and
        install its description, initial facts, and PDDL operators."""
        self.InputPDDLGoal.delete(1.0,END)
        self.ReceivedPDDL.delete(1.0,END)
        self.PDDLProblemDes.config(text='This is a classic block world scenario.\n'+
            'You have three fruits: apple, pear, and orange lying on the table at the moment.\n'+
            'You have two available actions that can modify the states of these objects.\n'+
            'Describe the desired end states using English sentences,\n'+
            'such as "put apple on orange" (case sensitive).\n'+
            'Uhura will try to parse the desired states from your input.\n'+
            'It will also record the action in your input, but will not use it in the planning process.\n'+
            'You can always go back to the Translator tab to check the current parse tree\nand see what is going on.', justify=LEFT)
        self.InputPDDLGoal.insert(1.0, 'put apple on pear.')
        # Initial facts and the STACK / PICK-UP operators for the planner
        self.PDDLInitial = '(apple OBJECT)\n(pear OBJECT)\n(orange OBJECT)\n(preconds (on-table apple)\n(on-table pear)\n(on-table orange)\n(clear apple)\n(clear pear)\n(clear orange)\n(arm-empty))'
        self.PDDLDomain = '(operator\nSTACK\n(params (<ob> OBJECT) (<underob> OBJECT))\n(preconds\n(clear <underob>) (holding <ob>))\n(effects\n(del holding <ob>) (del clear <underob>)\n(arm-empty) (clear <ob>) (on <ob> <underob>)))\n\n'+'(operator\nPICK-UP\n(params (<ob1> OBJECT))\n(preconds\n(clear <ob1>) (on-table <ob1>) (arm-empty))\n(effects\n(del on-table <ob1>) (del clear <ob1>)\n(del arm-empty) (holding <ob1>)))\n'
        self.InitialConditionPDDL.config(text=self.PDDLInitial, justify=LEFT, background="azure2")
        self.DomainConditionPDDL.config(text=self.PDDLDomain, justify=LEFT, background="azure2")
        # Reset accumulated goals/actions when switching scenarios
        self.ReceivedPDDLStrings = []
        self.ReceivedPDDLActionStrings = []
def SetPDDLLunarMission(self):
self.InputPDDLGoal.delete(1.0,END)
self.ReceivedPDDL.delete(1.0,END)
self.PDDLProblemDes.config(text='This is a lunar mission scenario.\n'+
'You are a rover operator and was sent to the lunar surface with three lovely animals: dog, cat, and pig.\n'+
'They are currently placed in the canyon, but you can transport them to two other places: mountain and river.\n'+
'Describe the desired end states using English sentences,\n'+
'such as "put dog in mountain" (case sensitive).\n'+
'Uhura will try to parse the desired states from your input.\n'+
'It will also record the action in your input, but will not use it in the planning process.\n'+
'You can always go back to the Translator tab to check the current parse tree and see what is going on.', justify=LEFT)
self.InputPDDLGoal.insert(1.0, 'put dog in mountain.')
self.PDDLDomain = '(operator navigate\n(params (<placefrom> PLACE)\n(<placeto> PLACE))\n(preconds (at <placefrom>) (not-at <placeto>))\n(effects (at <placeto>) (del at <placefrom>)\n(not-at <placefrom>)))\n\n(operator pickup\n(params (<item> OBJECT) (<place> PLACE))\n(preconds (at <place>) (in <item> <place>))\n(effects (del in <item> <place>) (has <item>)))\n\n(operator dropoff\n(params (<item> OBJECT) (<place> PLACE))\n(precons (at <place>) (has <item>))\n(effects (del has <item>) (in <item> <place>)))\n'
self.PDDLInitial = '(dog OBJECT) (cat OBJECT) (pig OBJECT)\n(canyon PLACE) (mountain PLACE) (river PLACE)\n(preconds\n(at canyon)\n(not-at mountain)\n(not-at river)\n(in dog canyon)\n(in cat canyon)\n(in pig canyon))'
self.InitialConditionPDDL.config(text=self.PDDLInitial, justify=LEFT, background="azure2")
self.DomainConditionPDDL.config(text=self.PDDLDomain, justify=LEFT, background="azure2")
self.ReceivedPDDLStrings = []
self.ReceivedPDDLActionStrings = []
    def AddPDDL(self):
        """Parse the English goal in the PDDL input box, display the parse
        tree and extracted elements, and append the generated PDDL goal and
        action strings to their accumulated lists and views.
        """
        sent = self.InputPDDLGoal.get(1.0,END)
        print sent
        # show parse tree
        tree = parseEnglish(sent,self.parser.get());
        self.showTree(tree)
        # Split the parse into the agent (NP) and action (VP) subtrees
        AgentSubtree = ExtractAgentSubtree(tree);
        ActionSubtree = ExtractActionSubtree(tree);
        print 'Agent: ',AgentSubtree
        print 'Actions: ',ActionSubtree
        agents = []
        actions = []
        # Given a NP subtree, get a list of agents inside
        if AgentSubtree != None:
            agents = getAgents(AgentSubtree)
        # Given a VP subtree, get a list of actions inside
        if ActionSubtree != None:
            actions = getAction(ActionSubtree)
        self.ClearOutputs()
        self.ShowResult(agents,actions)
        # Result is a [state strings, action strings] pair; the lists are
        # accumulated separately and may grow at different rates.
        Result = self.GeneratePDDL(agents,actions)
        PDDLStateStrings = Result[0]
        PDDLActionStrings = Result[1]
        self.ReceivedPDDLStrings = self.ReceivedPDDLStrings + PDDLStateStrings
        self.ReceivedPDDLActionStrings = self.ReceivedPDDLActionStrings + PDDLActionStrings
        # Redraw both read-only views from scratch
        self.ReceivedPDDL.delete(1.0,END)
        self.ReceivedPDDLActions.delete(1.0,END)
        for string in self.ReceivedPDDLStrings:
            self.ReceivedPDDL.insert(END, string+'\n')
        for string in self.ReceivedPDDLActionStrings:
            self.ReceivedPDDLActions.insert(END, string+'\n')
    def RemovePDDL(self):
        """Drop the most recently added PDDL goal string and redraw the view.

        NOTE(review): only ReceivedPDDLStrings is popped; the separately
        accumulated ReceivedPDDLActionStrings list and its widget are left
        untouched, so recorded actions can go stale after a removal. The two
        lists are not guaranteed to grow in lockstep (see GeneratePDDL), so
        a mechanical paired pop would be wrong -- confirm intended behavior.
        """
        if len(self.ReceivedPDDLStrings) > 0:
            self.ReceivedPDDLStrings.pop()
        self.ReceivedPDDL.delete(1.0,END)
        for string in self.ReceivedPDDLStrings:
            self.ReceivedPDDL.insert(END, string+'\n')
    def PlanPDDL(self):
        """Assemble a PDDL problem from the current scenario and the received
        goals, run the planner, and display the resulting plan (or a
        failure message)."""
        PDDLs = self.ReceivedPDDL.get(1.0,END)
        operators = self.PDDLDomain
        facts = self.PDDLInitial
        # Append the received goals to the facts as the desired effects
        facts = facts + '\n(effects\n'
        facts = facts + PDDLs + ')'
        Actions = run_planner(operators,facts)
        if Actions == None:
            self.PDDLPlan.config(text='No feasible plan can be found.\nPlease modify your goal settings.')
        else:
            ActionString = ''
            for action in Actions:
                ActionString = ActionString + action +'\n'
            self.PDDLPlan.config(text=ActionString)
    def buildTranslatorFrame(self,TranslatorFrame):
        """Lay out the Translator tab: sentence input, parser/output choice,
        parse-tree canvas, element-mapping fields, message log, and the
        Translate button.

        TranslatorFrame -- the tab frame created by initUI to place widgets in.
        """
        # --- Header, sentence input, and parser/output selection ---
        NameLabel = Label(TranslatorFrame, text="Translate the input English sentence to RMPL/PDDL expression \n"+
            " by extracting the desired state evolution from its parse tree.", background="azure2")
        NameLabel.place(x=10,y=10)
        GiveInput = Label(TranslatorFrame, text="Enter your command in English:", background="azure2")
        GiveInput.place(x=10,y=50)
        self.Input = Text(TranslatorFrame,width=50, height=2)
        self.Input.place(x=10,y=70)
        #self.Input.insert(1.0, 'The man with a coat wants to eat and drink a cake and two wings in ten minutes .')
        self.Input.insert(1.0, 'The man with a coat wants to eat and drink a cake and two wings .')
        ParserLabel = Label(TranslatorFrame, text="Select an English parser:", background="azure2")
        ParserLabel.place(x=10,y=120)
        self.parser = IntVar()
        self.parser.set(1)
        Radiobutton(TranslatorFrame, text="Stanford (Online)", variable=self.parser, value=1, background="azure2").place(x=10,y=140)
        #Radiobutton(TranslatorFrame, text="Bikel-Collins", variable=self.parser, value=2, background="azure2").place(x=160,y=140)
        Radiobutton(TranslatorFrame, text="Viterbi Parser+Customer Corpus", variable=self.parser, value=3, background="azure2").place(x=280,y=140)
        OutputLabel = Label(TranslatorFrame, text="Select an output format:", background="azure2")
        OutputLabel.place(x=10,y=175)
        self.output = IntVar()
        self.output.set(1)
        Radiobutton(TranslatorFrame, text="RMPL", variable=self.output, value=1, background="azure2").place(x=200,y=175)
        Radiobutton(TranslatorFrame, text="PDDL", variable=self.output, value=2, background="azure2").place(x=320,y=175)
        # --- Parse tree canvas and generated expression box ---
        TreeLabel = Label(TranslatorFrame, text="Parse Tree: ", background="azure2")
        TreeLabel.place(x=10,y=210)
        self.TreeFrame = Frame(TranslatorFrame,width=460, height=625,borderwidth=2, background="azure2",relief=RIDGE)
        self.TreeFrame.place(x=10,y=230)
        RMPLExpression = Label(TranslatorFrame, text="RMPL/PDDL Expression:", background="azure2")
        RMPLExpression.place(x=510,y=10)
        self.RMPL = Text(TranslatorFrame,width=50, height=10)
        self.RMPL.place(x=510,y=30)
        #CommandType = Label(TranslatorFrame, text="Type of Commands:", background="azure2")
        #CommandType.place(x=510,y=120)
        #self.Command = Text(TranslatorFrame,width=50, height=2)
        #self.Command.place(x=510,y=140)
        #self.Command.insert(1.0, 'Temporal goal / Spatial goal / A mix of both ')
        # --- One text field per extracted sentence element ---
        ElementsMapping = Label(TranslatorFrame, text="Elements Mapping:", background="azure2")
        ElementsMapping.place(x=510,y=200)
        AgentLabel = Label(TranslatorFrame, text="Agent -> ", background="azure2")
        AgentLabel.place(x=510,y=240)
        self.Agent = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.Agent.place(x=620,y=240)
        AgentModifierLabel = Label(TranslatorFrame, text="Agent Modifier -> ", background="azure2")
        AgentModifierLabel.place(x=510,y=280)
        self.AgentModifier = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.AgentModifier.place(x=620,y=280)
        TargetLabel = Label(TranslatorFrame, text="Target -> ", background="azure2")
        TargetLabel.place(x=510,y=320)
        self.Target = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.Target.place(x=620,y=320)
        TargetModifierLabel = Label(TranslatorFrame, text="Target Modifier -> ", background="azure2")
        TargetModifierLabel.place(x=510,y=360)
        self.TargetModifier = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.TargetModifier.place(x=620,y=360)
        InitialStateLabel = Label(TranslatorFrame, text="Initial State -> ", background="azure2")
        InitialStateLabel.place(x=510,y=400)
        self.InitialState = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.InitialState.place(x=620,y=400)
        TargetStateLabel = Label(TranslatorFrame, text="Target State -> ", background="azure2")
        TargetStateLabel.place(x=510,y=440)
        self.TargetState = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.TargetState.place(x=620,y=440)
        ActionLabel = Label(TranslatorFrame, text="Action -> ", background="azure2")
        ActionLabel.place(x=510,y=480)
        self.Action = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.Action.place(x=620,y=480)
        ActionModifierLabel = Label(TranslatorFrame, text="Action Modifier -> ", background="azure2")
        ActionModifierLabel.place(x=510,y=520)
        self.ActionModifier = Text(TranslatorFrame,width=36, height=1, background="azure2")
        self.ActionModifier.place(x=620,y=520)
        # --- Scrollable execution-message log ---
        MessageLabel = Label(TranslatorFrame, text="Execution message:", background="azure2")
        MessageLabel.place(x=510,y=600)
        self.MessageFrame = Frame(TranslatorFrame,width=200,height=100,borderwidth=1)
        self.MessageFrame.place(x=510,y=620)
        self.Message = Text(self.MessageFrame, wrap=WORD,width=47,height=12)
        self.Message.pack(side=LEFT)
        MessageScrollbar = Scrollbar(self.MessageFrame)
        MessageScrollbar.pack(side=RIGHT, fill=Y)
        self.Message.config(yscrollcommand=MessageScrollbar.set)
        MessageScrollbar.config(command=self.Message.yview)
        # --- Translate button ---
        TranslateButtonFrame = Frame(TranslatorFrame, height=50, width=870)
        TranslateButtonFrame.pack_propagate(0) # don't shrink
        TranslateButtonFrame.place(x=35,y=1000-120)
        #b = Button(f, text="Sure!")
        #b.pack(fill=BOTH, expand=1)
        TranslateButton = Button(TranslateButtonFrame, text="\nTranslate!\n",command=self.translate)
        TranslateButton.pack(fill=BOTH, expand=1)
    def showTree(self,tree):
        """Render *tree* (a parse tree) on a scrollable canvas inside the
        Translator tab's tree frame.

        tree -- parse tree object accepted by getElement's getCanvas helper.
        """
        canvas = getCanvas(self.TreeFrame,tree)
        # Attach horizontal and vertical scrollbars to the tree canvas
        xscrollbar = Scrollbar(self.TreeFrame, orient=HORIZONTAL)
        xscrollbar.grid(row=1,column=0, sticky=W+E)
        xscrollbar.config(command=canvas.xview)
        yscrollbar = Scrollbar(self.TreeFrame,orient=VERTICAL)
        yscrollbar.grid(row=0,column=1, sticky=N+S)
        yscrollbar.config(command=canvas.yview)
        canvas.config(width=430,height=600)
        canvas.config(xscrollcommand=xscrollbar.set, yscrollcommand=yscrollbar.set)
        canvas.grid(row=0,column=0, sticky=N+S)
    def translate(self):
        """Translate the sentence in the input box to RMPL or PDDL
        (whichever output format is selected), showing the parse tree,
        the extracted elements, and the generated expression.
        """
        sent = self.Input.get(1.0,END)
        print sent
        # Log which output format is being produced
        if self.output.get() == 1:
            self.Message.insert(END,'Translating:\n '+sent+' to RMPL\n');
        elif self.output.get() == 2:
            self.Message.insert(END,'Translating:\n '+sent+' to PDDL\n');
        # show parse tree
        tree = parseEnglish(sent,self.parser.get());
        self.showTree(tree)
        # Extract agents and actions
        # Agent first
        AgentSubtree = ExtractAgentSubtree(tree);
        ActionSubtree = ExtractActionSubtree(tree);
        print 'Agent: ',AgentSubtree
        print 'Actions: ',ActionSubtree
        agents = []
        actions = []
        # Given a NP subtree, get a list of agents inside
        if AgentSubtree != None:
            agents = getAgents(AgentSubtree)
        # Given a VP subtree, get a list of actions inside
        if ActionSubtree != None:
            actions = getAction(ActionSubtree)
        #actions = actions+getAction(tree[0][1][1][0])
        #actions = getAction(tree[0][0])
        self.ClearOutputs()
        self.ShowResult(agents,actions)
        # Emit in the selected target language
        if self.output.get() == 1:
            self.GenerateRMPL(agents,actions)
        elif self.output.get() == 2:
            self.GeneratePDDL(agents,actions)
    def ShowResult(self,agents,actions):
        """Fill the element-mapping fields from the extracted *agents* and
        *actions* (NounGroup / action objects from the parsing helpers).

        Targets and their modifiers are grouped in parentheses per action.
        """
        for agent in agents:
            if agent.getNounString() != '':
                self.Agent.insert(END,agent.getNounString()+'; ');
            if agent.getPPString() != '':
                self.AgentModifier.insert(END,agent.getPPString()+'; ');
        for action in actions:
            self.Action.insert(END,action.getActionString()+'; ');
            if action.getPPString() != '':
                self.ActionModifier.insert(END,action.getPPString()+'; ');
            # One parenthesized group of targets/modifiers per action
            self.Target.insert(END,'(');
            self.TargetModifier.insert(END,'(');
            for target in action.getNPs():
                if target.getNounString() != '':
                    self.Target.insert(END,target.getNounString()+';');
                if target.getPPString() != '':
                    self.TargetModifier.insert(END,target.getPPString()+';');
            self.Target.insert(END,') ');
            self.TargetModifier.insert(END,') ');
    def GenerateRMPL(self,agents,actions):
        """Generate an RMPL program from *agents* and *actions*, show it in
        the expression box, and return its pieces for later re-assembly.

        Returns [definitions, run-actions, classes]: three lists of RMPL
        string fragments, as consumed by showReceivedRMPL.

        NOTE(review): local ``Time`` is assigned but never used.
        """
        # Assume single agent
        Agent = ''
        Time = ''
        Action = ''
        Target = ''
        AgentModifier = ''
        ActionModifier = ''
        TargetModifier = ''
        StringRMPL = ''
        StringMain = 'class Main {\n'
        StringRun = 'method run () {\n'
        StringRun = StringRun + 'parallel {\n'
        NewClass = []
        NewAction = []
        NewDefinition = []
        #If there is at least one agent
        if (len(agents) < 1):
            #Build virtual agent
            agent = NounGroup()
            agent.addNP('VirtualAgent')
            agents.append(agent)
        for agent in agents:
            Agent = agent.getNounString()
            AgentModifier = agent.getPPString()
            # One RMPL class per agent, plus its declaration in Main
            StringClass = ''
            StringClass = 'class '+Agent.upper()+'{\n'
            StringDef = Agent.upper() + ' ' + Agent + ';\n'
            #StringMain = StringMain + Agent.upper() + ' ' + Agent + ';\n'
            if AgentModifier != '':
                StringClass = StringClass + 'value ' + AgentModifier + ';\n'
                StringDef = StringDef + Agent+'=='+AgentModifier + ';' + '\n'
                #StringMain = StringMain + Agent+'=='+AgentModifier + ';' + '\n'
            StringMain = StringMain + StringDef
            NewDefinition.append(StringDef)
            for action in actions:
                Action = action.getActionString()
                ActionModifier = action.getPPString()
                StringClass = StringClass + 'method ' + Action +'();\n'
                # Collect the action's targets as a quoted argument list
                for target in action.getNPs():
                    Target = Target + ',"' +target.getNounString()+'"'
                    TargetModifier = target.getPPString()
                    if TargetModifier != '':
                        Target = Target + '['+TargetModifier+']'
                # Target[1:] strips the leading comma
                NewActionString = Agent + '.' + Action + '[' + ActionModifier + ']'+ '(' + Target[1:] + ')\n'
                StringRun = StringRun + NewActionString
                NewAction.append(NewActionString)
                Action = ''
                Target = ''
                ActionModifier = ''
                TargetModifier = ''
            StringClass = StringClass + '}\n'
            StringRMPL = StringRMPL + StringClass
            NewClass.append(StringClass)
        # Close the parallel/run blocks and assemble the full program
        StringRun = StringRun + '}\n}\n'
        StringMain = StringMain + StringRun + '}\n'
        StringRMPL = StringMain + StringRMPL
        self.RMPL.insert(END, StringRMPL)
        return[NewDefinition,NewAction,NewClass]
    def GeneratePDDL(self,agents,actions):
        """Generate PDDL strings from *actions* (agents are currently
        ignored), echo them to the expression box, and return them.

        Returns [state strings, action strings]. A state string such as
        ``(on apple pear)`` is emitted only when an action carries a
        prepositional phrase with a noun phrase inside; otherwise only an
        action string is produced, so the two lists can grow at different
        rates.
        """
        PDDLStateStrings = []
        PDDLActionStrings = []
        for action in actions:
            #build a virtual target, if no target
            if len(action.getNPs()) == 0:
                VirtualTarget = NounGroup()
                VirtualTarget.addNP(' ')
                action.addNP(VirtualTarget)
            for target in action.getNPs():
                if len(action.getPPs()) > 0:
                    # Only the first prepositional phrase is considered
                    PP = action.getPPs()[0]
                    Prop = PP.getPropString()
                    #if there is a NP in the PP
                    if len(PP.getNPs()) > 0:
                        for PPNP in PP.getNPs():
                            # e.g. action "(put-on apple pear)", state "(on apple pear)"
                            PDDLActionString = '('
                            PDDLStateString = '('
                            PDDLActionString = PDDLActionString + action.getActionString()
                            PDDLActionString = PDDLActionString + '-' + Prop + ' ' + target.getNounString()
                            PDDLActionString = PDDLActionString + ' ' + PPNP.getNounString()
                            PDDLStateString = PDDLStateString + Prop + ' ' + target.getNounString()
                            PDDLStateString = PDDLStateString + ' ' + PPNP.getNounString()
                            if target.getPPString() != '':
                                PDDLActionString = PDDLActionString + '-' + target.getPPString()
                            PDDLActionString = PDDLActionString + ')'
                            PDDLStateString = PDDLStateString + ')'
                            PDDLActionStrings.append(PDDLActionString)
                            PDDLStateStrings.append(PDDLStateString)
                            self.RMPL.insert(END, PDDLActionString+'\n')
                            self.RMPL.insert(END, PDDLStateString+'\n')
                    else:
                        # If there is a PP but no NP inside
                        PDDLActionString = '('
                        PDDLActionString = PDDLActionString + action.getActionString()
                        PDDLActionString = PDDLActionString + '-' + Prop + ' ' + target.getNounString()
                        if target.getPPString() != '':
                            PDDLActionString = PDDLActionString + '-' + target.getPPString()
                        PDDLActionString = PDDLActionString + ')'
                        PDDLActionStrings.append(PDDLActionString)
                        self.RMPL.insert(END, PDDLActionString+'\n')
                else:
                    # No prepositional phrase: action-only string
                    PDDLActionString = '('
                    PDDLActionString = PDDLActionString + action.getActionString()
                    PDDLActionString = PDDLActionString + ' ' + target.getNounString()
                    if target.getPPString() != '':
                        PDDLActionString = PDDLActionString + '-' + target.getPPString()
                    PDDLActionString = PDDLActionString + ')'
                    PDDLActionStrings.append(PDDLActionString)
                    self.RMPL.insert(END, PDDLActionString+'\n')
        return [PDDLStateStrings,PDDLActionStrings]
def ClearOutputs(self):
self.RMPL.delete(1.0,END);
self.Agent.delete(1.0,END);
self.AgentModifier.delete(1.0,END);
self.Target.delete(1.0,END);
self.TargetModifier.delete(1.0,END);
self.InitialState.delete(1.0,END);
self.TargetState.delete(1.0,END);
self.Action.delete(1.0,END);
self.ActionModifier.delete(1.0,END);
def main():
    """Create the Tk root window, attach the Uhura GUI, and run the event loop."""
    root = Tk()
    Uhura = UhuraGUI(root)
    root.mainloop()
if __name__ == '__main__':
main() | mit |
afbarnard/fitamord | fitamord/env.py | 1 | 4176 | """Environments of hierarchical bindings"""
# Copyright (c) 2016 Aubrey Barnard. This is free software released
# under the MIT License. See `LICENSE.txt` for details.
from . import general
from . import parse
# TODO parse YAML with provenance
def flatten_dicts(dicts): # TODO
    """Unimplemented stub: currently returns an empty tuple.

    NOTE(review): intended semantics (presumably merging/flattening a
    sequence of dicts) are not established by this code -- confirm before
    implementing.
    """
    return ()
class Environment: # TODO relation to chained dicts in collections?
def __init__(self, name_frame_pairs=None):
self._names = []
self._frames = []
self._cached_fnd = False
self._cached_key = None
self._cached_val = None
self._cached_idx = None
if name_frame_pairs is not None:
self.extend(name_frame_pairs)
def _lookup_key_in(self, key, frame):
# Allow frame to be any object if the key is None
if not isinstance(frame, dict):
if key is None:
return True, frame
else:
return False, None
# Frame is a dict, look up the key in it. If the key is a
# tuple, treat it as a hierarchical path. Otherwise treat it as
# is.
if isinstance(key, tuple) and length(key) >= 1:
key_head = key[0]
key_tail = key[1:]
if key_head in frame:
subframe = frame[key_head]
if key_tail:
return self._lookup_key_in(key_tail, subframe)
else:
return True, subframe
else:
return False, None
elif key in frame:
return True, frame[key]
else:
return False, None
def _lookup(self, key):
# Satisfy the lookup from the cache if possible
if self._cached_fnd and key == self._cached_key:
return True, self._cached_val, self._cached_idx
# Do the lookup
# Parse string keys into hierarchical names
parsed_key = (tuple(key.split('.'))
if isinstance(key, str)
else key)
# Search through the stack of frames for the first occurrence of
# key
for frame_idx in range(length(frames) - 1, -1, -1):
found, value = self._lookup_key_in(
parsed_key, self._frames[frame_idx])
if found:
# Cache this lookup
self._cached_fnd = True
self._cached_key = key
self._cached_val = value
self._cached_idx = frame_idx
return True, value, frame_idx
# Key not found
return False, None, None
    def push(self, dictlike, frame_name=None):
        """Push *dictlike* as the newest (highest-priority) frame.

        frame_name -- label used for provenance reporting; when omitted it
        is derived from the object via general.object_name.
        """
        # Default the name of the frame if necessary
        if frame_name is None:
            frame_name = general.object_name(dictlike)
        self._frames.append(dictlike)
        self._names.append(frame_name)
    def provenance(self, key):
        """Return where *key*'s binding came from, or None if unbound.

        NOTE(review): wraps the frame's name in parse.TextLocation; this
        assumes frame names identify text sources (e.g. file names) --
        confirm against the parse module.
        """
        found, value, frame_idx = self._lookup(key)
        return (parse.TextLocation(self._names[frame_idx])
                if found else None)
def get(self, key, default=None):
found, value, frame_idx = self._lookup(key)
return value if found else default
def __contains__(self, key):
found, value, frame_idx = self._lookup(key)
return found
def __getindex__(self, key):
found, value, where = self._lookup(key)
if found:
return value
else:
raise KeyError(key)
    def keys(self): # TODO
        """Unimplemented stub: should yield every bound key across frames."""
        return ()
    def values(self): # TODO
        """Unimplemented stub: should yield the value of every bound key."""
        return ()
    def items(self): # TODO
        """Unimplemented stub: should yield (key, value) pairs."""
        return ()
    def items_with_provenance(self): # TODO
        """Unimplemented stub: should yield (key, value, provenance) triples."""
        return ()
    def dump(self, out): # TODO
        """Unimplemented stub: should write the environment's contents to *out*."""
        pass
def names_frames(self):
for idx in range(length(self._names)):
yield self._names[idx], self._frames[idx]
def extend(self, name_frame_pairs):
for name, frame in name_frame_pairs:
self.push(frame, name)
def from_yaml_files(files): # TODO
    """Build an Environment from YAML files (loading is unimplemented).

    Currently returns an empty Environment; the loop body is a placeholder
    for loading each existing file as YAML and pushing it as a frame.
    """
    env = Environment()
    for file in files:
        # If the file exists, load it as YAML and add it to the environment
        pass
    return env
def from_cli_args(args): # TODO
    """Unimplemented stub: should build an Environment from parsed CLI args.

    Currently always returns None.
    """
    return None
| mit |
benchisell/photostream-bc | flask/lib/python2.7/site-packages/flup/server/ajp_fork.py | 19 | 7429 | # Copyright (c) 2005, 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
"""
ajp - an AJP 1.3/WSGI gateway.
For more information about AJP and AJP connectors for your web server, see
<http://jakarta.apache.org/tomcat/connectors-doc/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
import sys
from myapplication import app # Assume app is your WSGI application object
from ajp import WSGIServer
ret = WSGIServer(app).run()
sys.exit(ret and 42 or 0)
See the documentation for WSGIServer for more information.
About the bit of logic at the end:
Upon receiving SIGHUP, the python script will exit with status code 42. This
can be used by a wrapper script to determine if the python script should be
re-run. When a SIGINT or SIGTERM is received, the script exits with status
code 0, possibly indicating a normal exit.
Example wrapper script:
#!/bin/sh
STATUS=42
while test $STATUS -eq 42; do
python "$@" that_script_above.py
STATUS=$?
done
Example workers.properties (for mod_jk):
worker.list=foo
worker.foo.port=8009
worker.foo.host=localhost
worker.foo.type=ajp13
Example httpd.conf (for mod_jk):
JkWorkersFile /path/to/workers.properties
JkMount /* foo
Note that if you mount your ajp application anywhere but the root ("/"), you
SHOULD specifiy scriptName to the WSGIServer constructor. This will ensure
that SCRIPT_NAME/PATH_INFO are correctly deduced.
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import socket
import logging
from flup.server.ajp_base import BaseAJPServer, Connection
from flup.server.preforkserver import PreforkServer
__all__ = ['WSGIServer']
class WSGIServer(BaseAJPServer, PreforkServer):
    """
    AJP1.3/WSGI server. Runs your WSGI application as a persistent program
    that understands AJP1.3. Opens up a TCP socket, binds it, and then
    waits for forwarded requests from your webserver.

    Why AJP? Two good reasons are that AJP provides load-balancing and
    fail-over support. Personally, I just wanted something new to
    implement. :)

    Of course you will need an AJP1.3 connector for your webserver (e.g.
    mod_jk) - see <http://jakarta.apache.org/tomcat/connectors-doc/>.
    """
    def __init__(self, application, scriptName='', environ=None,
                 bindAddress=('localhost', 8009), allowedServers=None,
                 loggingLevel=logging.INFO, debug=True, **kw):
        """
        scriptName is the initial portion of the URL path that "belongs"
        to your application. It is used to determine PATH_INFO (which doesn't
        seem to be passed in). An empty scriptName means your application
        is mounted at the root of your virtual host.

        environ, which must be a dictionary, can contain any additional
        environment variables you want to pass to your application.

        bindAddress is the address to bind to, which must be a tuple of
        length 2. The first element is a string, which is the host name
        or IPv4 address of a local interface. The 2nd element is the port
        number.

        allowedServers must be None or a list of strings representing the
        IPv4 addresses of servers allowed to connect. None means accept
        connections from anywhere.

        loggingLevel sets the logging level of the module-level logger.
        """
        BaseAJPServer.__init__(self, application,
                               scriptName=scriptName,
                               environ=environ,
                               multithreaded=False,
                               multiprocess=True,
                               bindAddress=bindAddress,
                               allowedServers=allowedServers,
                               loggingLevel=loggingLevel,
                               debug=debug)
        # Strip keywords this class controls itself before forwarding the
        # rest to PreforkServer. dict.pop(key, None) replaces the deprecated
        # has_key()/del two-step (has_key was removed in Python 3).
        for key in ('multithreaded', 'multiprocess', 'jobClass', 'jobArgs'):
            kw.pop(key, None)
        PreforkServer.__init__(self, jobClass=Connection, jobArgs=(self,), **kw)

    def run(self):
        """
        Main loop. Call this after instantiating WSGIServer. SIGHUP, SIGINT,
        SIGQUIT, SIGTERM cause it to cleanup and return. (If a SIGHUP
        is caught, this method returns True. Returns False otherwise.)
        """
        self.logger.info('%s starting up', self.__class__.__name__)
        try:
            sock = self._setupSocket()
        # "except ... as e" (PEP 3110) instead of the Python-2-only comma
        # form; works on Python 2.6+ which this package targets.
        except socket.error as e:
            # e.args[1] is the error message; direct e[1] indexing of an
            # exception is Python-2-only.
            self.logger.error('Failed to bind socket (%s), exiting', e.args[1])
            return False
        ret = PreforkServer.run(self, sock)
        self._cleanupSocket(sock)
        self.logger.info('%s shutting down%s', self.__class__.__name__,
                         self._hupReceived and ' (reload requested)' or '')
        return ret
if __name__ == '__main__':
    def test_app(environ, start_response):
        """Demo WSGI app: dump the environ and any form data as an HTML table.

        Probably not the most efficient example.
        """
        import cgi
        start_response('200 OK', [('Content-Type', 'text/html')])
        yield '<html><head><title>Hello World!</title></head>\n' \
              '<body>\n' \
              '<p>Hello World!</p>\n' \
              '<table border="1">'
        # sorted() replaces the keys()/sort() two-step, and repr() replaces
        # the deprecated backtick syntax (removed in Python 3); behavior on
        # Python 2.7 is identical.
        for name in sorted(environ):
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                name, cgi.escape(repr(environ[name])))
        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
                                keep_blank_values=1)
        if form.list:
            yield '<tr><th colspan="2">Form data</th></tr>'
        for field in form.list:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                field.name, field.value)
        yield '</table>\n' \
              '</body></html>\n'

    from wsgiref import validate
    test_app = validate.validator(test_app)
    # Explicitly set bindAddress to *:8009 for testing.
    WSGIServer(test_app,
               bindAddress=('', 8009), allowedServers=None,
               loggingLevel=logging.DEBUG).run()
| bsd-3-clause |
surgebiswas/poker | PokerBots_2017/Johnny/external/protobuf/python/google/protobuf/reflection.py | 50 | 4422 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import message
# Select the concrete message implementation once at import time: the
# C++-accelerated backend when the build provides it, the pure-Python
# implementation otherwise.
if api_implementation.Type() == 'cpp':
  from google.protobuf.pyext import cpp_message as message_impl
else:
  from google.protobuf.internal import python_message as message_impl

# The type of all Message classes.
# Part of the public interface, but normally only used by message factories.
GeneratedProtocolMessageType = message_impl.GeneratedProtocolMessageType
def ParseMessage(descriptor, byte_str):
  """Deserialize a byte string into a new Message built from a Descriptor.

  Args:
    descriptor: Protobuf Descriptor object
    byte_str: Serialized protocol buffer byte string

  Returns:
    Newly created protobuf Message object.
  """
  message_class = MakeClass(descriptor)
  msg = message_class()
  msg.ParseFromString(byte_str)
  return msg
def MakeClass(descriptor):
  """Construct a class object for a protobuf described by descriptor.

  Composite descriptors are handled recursively: every nested message type
  becomes a class attribute of the generated parent class.

  This is the dynamic equivalent to:

    class Parent(message.Message):
      __metaclass__ = GeneratedProtocolMessageType
      DESCRIPTOR = descriptor
      class Child(message.Message):
        __metaclass__ = GeneratedProtocolMessageType
        DESCRIPTOR = descriptor.nested_types[0]

  Sample usage:
    file_descriptor = descriptor_pb2.FileDescriptorProto()
    file_descriptor.ParseFromString(proto2_string)
    msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
    msg_class = reflection.MakeClass(msg_descriptor)
    msg = msg_class()

  Args:
    descriptor: A descriptor.Descriptor object describing the protobuf.

  Returns:
    The Message class object described by the descriptor.
  """
  # Recurse into nested types first; each becomes an attribute of the parent.
  attrs = {
      name: MakeClass(nested_type)
      for name, nested_type in descriptor.nested_types_by_name.items()
  }
  attrs[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
  return GeneratedProtocolMessageType(
      str(descriptor.name), (message.Message,), attrs)
| mit |
m8ttyB/socorro | alembic/versions/335c2bfd99a6_bug_1255444_fix_fennecandroid_for_new_.py | 3 | 3200 | """bug 1255444 fix fennecandroid for new release repository
Revision ID: 335c2bfd99a6
Revises: 9371b45451b
Create Date: 2016-03-10 13:24:35.662063
"""
# revision identifiers, used by Alembic.
# 'revision' names this migration; 'down_revision' is its direct ancestor.
revision = '335c2bfd99a6'
down_revision = '9371b45451b'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Apply bug 1255444: register the four '*-android-api-15' release
    repositories and map the android-arm platform onto them for
    FennecAndroid >= 37.0, one row per release channel."""
    op.execute("""
        INSERT INTO release_repositories VALUES ('mozilla-central-android-api-15');
        INSERT INTO release_repositories VALUES ('mozilla-aurora-android-api-15');
        INSERT INTO release_repositories VALUES ('mozilla-beta-android-api-15');
        INSERT INTO release_repositories VALUES ('mozilla-release-android-api-15');
        INSERT INTO special_product_platforms (platform, repository, release_channel, release_name, product_name, min_version) VALUES ('android-arm', 'mozilla-central-android-api-15', 'nightly', 'mobile', 'FennecAndroid', '37.0');
        INSERT INTO special_product_platforms (platform, repository, release_channel, release_name, product_name, min_version) VALUES ('android-arm', 'mozilla-aurora-android-api-15', 'aurora', 'mobile', 'FennecAndroid', '37.0');
        INSERT INTO special_product_platforms (platform, repository, release_channel, release_name, product_name, min_version) VALUES ('android-arm', 'mozilla-beta-android-api-15', 'beta', 'mobile', 'FennecAndroid', '37.0');
        INSERT INTO special_product_platforms (platform, repository, release_channel, release_name, product_name, min_version) VALUES ('android-arm', 'mozilla-release-android-api-15', 'release', 'mobile', 'FennecAndroid', '37.0');
    """)
def downgrade():
    """Revert bug 1255444: delete the '*-android-api-15' repository rows and
    the matching android-arm special_product_platforms rows added by
    upgrade()."""
    op.execute("""
        DELETE FROM release_repositories WHERE repository = 'mozilla-central-android-api-15';
        DELETE FROM release_repositories WHERE repository = 'mozilla-aurora-android-api-15';
        DELETE FROM release_repositories WHERE repository = 'mozilla-beta-android-api-15';
        DELETE FROM release_repositories WHERE repository = 'mozilla-release-android-api-15';
        DELETE FROM special_product_platforms WHERE platform = 'android-arm' AND repository = 'mozilla-central-android-api-15' AND release_channel = 'nightly' AND release_name = 'mobile' AND product_name = 'FennecAndroid' AND min_version = '37.0';
        DELETE FROM special_product_platforms WHERE platform = 'android-arm' AND repository = 'mozilla-aurora-android-api-15' AND release_channel = 'aurora' AND release_name = 'mobile' AND product_name = 'FennecAndroid' AND min_version = '37.0';
        DELETE FROM special_product_platforms WHERE platform = 'android-arm' AND repository = 'mozilla-beta-android-api-15' AND release_channel = 'beta' AND release_name = 'mobile' AND product_name = 'FennecAndroid' AND min_version = '37.0';
        DELETE FROM special_product_platforms WHERE platform = 'android-arm' AND repository = 'mozilla-release-android-api-15' AND release_channel = 'release' AND release_name = 'mobile' AND product_name = 'FennecAndroid' AND min_version = '37.0';
    """)
| mpl-2.0 |
lucasrangit/twitter-winner | twitter-winner/oauthlib/oauth2/rfc6749/grant_types/implicit.py | 13 | 15811 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
from oauthlib import common
from oauthlib.common import log
from oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
class ImplicitGrant(GrantTypeBase):
    """`Implicit Grant`_

    The implicit grant type is used to obtain access tokens (it does not
    support the issuance of refresh tokens) and is optimized for public
    clients known to operate a particular redirection URI. These clients
    are typically implemented in a browser using a scripting language
    such as JavaScript.

    Unlike the authorization code grant type, in which the client makes
    separate requests for authorization and for an access token, the
    client receives the access token as the result of the authorization
    request.

    The implicit grant type does not include client authentication, and
    relies on the presence of the resource owner and the registration of
    the redirection URI. Because the access token is encoded into the
    redirection URI, it may be exposed to the resource owner and other
    applications residing on the same device::

        +----------+
        | Resource |
        |  Owner   |
        |          |
        +----------+
             ^
             |
            (B)
        +----|-----+          Client Identifier     +---------------+
        |         -+----(A)-- & Redirection URI --->|               |
        |  User-   |                                | Authorization |
        |  Agent  -|----(B)-- User authenticates -->|     Server    |
        |          |                                |               |
        |          |<---(C)--- Redirection URI ----<|               |
        |          |          with Access Token     +---------------+
        |          |            in Fragment
        |          |                                +---------------+
        |          |----(D)--- Redirection URI ---->|   Web-Hosted  |
        |          |          without Fragment      |     Client    |
        |          |                                |    Resource   |
        |     (F)  |<---(E)------- Script ---------<|               |
        |          |                                +---------------+
        +-|--------+
          |    |
         (A)  (G) Access Token
          |    |
          ^    v
        +---------+
        |         |
        |  Client |
        |         |
        +---------+

    Note: The lines illustrating steps (A) and (B) are broken into two
    parts as they pass through the user-agent.

    Figure 4: Implicit Grant Flow

    The flow illustrated in Figure 4 includes the following steps:

    (A) The client initiates the flow by directing the resource owner's
        user-agent to the authorization endpoint. The client includes
        its client identifier, requested scope, local state, and a
        redirection URI to which the authorization server will send the
        user-agent back once access is granted (or denied).

    (B) The authorization server authenticates the resource owner (via
        the user-agent) and establishes whether the resource owner
        grants or denies the client's access request.

    (C) Assuming the resource owner grants access, the authorization
        server redirects the user-agent back to the client using the
        redirection URI provided earlier. The redirection URI includes
        the access token in the URI fragment.

    (D) The user-agent follows the redirection instructions by making a
        request to the web-hosted client resource (which does not
        include the fragment per [RFC2616]). The user-agent retains the
        fragment information locally.

    (E) The web-hosted client resource returns a web page (typically an
        HTML document with an embedded script) capable of accessing the
        full redirection URI including the fragment retained by the
        user-agent, and extracting the access token (and other
        parameters) contained in the fragment.

    (F) The user-agent executes the script provided by the web-hosted
        client resource locally, which extracts the access token.

    (G) The user-agent passes the access token to the client.

    See `Section 10.3`_ and `Section 10.16`_ for important security considerations
    when using the implicit grant.

    .. _`Implicit Grant`: http://tools.ietf.org/html/rfc6749#section-4.2
    .. _`Section 10.3`: http://tools.ietf.org/html/rfc6749#section-10.3
    .. _`Section 10.16`: http://tools.ietf.org/html/rfc6749#section-10.16
    """

    def __init__(self, request_validator=None):
        # Fall back to the library's base RequestValidator when the caller
        # does not supply one.
        self.request_validator = request_validator or RequestValidator()

    def create_authorization_response(self, request, token_handler):
        """Create an authorization response.

        The client constructs the request URI by adding the following
        parameters to the query component of the authorization endpoint URI
        using the "application/x-www-form-urlencoded" format, per `Appendix B`_:

        response_type
                REQUIRED. Value MUST be set to "token".

        client_id
                REQUIRED. The client identifier as described in `Section 2.2`_.

        redirect_uri
                OPTIONAL. As described in `Section 3.1.2`_.

        scope
                OPTIONAL. The scope of the access request as described by
                `Section 3.3`_.

        state
                RECOMMENDED. An opaque value used by the client to maintain
                state between the request and callback. The authorization
                server includes this value when redirecting the user-agent back
                to the client. The parameter SHOULD be used for preventing
                cross-site request forgery as described in `Section 10.12`_.

        The authorization server validates the request to ensure that all
        required parameters are present and valid. The authorization server
        MUST verify that the redirection URI to which it will redirect the
        access token matches a redirection URI registered by the client as
        described in `Section 3.1.2`_.

        .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
        .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
        .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
        .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
        """
        # In the implicit grant the authorization response *is* the token
        # response, so simply delegate.
        return self.create_token_response(request, token_handler)

    def create_token_response(self, request, token_handler):
        """Return token or error embedded in the URI fragment.

        If the resource owner grants the access request, the authorization
        server issues an access token and delivers it to the client by adding
        the following parameters to the fragment component of the redirection
        URI using the "application/x-www-form-urlencoded" format, per
        `Appendix B`_:

        access_token
                REQUIRED. The access token issued by the authorization server.

        token_type
                REQUIRED. The type of the token issued as described in
                `Section 7.1`_. Value is case insensitive.

        expires_in
                RECOMMENDED. The lifetime in seconds of the access token. For
                example, the value "3600" denotes that the access token will
                expire in one hour from the time the response was generated.
                If omitted, the authorization server SHOULD provide the
                expiration time via other means or document the default value.

        scope
                OPTIONAL, if identical to the scope requested by the client;
                otherwise, REQUIRED. The scope of the access token as
                described by `Section 3.3`_.

        state
                REQUIRED if the "state" parameter was present in the client
                authorization request. The exact value received from the
                client.

        The authorization server MUST NOT issue a refresh token.

        .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
        .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
        """
        try:
            # request.scopes is only mandated in post auth and both pre and
            # post auth use validate_authorization_request
            # NOTE(review): this ValueError is not caught by the except
            # clauses below and will propagate to the caller.
            if not request.scopes:
                raise ValueError('Scopes must be set on post auth.')

            self.validate_token_request(request)

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.
        except errors.FatalClientError as e:
            log.debug('Fatal client error during validation of %r. %r.',
                      request, e)
            raise

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the fragment component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B:
        # http://tools.ietf.org/html/rfc6749#appendix-B
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
                                                         fragment=True)}, None, 302

        # Implicit grant: the token is returned directly in the fragment and
        # no refresh token is issued.
        token = token_handler.create_token(request, refresh_token=False)
        return {'Location': common.add_params_to_uri(request.redirect_uri, token.items(),
                                                     fragment=True)}, None, 302

    def validate_authorization_request(self, request):
        # Pre- and post-authorization validation are identical for this grant.
        return self.validate_token_request(request)

    def validate_token_request(self, request):
        """Check the token request for normal and fatal errors.

        This method is very similar to validate_authorization_request in
        the AuthorizationCodeGrant but differ in a few subtle areas.

        A normal error could be a missing response_type parameter or the client
        attempting to access scope it is not allowed to ask authorization for.
        Normal errors can safely be included in the redirection URI and
        sent back to the client.

        Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled, how this
        is done is outside of the scope of OAuthLib but showing an error
        page describing the issue is a good idea.
        """

        # First check for fatal errors

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.

        # REQUIRED. The client identifier as described in Section 2.2.
        # http://tools.ietf.org/html/rfc6749#section-2.2
        if not request.client_id:
            raise errors.MissingClientIdError(state=request.state, request=request)

        if not self.request_validator.validate_client_id(request.client_id, request):
            raise errors.InvalidClientIdError(state=request.state, request=request)

        # OPTIONAL. As described in Section 3.1.2.
        # http://tools.ietf.org/html/rfc6749#section-3.1.2
        if request.redirect_uri is not None:
            request.using_default_redirect_uri = False
            log.debug('Using provided redirect_uri %s', request.redirect_uri)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(state=request.state, request=request)

            # The authorization server MUST verify that the redirection URI
            # to which it will redirect the access token matches a
            # redirection URI registered by the client as described in
            # Section 3.1.2.
            # http://tools.ietf.org/html/rfc6749#section-3.1.2
            if not self.request_validator.validate_redirect_uri(
                    request.client_id, request.redirect_uri, request):
                raise errors.MismatchingRedirectURIError(state=request.state, request=request)
        else:
            request.redirect_uri = self.request_validator.get_default_redirect_uri(
                request.client_id, request)
            request.using_default_redirect_uri = True
            log.debug('Using default redirect_uri %s.', request.redirect_uri)
            if not request.redirect_uri:
                raise errors.MissingRedirectURIError(state=request.state, request=request)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(state=request.state, request=request)

        # Then check for normal errors.

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the fragment component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B.
        # http://tools.ietf.org/html/rfc6749#appendix-B

        # Note that the correct parameters to be added are automatically
        # populated through the use of specific exceptions.
        if request.response_type is None:
            raise errors.InvalidRequestError(state=request.state,
                                             description='Missing response_type parameter.',
                                             request=request)

        for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(state=request.state,
                                                 description='Duplicate %s parameter.' % param, request=request)

        # REQUIRED. Value MUST be set to "token".
        if request.response_type != 'token':
            raise errors.UnsupportedResponseTypeError(state=request.state, request=request)

        log.debug('Validating use of response_type token for client %r (%r).',
                  request.client_id, request.client)
        if not self.request_validator.validate_response_type(request.client_id,
                                                             request.response_type, request.client, request):
            log.debug('Client %s is not authorized to use response_type %s.',
                      request.client_id, request.response_type)
            raise errors.UnauthorizedClientError(request=request)

        # OPTIONAL. The scope of the access request as described by Section 3.3
        # http://tools.ietf.org/html/rfc6749#section-3.3
        self.validate_scopes(request)

        # Return the validated scopes plus the credentials needed to rebuild
        # the request during the post-authorization phase.
        return request.scopes, {
            'client_id': request.client_id,
            'redirect_uri': request.redirect_uri,
            'response_type': request.response_type,
            'state': request.state,
            'request': request,
        }
| mit |
electrum/presto | docs/src/main/sphinx/ext/download.py | 3 | 2129 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# noinspection PyUnresolvedReferences
from docutils import nodes, utils
# noinspection PyUnresolvedReferences
from sphinx.errors import SphinxError
# Maven groupId shared by all downloadable artifacts.
GROUP_ID = 'io.trino'

# Role argument -> (Maven artifactId, packaging/extension, optional classifier).
ARTIFACTS = {
    'server': ('trino-server', 'tar.gz', None),
    'cli': ('trino-cli', 'jar', 'executable'),
    'jdbc': ('trino-jdbc', 'jar', None),
}
def maven_filename(artifact, version, packaging, classifier):
    """Return the repository file name for the given Maven coordinates.

    A falsy *classifier* (e.g. None) is omitted from the name.
    """
    suffix = ('-' + classifier) if classifier else ''
    return artifact + '-' + version + suffix + '.' + packaging
def maven_download(group, artifact, version, packaging, classifier):
    """Return the Maven Central download URL for the given coordinates."""
    filename = maven_filename(artifact, version, packaging, classifier)
    path_parts = (group.replace('.', '/'), artifact, version, filename)
    return 'https://repo1.maven.org/maven2/' + '/'.join(path_parts)
def setup(app):
    """Sphinx extension entry point: registers the ``maven_download`` role.

    The role turns ``:maven_download:`server``` (or ``cli``/``jdbc``) into a
    link to the corresponding artifact on Maven Central for the release
    configured in ``app.config.release``.
    """
    # noinspection PyUnusedLocal
    def download_link_role(role, rawtext, text, lineno, inliner, options=None, content=None):
        # Standard docutils role signature; *options*/*content* are unused,
        # so None sentinels replace the mutable {}/[] defaults.
        version = app.config.release
        # "text not in" instead of the non-idiomatic "not text in".
        if text not in ARTIFACTS:
            inliner.reporter.error('Unsupported download type: ' + text, line=lineno)
            return [], []
        artifact, packaging, classifier = ARTIFACTS[text]
        title = maven_filename(artifact, version, packaging, classifier)
        uri = maven_download(GROUP_ID, artifact, version, packaging, classifier)
        node = nodes.reference(title, title, internal=False, refuri=uri)
        return [node], []

    app.add_role('maven_download', download_link_role)
    return {
        'parallel_read_safe': True,
    }
| apache-2.0 |
meredith-digops/ansible | lib/ansible/modules/cloud/amazon/lightsail.py | 68 | 16237 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
- Creates or instances in AWS Lightsail and optionally wait for it to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if a snapshot has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import os
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
def create_instance(module, client, instance_name):
    """
    Create an instance if it does not already exist.

    module: Ansible module object
    client: authenticated lightsail connection object
    instance_name: name of instance to create

    Returns a tuple (changed, instance) where instance is a dict of
    information about the new (or pre-existing) instance.
    """
    changed = False

    # Check whether the instance already exists. NotFoundException simply
    # means we need to create it; any other API error is fatal.
    inst = None
    try:
        inst = _find_instance_info(client, instance_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'NotFoundException':
            module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))

    zone = module.params.get('zone')
    blueprint_id = module.params.get('blueprint_id')
    bundle_id = module.params.get('bundle_id')
    key_pair_name = module.params.get('key_pair_name')
    user_data = module.params.get('user_data')
    # The API rejects a null userData payload, so default to empty string.
    user_data = '' if user_data is None else user_data

    if inst is None:
        try:
            # The create_instances response (an 'operations' list) is not
            # needed; the instance is re-fetched below for a consistent shape.
            client.create_instances(
                instanceNames=[
                    instance_name
                ],
                availabilityZone=zone,
                blueprintId=blueprint_id,
                bundleId=bundle_id,
                userData=user_data,
                keyPairName=key_pair_name,
            )
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
        changed = True
        inst = _find_instance_info(client, instance_name)

    return (changed, inst)
def delete_instance(module, client, instance_name):
    """
    Terminates an instance

    module: Ansible module object
    client: authenticated lightsail connection object
    instance_name: name of instance to delete

    Returns a tuple (changed, instance) with a dict of instance
    information about the instance deleted (pre-deletion).

    If the instance to be deleted is not found, "changed" stays False.
    """
    # It looks like deleting removes the instance immediately, nothing to wait for
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait_max = time.time() + wait_timeout

    changed = False

    # A missing instance is not an error for state=absent.
    inst = None
    try:
        inst = _find_instance_info(client, instance_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'NotFoundException':
            module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))

    # Wait for instance to exit transition state before deleting
    if wait:
        while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
            try:
                time.sleep(5)
                inst = _find_instance_info(client, instance_name)
            except botocore.exceptions.ClientError as e:
                # botocore reports HTTPStatusCode as an int; the previous
                # comparison against the string "403" could never match,
                # silently hiding permission errors.
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
                    module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
                                     exception=traceback.format_exc())
                elif e.response['Error']['Code'] == "RequestExpired":
                    module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
                # sleep and retry
                time.sleep(10)

    # Attempt to delete
    if inst is not None:
        while not changed and ((wait and wait_max > time.time()) or (not wait)):
            try:
                client.delete_instance(instanceName=instance_name)
                changed = True
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))

    # Timed out
    if wait and not changed and wait_max <= time.time():
        module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())

    return (changed, inst)
def restart_instance(module, client, instance_name):
    """
    Reboot an existing instance

    module: Ansible module object
    client: authenticated lightsail connection object
    instance_name: name of instance to reboot

    Returns a tuple (changed, instance) with a dict of instance
    information about the restarted instance.

    If the instance was not found, "changed" stays False.

    Wait will not apply here as this is an OS-level operation.
    """
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait_max = time.time() + wait_timeout

    changed = False

    inst = None
    try:
        inst = _find_instance_info(client, instance_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'NotFoundException':
            module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))

    # Wait for instance to exit transition state before state change
    if wait:
        while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
            try:
                time.sleep(5)
                inst = _find_instance_info(client, instance_name)
            except botocore.exceptions.ClientError as e:
                # HTTPStatusCode is an int; the old string comparison
                # ("403") could never match and masked permission errors.
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
                    module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
                                     exception=traceback.format_exc())
                elif e.response['Error']['Code'] == "RequestExpired":
                    module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
                time.sleep(3)

    # send reboot
    if inst is not None:
        try:
            client.reboot_instance(instanceName=instance_name)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] != 'NotFoundException':
                module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
        changed = True

    return (changed, inst)
def startstop_instance(module, client, instance_name, state):
    """
    Starts or stops an existing instance

    module: Ansible module object
    client: authenticated lightsail connection object
    instance_name: name of instance to start/stop
    state: Target state ("running" or "stopped")

    Returns a tuple (changed, instance) with a dict of instance
    information about the instance started/stopped.

    If the instance was already in the requested state,
    "changed" stays False.
    """
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait_max = time.time() + wait_timeout

    changed = False

    inst = None
    try:
        inst = _find_instance_info(client, instance_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'NotFoundException':
            module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))

    # Wait for instance to exit transition state before state change
    if wait:
        while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
            try:
                time.sleep(5)
                inst = _find_instance_info(client, instance_name)
            except botocore.exceptions.ClientError as e:
                # HTTPStatusCode is an int; the old string comparison
                # ("403") could never match and masked permission errors.
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
                    module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
                                     exception=traceback.format_exc())
                elif e.response['Error']['Code'] == "RequestExpired":
                    module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
                time.sleep(1)

    # Try state change
    if inst is not None and inst['state']['name'] != state:
        try:
            if state == 'running':
                client.start_instance(instanceName=instance_name)
            else:
                client.stop_instance(instanceName=instance_name)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
        changed = True
        # Grab current instance info
        inst = _find_instance_info(client, instance_name)

    return (changed, inst)
def core(module):
    """
    Dispatch to the handler for the requested state.

    module: Ansible module object

    Exits through module.exit_json with the changed flag and a
    snake_cased dict of instance data, or module.fail_json on error.
    """
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    client = None
    try:
        client = boto3_conn(module, conn_type='client', resource='lightsail',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        # fail_json only accepts keyword arguments; the previous positional
        # call raised TypeError instead of reporting the real error.
        module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e,
                         exception=traceback.format_exc())

    changed = False
    state = module.params['state']
    name = module.params['name']

    # argument_spec restricts state to exactly these choices.
    if state == 'absent':
        changed, instance_dict = delete_instance(module, client, name)
    elif state in ('running', 'stopped'):
        changed, instance_dict = startstop_instance(module, client, name, state)
    elif state == 'restarted':
        changed, instance_dict = restart_instance(module, client, name)
    elif state == 'present':
        changed, instance_dict = create_instance(module, client, name)

    module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
    """
    Look up a Lightsail instance by name.

    client: authenticated lightsail connection object
    instance_name: name of the instance to find

    Returns the 'instance' dict from the API response. Any
    botocore.exceptions.ClientError (including NotFoundException)
    propagates to the caller, which is expected to handle it.
    """
    # The previous try/except here only re-raised the exception, so the
    # call is now made directly; callers already handle ClientError.
    res = client.get_instance(instanceName=instance_name)
    return res['instance']
def main():
    """Module entry point: build the argument spec and dispatch to core()."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present',
                   choices=['present', 'absent', 'stopped', 'running', 'restarted']),
        zone=dict(type='str'),
        blueprint_id=dict(type='str'),
        bundle_id=dict(type='str'),
        key_pair_name=dict(type='str'),
        user_data=dict(type='str'),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(default=300),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')
    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        core(module)
    except Exception as e:
        # Exception already covers botocore.exceptions.ClientError, and
        # referencing botocore here would NameError if its import failed.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jforbess/pvlib-python | pvlib/pvsystem.py | 1 | 42489 | """
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from __future__ import division
import logging
pvl_logger = logging.getLogger('pvlib')
import io
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import pandas as pd
from pvlib import tools
def systemdef(meta, surface_tilt, surface_azimuth, albedo, series_modules,
              parallel_modules):
    '''
    Build the dict of system parameters used throughout a simulation.

    Parameters
    ----------
    meta : dict
        TMY-style metadata (e.g. from readtmy2/readtmy3) containing at
        least 'altitude', 'latitude', 'longitude', 'TZ' and either
        'Name' or 'City'.
    surface_tilt : float or Series
        Surface tilt angle in decimal degrees, measured from horizontal
        (surface facing up = 0, surface facing horizon = 90).
    surface_azimuth : float or Series
        Surface azimuth in decimal degrees east of north
        (North=0, South=180, East=90, West=270).
    albedo : float or Series
        Ground reflectance (reflection coefficient), typically 0.1-0.4
        for surfaces on Earth; must be >=0 and <=1.
    series_modules : int
        Number of modules connected in series in a string.
    parallel_modules : int
        Number of strings connected in parallel.

    Returns
    -------
    dict
        Keys: 'surface_tilt', 'surface_azimuth', 'albedo',
        'series_modules', 'parallel_modules', 'latitude', 'longitude',
        'tz', 'name', 'altitude'.

    See also
    --------
    pvlib.tmy.readtmy3
    pvlib.tmy.readtmy2
    '''
    # TMY3 metadata carries 'Name'; older TMY2 metadata carries 'City'.
    if 'Name' in meta:
        site_name = meta['Name']
    else:
        site_name = meta['City']

    return {
        'surface_tilt': surface_tilt,
        'surface_azimuth': surface_azimuth,
        'albedo': albedo,
        'series_modules': series_modules,
        'parallel_modules': parallel_modules,
        'latitude': meta['latitude'],
        'longitude': meta['longitude'],
        'tz': meta['TZ'],
        'name': site_name,
        'altitude': meta['altitude'],
    }
def ashraeiam(b, aoi):
    '''
    ASHRAE incidence angle modifier, IAM = 1 - b*(sec(aoi) - 1).

    Model developed by Souka and Safwat [1], adopted by ASHRAE [2] and
    used by programs such as PVSyst [3]. The expression is discontinuous
    near 90 degrees; those angles are returned as NaN here, as are any
    modifier values that would be negative.

    Parameters
    ----------
    b : float
        Empirical coefficient adjusting the modifier as a function of
        angle of incidence; typically on the order of 0.05 [3].
    aoi : Series
        Angle of incidence between the module normal vector and the
        sun-beam vector in degrees.

    Returns
    -------
    IAM : Series
        The incidence angle modifier. NaN wherever abs(aoi) >= 90 or
        the computed modifier would be less than 0.

    References
    ----------
    [1] Souka A.F., Safwat H.H., "Determindation of the optimum orientations
    for the double exposure flat-plate collector and its reflections".
    Solar Energy vol .10, pp 170-174. 1966.

    [2] ASHRAE standard 93-77

    [3] PVsyst Contextual Help.
    http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
    September 10, 2012

    See Also
    --------
    irradiance.aoi
    physicaliam
    '''
    secant_aoi = 1 / np.cos(np.radians(aoi))
    modifier = 1 - b * (secant_aoi - 1)

    # Mask the discontinuity at grazing angles and any nonphysical
    # negative modifiers.
    modifier[np.abs(aoi) >= 90] = np.nan
    modifier[modifier < 0] = np.nan

    return modifier
def physicaliam(K, L, n, aoi):
    '''
    Determine the incidence angle modifier using refractive
    index, glazing thickness, and extinction coefficient

    physicaliam calculates the incidence angle modifier as described in
    De Soto et al. "Improvement and validation of a model for photovoltaic
    array performance", section 3. The calculation is based upon a physical
    model of absorbtion and transmission through a cover. Required
    information includes, incident angle, cover extinction coefficient,
    cover thickness

    Note: The authors of this function believe that eqn. 14 in [1] is
    incorrect. This function uses the following equation in its place:
    theta_r = arcsin(1/n * sin(theta))

    Parameters
    ----------
    K : float
        The glazing extinction coefficient in units of 1/meters. Reference
        [1] indicates that a value of 4 is reasonable for "water white"
        glass. K must be a numeric scalar or vector with all values >=0. If K
        is a vector, it must be the same size as all other input vectors.
    L : float
        The glazing thickness in units of meters. Reference [1] indicates
        that 0.002 meters (2 mm) is reasonable for most glass-covered
        PV panels. L must be a numeric scalar or vector with all values >=0.
        If L is a vector, it must be the same size as all other input vectors.
    n : float
        The effective index of refraction (unitless). Reference [1]
        indicates that a value of 1.526 is acceptable for glass. n must be a
        numeric scalar or vector with all values >=0. If n is a vector, it
        must be the same size as all other input vectors.
    aoi : Series
        The angle of incidence between the module normal vector and the
        sun-beam vector in degrees.

    Returns
    -------
    IAM : float or Series
        The incident angle modifier as specified in eqns. 14-16 of [1].
        IAM is a column vector with the same number of elements as the
        largest input vector.

        Theta must be a numeric scalar or vector.
        For any values of theta where abs(aoi)>90, IAM is set to 0. For any
        values of aoi where -90 < aoi < 0, theta is set to abs(aoi) and
        evaluated.

    References
    ----------
    [1] W. De Soto et al., "Improvement and validation of a model for
    photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
    2006.

    [2] Duffie, John A. & Beckman, William A.. (2006). Solar Engineering
    of Thermal Processes, third edition. [Books24x7 version] Available
    from http://common.books24x7.com/toc.aspx?bookid=17160.

    See Also
    --------
    getaoi
    ephemeris
    spa
    ashraeiam
    '''
    # Snell's law: angle of refraction inside the glazing, in degrees.
    # NOTE: uses theta_r = arcsin(1/n * sin(theta)) instead of eqn. 14
    # of [1], which the docstring explains is believed incorrect.
    thetar_deg = tools.asind(1.0 / n*(tools.sind(aoi)))

    # Transmittance at the actual angle of incidence: Bouguer's-law
    # absorption term (exp factor) times the unpolarized-average
    # Fresnel transmission term (eqn. 15 of [1]).
    tau = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg))) *
            ((1 - 0.5*((((tools.sind(thetar_deg - aoi)) ** 2) /
            ((tools.sind(thetar_deg + aoi)) ** 2) +
            ((tools.tand(thetar_deg - aoi)) ** 2) /
            ((tools.tand(thetar_deg + aoi)) ** 2))))) )

    # Repeat the calculation at a tiny nonzero reference angle: the
    # Fresnel ratio is 0/0 at exactly normal incidence.
    zeroang = 1e-06

    thetar_deg0 = tools.asind(1.0 / n*(tools.sind(zeroang)))

    tau0 = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg0))) *
             ((1 - 0.5*((((tools.sind(thetar_deg0 - zeroang)) ** 2) /
             ((tools.sind(thetar_deg0 + zeroang)) ** 2) +
             ((tools.tand(thetar_deg0 - zeroang)) ** 2) /
             ((tools.tand(thetar_deg0 + zeroang)) ** 2))))) )

    # IAM is transmittance normalized to (near-)normal incidence.
    IAM = tau / tau0

    # Mask physically meaningless results. NOTE(review): despite the
    # docstring, out-of-range values become NaN here, not 0 — confirm
    # which contract callers rely on.
    IAM[abs(aoi) >= 90] = np.nan
    IAM[IAM < 0] = np.nan

    return IAM
def calcparams_desoto(poa_global, temp_cell, alpha_isc, module_parameters,
                      EgRef, dEgdT, M=1, irrad_ref=1000, temp_ref=25):
    '''
    Applies the temperature and irradiance corrections to
    inputs for singlediode.

    Applies the temperature and irradiance corrections to the IL, I0,
    Rs, Rsh, and a parameters at reference conditions (IL_ref, I0_ref,
    etc.) according to the De Soto et. al description given in [1]. The
    results of this correction procedure may be used in a single diode
    model to determine IV curves at irradiance = S, cell temperature =
    Tcell.

    Parameters
    ----------
    poa_global : float or Series
        The irradiance (in W/m^2) absorbed by the module.
    temp_cell : float or Series
        The average cell temperature of cells within a module in C.
    alpha_isc : float
        The short-circuit current temperature coefficient of the
        module in units of 1/C.
    module_parameters : dict
        Parameters describing PV module performance at reference
        conditions according to DeSoto's paper. Parameters may be
        generated or found by lookup. For ease of use,
        retrieve_sam can automatically generate a dict based on the
        most recent SAM CEC module
        database. The module_parameters dict must contain the
        following 5 fields:

        * A_ref - modified diode ideality factor parameter at
          reference conditions (units of eV), a_ref can be calculated
          from the usual diode ideality factor (n),
          number of cells in series (Ns),
          and cell temperature (Tcell) per equation (2) in [1].
        * I_l_ref - Light-generated current (or photocurrent)
          in amperes at reference conditions. This value is referred to
          as Iph in some literature.
        * I_o_ref - diode reverse saturation current in amperes,
          under reference conditions.
        * R_sh_ref - shunt resistance under reference conditions (ohms).
        * R_s - series resistance under reference conditions (ohms).
    EgRef : float
        The energy bandgap at reference temperature (in eV).
        1.121 eV for silicon. EgRef must be >0.
    dEgdT : float
        The temperature dependence of the energy bandgap at SRC (in 1/C).
        May be either a scalar value (e.g. -0.0002677 as in [1]) or a
        DataFrame of dEgdT values corresponding to each input condition (this
        may be useful if dEgdT is a function of temperature).
    M : float or Series (optional, default=1)
        An optional airmass modifier, if omitted, M is given a value of 1,
        which assumes absolute (pressure corrected) airmass = 1.5. In this
        code, M is equal to M/Mref as described in [1] (i.e. Mref is assumed
        to be 1). Source [1] suggests that an appropriate value for M
        as a function absolute airmass (AMa) may be:

        >>> M = np.polyval([-0.000126, 0.002816, -0.024459, 0.086257, 0.918093],
        ...                AMa)  # doctest: +SKIP

        M may be a Series.
    irrad_ref : float (optional, default=1000)
        Reference irradiance in W/m^2.
    temp_ref : float (optional, default=25)
        Reference cell temperature in C.

    Returns
    -------
    Tuple of the following results:

    photocurrent : float or Series
        Light-generated current in amperes at irradiance=S and
        cell temperature=Tcell.
    saturation_current : float or Series
        Diode saturation curent in amperes at irradiance
        S and cell temperature Tcell.
    resistance_series : float
        Series resistance in ohms at irradiance S and cell temperature Tcell.
    resistance_shunt : float or Series
        Shunt resistance in ohms at irradiance S and cell temperature Tcell.
    nNsVth : float or Series
        Modified diode ideality factor at irradiance S and cell temperature
        Tcell. Note that in source [1] nNsVth = a (equation 2). nNsVth is the
        product of the usual diode ideality factor (n), the number of
        series-connected cells in the module (Ns), and the thermal voltage
        of a cell in the module (Vth) at a cell temperature of Tcell.

    References
    ----------
    [1] W. De Soto et al., "Improvement and validation of a model for
    photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
    2006.

    [2] System Advisor Model web page. https://sam.nrel.gov.

    [3] A. Dobos, "An Improved Coefficient Calculator for the California
    Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
    Solar Energy Engineering, vol 134, 2012.

    [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
    3-540-40488-0

    See Also
    --------
    sapm
    sapm_celltemp
    singlediode
    retrieve_sam

    Notes
    -----
    If the reference parameters in the ModuleParameters struct are read
    from a database or library of parameters (e.g. System Advisor Model),
    it is important to use the same EgRef and dEgdT values that
    were used to generate the reference parameters, regardless of the
    actual bandgap characteristics of the semiconductor. For example, in
    the case of the System Advisor Model library, created as described in
    [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
    respectively.

    Reference values for other semiconductors (see [1], [4]):

    * Silicon (Si): EgRef = 1.121, dEgdT = -0.0002677
    * Cadmium Telluride (CdTe): EgRef = 1.475, dEgdT = -0.0003
    * Copper Indium diSelenide (CIS): EgRef = 1.010, dEgdT = -0.00011
    * Copper Indium Gallium diSelenide (CIGS): EgRef = 1.15, dEgdT unknown
    * Gallium Arsenide (GaAs): EgRef = 1.424, dEgdT = -0.000433

    Again, we stress the importance of using identical EgRef and dEgdT
    when generating reference parameters and modifying the reference
    parameters (for irradiance, temperature, and airmass) per DeSoto's
    equations.
    '''
    # Clamp the airmass modifier at zero element-wise. The previous
    # np.max(M, 0) passed 0 as the *axis* argument, collapsing an array
    # M to its scalar maximum instead of clipping negatives.
    M = np.maximum(M, 0)

    a_ref = module_parameters['A_ref']
    IL_ref = module_parameters['I_l_ref']
    I0_ref = module_parameters['I_o_ref']
    Rsh_ref = module_parameters['R_sh_ref']
    Rs_ref = module_parameters['R_s']

    k = 8.617332478e-05  # Boltzmann's constant in eV/K
    Tref_K = temp_ref + 273.15
    Tcell_K = temp_cell + 273.15

    # Bandgap narrows (for negative dEgdT) as the cell heats up, eqn (10) in [1]
    E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))

    # Modified ideality factor scales linearly with absolute temperature, eqn (8)
    nNsVth = a_ref * (Tcell_K / Tref_K)

    # Photocurrent: linear in absorbed irradiance and airmass modifier, eqn (9)
    IL = (poa_global/irrad_ref) * M * (IL_ref + alpha_isc * (Tcell_K - Tref_K))

    # Saturation current temperature dependence, eqn (11)
    I0 = ( I0_ref * ((Tcell_K / Tref_K) ** 3) *
           (np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))) )

    # Shunt resistance inversely proportional to irradiance, eqn (12);
    # series resistance assumed independent of conditions, eqn (13)
    Rsh = Rsh_ref * (irrad_ref / poa_global)
    Rs = Rs_ref

    return IL, I0, Rs, Rsh, nNsVth
def retrieve_sam(name=None, samfile=None):
    '''
    Retrieve the latest module and inverter databases from the SAM website.

    This function will retrieve either:

    * CEC module database
    * Sandia Module database
    * Sandia Inverter database

    and return it as a pandas DataFrame.

    Parameters
    ----------
    name : String
        One of (case-insensitive):

        * 'CECMod' - returns the CEC module database
        * 'SandiaInverter' - returns the Sandia Inverter database
        * 'SandiaMod' - returns the Sandia Module database
    samfile : String
        Absolute path to a local copy of the SAM file in .csv format.
        When given, the latest SAM database is not downloaded. The
        special value 'select' opens a dialogue so the user can
        navigate to the file interactively.

    Returns
    -------
    DataFrame
        All elements of the desired database; each column represents a
        module or inverter.

    Raises
    ------
    ValueError
        If neither ``name`` nor ``samfile`` is supplied, or if ``name``
        is unrecognized and no ``samfile`` was given.
    '''
    urls = {
        'cecmod': 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-cec-modules-2014-1-14.csv',
        'sandiamod': 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-sandia-modules-2014-1-14.csv',
        'sandiainverter': 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-sandia-inverters-2014-1-14.csv',
    }

    url = None
    if name is not None:
        name = name.lower()
        url = urls.get(name)
        # An unknown name is only fatal when there is no local file to
        # fall back on.
        if url is None and samfile is None:
            raise ValueError('invalid name {}'.format(name))
    elif samfile is None:
        raise ValueError('must supply name or samfile')

    if samfile is None:
        pvl_logger.info('retrieving {} from {}'.format(name, url))
        response = urlopen(url)
        csvdata = io.StringIO(response.read().decode(errors='ignore'))
    elif samfile == 'select':
        import Tkinter
        from tkFileDialog import askopenfilename
        Tkinter.Tk().withdraw()
        csvdata = askopenfilename()
    else:
        csvdata = samfile

    return _parse_raw_sam_df(csvdata)
def _parse_raw_sam_df(csvdata):
df = pd.read_csv(csvdata, index_col=0)
parsedindex = []
for index in df.index:
parsedindex.append(index.replace(' ', '_').replace('-', '_')
.replace('.', '_').replace('(', '_')
.replace(')', '_').replace('[', '_')
.replace(']', '_').replace(':', '_'))
df.index = parsedindex
df = df.transpose()
return df
def sapm(module, poa_direct, poa_diffuse, temp_cell, airmass_absolute, aoi):
    '''
    The Sandia PV Array Performance Model (SAPM) generates 5 points on a PV
    module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
    SAND2004-3535. Assumes a reference cell temperature of 25 C.

    Parameters
    ----------
    module : Series or dict
        A DataFrame defining the SAPM performance parameters.
    poa_direct : Series
        The direct irradiance incident upon the module (W/m^2).
    poa_diffuse : Series
        The diffuse irradiance incident on module.
    temp_cell : Series
        The cell temperature (degrees C).
    airmass_absolute : Series
        Absolute airmass.
    aoi : Series
        Angle of incidence (degrees).

    Returns
    -------
    A DataFrame with the columns:

        * i_sc : Short-circuit current (A)
        * I_mp : Current at the maximum-power point (A)
        * v_oc : Open-circuit voltage (V)
        * v_mp : Voltage at maximum-power point (V)
        * p_mp : Power at maximum-power point (W)
        * i_x : Current at module V = 0.5Voc, defines 4th point on I-V
          curve for modeling curve shape
        * i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
          I-V curve for modeling curve shape
        * effective_irradiance : Effective irradiance

    Notes
    -----
    The coefficients from SAPM which are required in ``module`` are:

    ======== ===============================================================
    Key      Description
    ======== ===============================================================
    A0-A4    The airmass coefficients used in calculating
             effective irradiance
    B0-B5    The angle of incidence coefficients used in calculating
             effective irradiance
    C0-C7    The empirically determined coefficients relating
             Imp, Vmp, Ix, and Ixx to effective irradiance
    Isco     Short circuit current at reference condition (amps)
    Impo     Maximum power current at reference condition (amps)
    Aisc     Short circuit current temperature coefficient at
             reference condition (1/C)
    Aimp     Maximum power current temperature coefficient at
             reference condition (1/C)
    Bvoc     Open circuit voltage temperature coefficient at
             reference condition (V/C)
    Mbvoc    Coefficient providing the irradiance dependence for the BetaVoc
             temperature coefficient at reference irradiance (V/C)
    Bvmpo    Maximum power voltage temperature coefficient at
             reference condition
    Mbvmp    Coefficient providing the irradiance dependence for the
             BetaVmp temperature coefficient at reference irradiance (V/C)
    N        Empirically determined "diode factor" (dimensionless)
    #Series  Number of cells in series in a module's cell string(s)
    IXO      Ix at reference conditions
    IXXO     Ixx at reference conditions
    FD       Fraction of diffuse irradiance used by module
    ======== ===============================================================

    References
    ----------
    [1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
    SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.

    See Also
    --------
    retrieve_sam
    sapm_celltemp
    '''
    T0 = 25
    q = 1.60218e-19  # Elementary charge in units of coulombs
    kb = 1.38066e-23  # Boltzmann's constant in units of J/K
    E0 = 1000  # Reference irradiance (W/m^2)

    # np.polyval expects highest-order coefficient first.
    am_coeff = [module['A4'], module['A3'], module['A2'], module['A1'],
                module['A0']]
    aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
                 module['B1'], module['B0']]

    F1 = np.polyval(am_coeff, airmass_absolute)
    F2 = np.polyval(aoi_coeff, aoi)

    # Ee is the "effective irradiance"
    Ee = F1 * ( (poa_direct*F2 + module['FD']*poa_diffuse) / E0 )
    Ee.fillna(0, inplace=True)
    # Series.clip_lower was deprecated in pandas 0.24 and removed in 1.0;
    # clip(lower=...) is the supported equivalent.
    Ee = Ee.clip(lower=0)

    # Voltage temperature coefficients gain an irradiance dependence.
    Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
    Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
    delta = module['N'] * kb * (temp_cell + 273.15) / q

    dfout = pd.DataFrame(index=Ee.index)

    dfout['i_sc'] = (
        module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - T0)) )

    dfout['i_mp'] = ( module['Impo'] *
                      (module['C0']*Ee + module['C1']*(Ee**2)) *
                      (1 + module['Aimp']*(temp_cell - T0)) )

    dfout['v_oc'] = (( module['Voco'] +
                       module['#Series']*delta*np.log(Ee) +
                       Bvoco*(temp_cell - T0) )
                     .clip(lower=0))

    dfout['v_mp'] = ( module['Vmpo'] +
                      module['C2']*module['#Series']*delta*np.log(Ee) +
                      module['C3']*module['#Series']*((delta*np.log(Ee)) ** 2) +
                      Bvmpo*(temp_cell - T0) ).clip(lower=0)

    dfout['p_mp'] = dfout['i_mp'] * dfout['v_mp']

    dfout['i_x'] = ( module['IXO'] *
                     (module['C4']*Ee + module['C5']*(Ee**2)) *
                     (1 + module['Aisc']*(temp_cell - T0)) )

    # the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
    dfout['i_xx'] = ( module['IXXO'] *
                      (module['C6']*Ee + module['C7']*(Ee**2)) *
                      (1 + module['Aisc']*(temp_cell - T0)) )

    dfout['effective_irradiance'] = Ee

    return dfout
def sapm_celltemp(irrad, wind, temp, model='open_rack_cell_glassback'):
    '''
    Estimate cell and module temperatures per the Sandia PV Array
    Performance Model (SAPM, SAND2004-3535), from the incident
    irradiance, wind speed, ambient temperature, and SAPM module
    parameters.

    Parameters
    ----------
    irrad : float or Series
        Total incident irradiance in W/m^2.
    wind : float or Series
        Wind speed in m/s at a height of 10 meters.
    temp : float or Series
        Ambient dry bulb temperature in degrees C.
    model : string or list
        Model to be used.

        If string, can be:

            * 'open_rack_cell_glassback' (default)
            * 'roof_mount_cell_glassback'
            * 'open_rack_cell_polymerback'
            * 'insulated_back_polymerback'
            * 'open_rack_polymer_thinfilm_steel'
            * '22x_concentrator_tracker'

        If list (or any indexable sequence), supply the following
        parameters in the following order:

            * a : float
                SAPM module parameter for establishing the upper
                limit for module temperature at low wind speeds and
                high solar irradiance.
            * b : float
                SAPM module parameter for establishing the rate at
                which the module temperature drops as wind speed increases
                (see SAPM eqn. 11).
            * deltaT : float
                SAPM module parameter giving the temperature difference
                between the cell and module back surface at the
                reference irradiance, E0.

    Returns
    --------
    DataFrame with columns 'temp_cell' and 'temp_module'.
    Values in degrees C.

    References
    ----------
    [1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
    SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.

    See Also
    --------
    sapm
    '''
    temp_models = {'open_rack_cell_glassback': [-3.47, -.0594, 3],
                   'roof_mount_cell_glassback': [-2.98, -.0471, 1],
                   'open_rack_cell_polymerback': [-3.56, -.0750, 3],
                   'insulated_back_polymerback': [-2.81, -.0455, 0],
                   'open_rack_polymer_thinfilm_steel': [-3.58, -.113, 3],
                   '22x_concentrator_tracker': [-3.23, -.130, 13]
                   }

    if isinstance(model, str):
        model = temp_models[model.lower()]
    # otherwise model is used as-is: any [a, b, deltaT] sequence works
    # (the previous "elif isinstance(model, list): model = model" branch
    # was a no-op and has been removed).

    a = model[0]
    b = model[1]
    deltaT = model[2]

    E0 = 1000.  # Reference irradiance

    # SAPM eqn. 11: module back-surface temperature.
    temp_module = pd.Series(irrad*np.exp(a + b*wind) + temp)

    # SAPM eqn. 12: cell runs hotter than the back surface in
    # proportion to the irradiance.
    temp_cell = temp_module + (irrad / E0)*(deltaT)

    return pd.DataFrame({'temp_cell': temp_cell, 'temp_module': temp_module})
def singlediode(module, photocurrent, saturation_current,
                resistance_series, resistance_shunt, nNsVth):
    '''
    Solve the single-diode model to obtain a photovoltaic IV curve.

    Uses the explicit Lambert W solution of the single diode equation [1]

    .. math::

        I = IL - I0*[exp((V+I*Rs)/(nNsVth))-1] - (V + I*Rs)/Rsh

    to compute the 5 characteristic points of the I-V curve specified in
    SAND2004-3535 [3]. If all of IL, I0, Rs, Rsh, and nNsVth are scalar,
    a single curve is returned; if any are Series (of equal length),
    multiple IV curves are calculated. The inputs can be obtained from
    meteorological data with calcparams_desoto.

    Parameters
    ----------
    module : DataFrame
        A DataFrame defining the SAPM performance parameters; only
        ``module['V_oc_ref']`` is read here, to bound the voltage search.
    photocurrent : float or Series
        Light-generated current (photocurrent) in amperes under desired IV
        curve conditions. Often abbreviated ``I_L``.
    saturation_current : float or Series
        Diode saturation current in amperes under desired IV curve
        conditions. Often abbreviated ``I_0``.
    resistance_series : float or Series
        Series resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rs``.
    resistance_shunt : float or Series
        Shunt resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rsh``.
    nNsVth : float or Series
        Product of the diode ideality factor (n), the number of cells in
        series (Ns), and the cell thermal voltage (Vth, in volts,
        ``k*temp_cell/q``).

    Returns
    -------
    DataFrame (Series input) or dict (scalar input) with keys/columns:

    * i_sc - short circuit current in amperes.
    * v_oc - open circuit voltage in volts.
    * i_mp - current at maximum power point in amperes.
    * v_mp - voltage at maximum power point in volts.
    * p_mp - power at maximum power point in watts.
    * i_x - current, in amperes, at ``v = 0.5*v_oc``.
    * i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.

    Notes
    -----
    The implicit diode equation is inverted through the Lambert W
    function, giving explicit V=f(I) and I=f(V) as shown in [2].

    References
    -----------
    [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics"
    ISBN 0 86758 909 4

    [2] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
    real solar cells using Lambert W-function", Solar Energy Materials
    and Solar Cells, 81 (2004) 269-277.

    [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
    SAND2004-3535, Sandia National Laboratories, Albuquerque, NM

    See also
    --------
    sapm
    calcparams_desoto
    '''
    pvl_logger.debug('pvsystem.singlediode')

    # Short-circuit current from the explicit Lambert W solution,
    # evaluated at (nearly) zero volts.
    i_sc = i_from_v(resistance_shunt, resistance_series, nNsVth, 0.01,
                    saturation_current, photocurrent)

    params = {'r_sh': resistance_shunt,
              'r_s': resistance_series,
              'nNsVth': nNsVth,
              'i_0': saturation_current,
              'i_l': photocurrent}

    # Open-circuit voltage: maximize -|I| over V in [0, 1.6 * V_oc_ref].
    __, v_oc = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.6,
                                      _v_oc_optfcn)

    # Maximum power point: maximize P = I*V over V in [0, 1.14 * V_oc_ref],
    # then recover i_mp from v_mp through the inverted diode equation.
    p_mp, v_mp = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.14,
                                        _pwr_optfcn)
    i_mp = i_from_v(resistance_shunt, resistance_series, nNsVth, v_mp,
                    saturation_current, photocurrent)

    # Ix (at 0.5*Voc) and Ixx (at 0.5*(Voc+Vmp)), also via Lambert W.
    i_x = i_from_v(resistance_shunt, resistance_series, nNsVth,
                   0.5*v_oc, saturation_current, photocurrent)
    i_xx = i_from_v(resistance_shunt, resistance_series, nNsVth,
                    0.5*(v_oc+v_mp), saturation_current, photocurrent)

    # @wholmgren: need to move this stuff to a different function
    # A full IV sweep (NumPoints >= 2 columns spanning 0..Voc per row)
    # could be produced here, as in the original MATLAB implementation,
    # by evaluating I_from_V over a linspace of voltages per curve.

    dfout = {'i_sc': i_sc,
             'i_mp': i_mp,
             'v_oc': v_oc,
             'v_mp': v_mp,
             'p_mp': p_mp,
             'i_x': i_x,
             'i_xx': i_xx}

    # Series input carries an index; scalar input has no .index attribute
    # and the dict is returned as-is.
    try:
        dfout = pd.DataFrame(dfout, index=photocurrent.index)
    except AttributeError:
        pass

    return dfout
# Created April,2014
# Author: Rob Andrews, Calama Consulting
def _golden_sect_DataFrame(params, VL, VH, func):
'''
Vectorized golden section search for finding MPPT
from a dataframe timeseries.
Parameters
----------
params : dict
Dictionary containing scalars or arrays
of inputs to the function to be optimized.
Each row should represent an independent optimization.
VL: float
Lower bound of the optimization
VH: float
Upper bound of the optimization
func: function
Function to be optimized must be in the form f(array-like, x)
Returns
-------
func(df,'V1') : DataFrame
function evaluated at the optimal point
df['V1']: Dataframe
Dataframe of optimal points
Notes
-----
This funtion will find the MAXIMUM of a function
'''
df = params
df['VH'] = VH
df['VL'] = VL
err = df['VH'] - df['VL']
errflag = True
iterations = 0
while errflag:
phi = (np.sqrt(5)-1)/2*(df['VH']-df['VL'])
df['V1'] = df['VL'] + phi
df['V2'] = df['VH'] - phi
df['f1'] = func(df, 'V1')
df['f2'] = func(df, 'V2')
df['SW_Flag'] = df['f1'] > df['f2']
df['VL'] = df['V2']*df['SW_Flag'] + df['VL']*(~df['SW_Flag'])
df['VH'] = df['V1']*~df['SW_Flag'] + df['VH']*(df['SW_Flag'])
err = df['V1'] - df['V2']
try:
errflag = (abs(err)>.01).all()
except ValueError:
errflag = (abs(err)>.01)
iterations += 1
if iterations > 50:
raise Exception("EXCEPTION:iterations exeeded maximum (50)")
return func(df, 'V1'), df['V1']
def _pwr_optfcn(df, loc):
    '''
    Optimization helper: electrical power P = I*V, with the current
    obtained from ``i_from_v`` at the voltage stored under key ``loc``.
    '''
    voltage = df[loc]
    current = i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
                       voltage, df['i_0'], df['i_l'])
    return current * voltage
def _v_oc_optfcn(df, loc):
    '''
    Optimization helper: negated current magnitude from ``i_from_v``;
    maximized where the cell current is zero, i.e. at open circuit.
    '''
    current = i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
                       df[loc], df['i_0'], df['i_l'])
    return -abs(current)
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
             saturation_current, photocurrent):
    '''
    Calculate device current from voltage using the explicit Lambert W
    solution of the single diode equation, per Eq 2 of Jain and
    Kapoor 2004 [1].

    Parameters
    ----------
    resistance_shunt : float or Series
        Shunt resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rsh``.
    resistance_series : float or Series
        Series resistance in ohms under desired IV curve conditions.
        Often abbreviated ``Rs``.
    nNsVth : float or Series
        Product of the diode ideality factor (n), the number of cells in
        series (Ns), and the cell thermal voltage (Vth, in volts,
        ``k*temp_cell/q``).
    voltage : float or Series
        Device voltage in volts at which to evaluate the current.
    saturation_current : float or Series
        Diode saturation current in amperes under desired IV curve
        conditions. Often abbreviated ``I_0``.
    photocurrent : float or Series
        Light-generated current (photocurrent) in amperes under desired IV
        curve conditions. Often abbreviated ``I_L``.

    Returns
    -------
    current : np.array

    Raises
    ------
    ImportError
        If scipy (which supplies ``lambertw``) is not available.

    References
    ----------
    [1] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
    real solar cells using Lambert W-function", Solar Energy Materials
    and Solar Cells, 81 (2004) 269-277.
    '''
    # scipy is an optional dependency of this module; import lazily.
    try:
        from scipy.special import lambertw
    except ImportError:
        raise ImportError('This function requires scipy')

    # Short aliases matching the notation of [1].
    Rsh = resistance_shunt
    Rs = resistance_series
    I0 = saturation_current
    IL = photocurrent
    V = voltage

    # Argument of the Lambert W function (Eq. 2 in [1]).
    scale = nNsVth*(Rs + Rsh)
    argW = Rs*I0*Rsh * np.exp(Rsh*(Rs*(IL+I0)+V) / scale) / scale
    lambertwterm = lambertw(argW)

    pvl_logger.debug('argW: {}, lambertwterm{}'.format(argW, lambertwterm))

    # Eqn. 4 in Jain and Kapoor, 2004; lambertw returns a complex value
    # with zero imaginary part here, so only the real part is returned.
    I = -V/(Rs + Rsh) - (nNsVth/Rs)*lambertwterm + Rsh*(IL + I0)/(Rs + Rsh)

    return I.real
def snlinverter(inverter, v_dc, p_dc):
    '''
    Converts DC power and voltage to AC power using
    Sandia's Grid-Connected PV Inverter model.

    Determines the AC power output of an inverter given the DC voltage, DC
    power, and appropriate Sandia Grid-Connected Photovoltaic Inverter
    Model parameters. The output, ac_power, is clipped at the maximum power
    output, and gives a negative power during low-input power conditions,
    but does NOT account for maximum power point tracking voltage windows
    nor maximum current or voltage limits on the inverter.

    Parameters
    ----------
    inverter : DataFrame
        A DataFrame defining the inverter to be used, giving the
        inverter performance parameters according to the Sandia
        Grid-Connected Photovoltaic Inverter Model (SAND 2007-5036) [1].
        A set of inverter performance parameters are provided with pvlib,
        or may be generated from a System Advisor Model (SAM) [2]
        library using retrievesam.

        Required DataFrame columns are:

        ====== ============================================================
        Column Description
        ====== ============================================================
        Paco   AC-power output from inverter based on input power
               and voltage (W)
        Pdco   DC-power input to inverter, typically assumed to be equal
               to the PV array maximum power (W)
        Vdco   DC-voltage level at which the AC-power rating is achieved
               at the reference operating condition (V)
        Pso    DC-power required to start the inversion process, or
               self-consumption by inverter, strongly influences inverter
               efficiency at low power levels (W)
        C0     Parameter defining the curvature (parabolic) of the
               relationship between ac-power and dc-power at the reference
               operating condition, default value of zero gives a
               linear relationship (1/W)
        C1     Empirical coefficient allowing Pdco to vary linearly
               with dc-voltage input, default value is zero (1/V)
        C2     Empirical coefficient allowing Pso to vary linearly with
               dc-voltage input, default value is zero (1/V)
        C3     Empirical coefficient allowing C0 to vary linearly with
               dc-voltage input, default value is zero (1/V)
        Pnt    AC-power consumed by inverter at night (night tare) to
               maintain circuitry required to sense PV array voltage (W)
        ====== ============================================================

    v_dc : float or Series
        DC voltages, in volts, which are provided as input to the inverter.
        Vdc must be >= 0.
    p_dc : float or Series
        A scalar or DataFrame of DC powers, in watts, which are provided
        as input to the inverter. Pdc must be >= 0.

    Returns
    -------
    ac_power : float or Series
        Modeled AC power output given the input
        DC voltage, Vdc, and input DC power, Pdc. When ac_power would be
        greater than Paco, it is set to Paco to represent inverter
        "clipping". When ac_power would be less than Pso (startup power
        required), then ac_power is set to -1*abs(Pnt) to represent nightly
        power losses. ac_power is not adjusted for maximum power point
        tracking (MPPT) voltage windows or maximum current limits of the
        inverter.

    References
    ----------
    [1] SAND2007-5036, "Performance Model for Grid-Connected Photovoltaic
    Inverters by D. King, S. Gonzalez, G. Galbraith, W. Boyson

    [2] System Advisor Model web page. https://sam.nrel.gov.

    See also
    --------
    sapm
    singlediode
    '''
    Paco = inverter['Paco']
    Pdco = inverter['Pdco']
    Vdco = inverter['Vdco']
    Pso = inverter['Pso']
    C0 = inverter['C0']
    C1 = inverter['C1']
    C2 = inverter['C2']
    C3 = inverter['C3']
    Pnt = inverter['Pnt']

    # Voltage-dependent effective ratings (SAND2007-5036).
    A = Pdco * (1 + C1*(v_dc - Vdco))
    B = Pso * (1 + C2*(v_dc - Vdco))
    C = C0 * (1 + C3*(v_dc - Vdco))

    # ensures that function works with scalar or Series input
    p_dc = pd.Series(p_dc)

    ac_power = ( Paco/(A-B) - C*(A-B) ) * (p_dc-B) + C*((p_dc-B)**2)

    # Clip at the AC rating; report the night tare as a negative power
    # draw below the startup threshold.
    ac_power[ac_power > Paco] = Paco
    ac_power[ac_power < Pso] = - 1.0 * abs(Pnt)

    # Scalar input produced a length-1 Series above; unwrap it.
    # .iloc replaces the deprecated (and since-removed) pandas .ix indexer.
    if len(ac_power) == 1:
        ac_power = ac_power.iloc[0]

    return ac_power
| bsd-3-clause |
jtux270/translate | FreeIPA/freeipa-3.0.0/tests/test_xmlrpc/test_selinuxusermap_plugin.py | 2 | 28881 | # Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/selinuxusermap.py` module.
"""
from ipalib import api, errors
from tests.test_xmlrpc import objectclasses
from xmlrpc_test import Declarative, fuzzy_digits, fuzzy_uuid
from ipapython.dn import DN
from tests.util import Fuzzy
# Names of the directory entries created and torn down by the
# Declarative test sequence below (see cleanup_commands).
rule1 = u'selinuxrule1'
selinuxuser1 = u'guest_u:s0'
selinuxuser2 = u'xguest_u:s0'
user1 = u'tuser1'
group1 = u'testgroup1'
host1 = u'testhost1.%s' % api.env.domain
hostdn1 = DN(('fqdn',host1),('cn','computers'),('cn','accounts'),
             api.env.basedn)
hbacrule1 = u'testhbacrule1'
hbacrule2 = u'testhbacrule12'

# Fuzzy DN matchers: these entries are keyed by a server-generated
# ipaUniqueID, so expected DNs can only be matched by pattern.
# Note (?i) at the beginning of the regexp is the ignore case flag
# (attribute-name case in returned DNs may vary).
fuzzy_selinuxusermapdn = Fuzzy(
    '(?i)ipauniqueid=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12},%s,%s' % (api.env.container_selinux, api.env.basedn)
)
fuzzy_hbacruledn = Fuzzy(
    '(?i)ipauniqueid=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12},%s,%s' % (api.env.container_hbac, api.env.basedn)
)
class test_selinuxusermap(Declarative):
cleanup_commands = [
('selinuxusermap_del', [rule1], {}),
('group_del', [group1], {}),
('user_del', [user1], {}),
('host_del', [host1], {}),
('hbacrule_del', [hbacrule1], {}),
('hbacrule_del', [hbacrule2], {}),
]
tests = [
dict(
desc='Try to retrieve non-existent %r' % rule1,
command=('selinuxusermap_show', [rule1], {}),
expected=errors.NotFound(
reason=u'%s: SELinux User Map rule not found' % rule1),
),
dict(
desc='Try to update non-existent %r' % rule1,
command=('selinuxusermap_mod', [rule1], dict(description=u'Foo')),
expected=errors.NotFound(
reason=u'%s: SELinux User Map rule not found' % rule1),
),
dict(
desc='Try to delete non-existent %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=errors.NotFound(
reason=u'%s: SELinux User Map rule not found' % rule1),
),
dict(
desc='Create rule %r' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1)
),
expected=dict(
value=rule1,
summary=u'Added SELinux User Map "%s"' % rule1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser1],
objectclass=objectclasses.selinuxusermap,
ipauniqueid=[fuzzy_uuid],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
),
),
dict(
desc='Try to create duplicate %r' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1)
),
expected=errors.DuplicateEntry(message=u'SELinux User Map rule ' +
u'with name "%s" already exists' % rule1),
),
dict(
desc='Retrieve rule %r' % rule1,
command=('selinuxusermap_show', [rule1], {}),
expected=dict(
value=rule1,
summary=None,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser1],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
),
),
dict(
desc='Update rule %r' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(ipaselinuxuser=selinuxuser2)
),
expected=dict(
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
),
summary=u'Modified SELinux User Map "%s"' % rule1,
value=rule1,
),
),
dict(
desc='Retrieve %r to verify update' % rule1,
command=('selinuxusermap_show', [rule1], {}),
expected=dict(
value=rule1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
summary=None,
),
),
dict(
desc='Search for rule %r' % rule1,
command=('selinuxusermap_find', [], dict(cn=rule1)),
expected=dict(
count=1,
truncated=False,
result=[
dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
],
summary=u'1 SELinux User Map matched',
),
),
###############
# Create additional entries needed for testing
dict(
desc='Create %r' % user1,
command=(
'user_add', [], dict(givenname=u'Test', sn=u'User1')
),
expected=dict(
value=user1,
summary=u'Added user "%s"' % user1,
result=dict(
gecos=[u'Test User1'],
givenname=[u'Test'],
homedirectory=[u'/home/%s' % user1],
krbprincipalname=[u'%s@%s' % (user1, api.env.realm)],
loginshell=[u'/bin/sh'],
objectclass=objectclasses.user,
sn=[u'User1'],
uid=[user1],
uidnumber=[fuzzy_digits],
gidnumber=[fuzzy_digits],
mail=[u'%s@%s' % (user1, api.env.domain)],
displayname=[u'Test User1'],
cn=[u'Test User1'],
initials=[u'TU'],
ipauniqueid=[fuzzy_uuid],
krbpwdpolicyreference=[DN(('cn','global_policy'),('cn',api.env.realm),
('cn','kerberos'),api.env.basedn)],
mepmanagedentry=[DN(('cn',user1),('cn','groups'),('cn','accounts'),
api.env.basedn)],
memberof_group=[u'ipausers'],
dn=DN(('uid',user1),('cn','users'),('cn','accounts'),
api.env.basedn),
has_keytab=False,
has_password=False,
),
),
),
dict(
desc='Create group %r' % group1,
command=(
'group_add', [group1], dict(description=u'Test desc 1')
),
expected=dict(
value=group1,
summary=u'Added group "%s"' % group1,
result=dict(
cn=[group1],
description=[u'Test desc 1'],
gidnumber=[fuzzy_digits],
objectclass=objectclasses.group + [u'posixgroup'],
ipauniqueid=[fuzzy_uuid],
dn=DN(('cn',group1),('cn','groups'),('cn','accounts'),
api.env.basedn),
),
),
),
dict(
desc='Add member %r to %r' % (user1, group1),
command=(
'group_add_member', [group1], dict(user=user1)
),
expected=dict(
completed=1,
failed=dict(
member=dict(
group=tuple(),
user=tuple(),
),
),
result={
'dn': DN(('cn',group1),('cn','groups'),('cn','accounts'),
api.env.basedn),
'member_user': (user1,),
'gidnumber': [fuzzy_digits],
'cn': [group1],
'description': [u'Test desc 1'],
},
),
),
dict(
desc='Create host %r' % host1,
command=('host_add', [host1],
dict(
description=u'Test host 1',
l=u'Undisclosed location 1',
force=True,
),
),
expected=dict(
value=host1,
summary=u'Added host "%s"' % host1,
result=dict(
dn=hostdn1,
fqdn=[host1],
description=[u'Test host 1'],
l=[u'Undisclosed location 1'],
krbprincipalname=[u'host/%s@%s' % (host1, api.env.realm)],
objectclass=objectclasses.host,
ipauniqueid=[fuzzy_uuid],
managedby_host=[host1],
has_keytab=False,
has_password=False,
),
),
),
dict(
desc='Create HBAC rule %r' % hbacrule1,
command=(
'hbacrule_add', [hbacrule1], {}
),
expected=dict(
value=hbacrule1,
summary=u'Added HBAC rule "%s"' % hbacrule1,
result=dict(
cn=[hbacrule1],
objectclass=objectclasses.hbacrule,
ipauniqueid=[fuzzy_uuid],
accessruletype=[u'allow'],
ipaenabledflag=[u'TRUE'],
dn=fuzzy_hbacruledn,
),
),
),
dict(
desc='Create HBAC rule %r' % hbacrule2,
command=(
'hbacrule_add', [hbacrule2], {}
),
expected=dict(
value=hbacrule2,
summary=u'Added HBAC rule "%s"' % hbacrule2,
result=dict(
cn=[hbacrule2],
objectclass=objectclasses.hbacrule,
ipauniqueid=[fuzzy_uuid],
accessruletype=[u'allow'],
ipaenabledflag=[u'TRUE'],
dn=fuzzy_hbacruledn,
),
),
),
###############
# Fill out rule with members and/or pointers to HBAC rules
dict(
desc='Add user to %r' % rule1,
command=('selinuxusermap_add_user', [rule1], dict(user=user1)),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
memberuser_user = [user1],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Add non-existent user to %r' % rule1,
command=('selinuxusermap_add_user', [rule1], dict(user=u'notfound')),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[(u'notfound', u'no such entry')])),
completed=0,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
memberuser_user = [user1],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Remove user from %r' % rule1,
command=('selinuxusermap_remove_user', [rule1], dict(user=user1)),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Remove non-existent user to %r' % rule1,
command=('selinuxusermap_remove_user', [rule1], dict(user=u'notfound')),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[(u'notfound', u'This entry is not a member')])),
completed=0,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Add group to %r' % rule1,
command=('selinuxusermap_add_user', [rule1], dict(group=group1)),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
memberuser_group = [group1],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Add host to %r' % rule1,
command=('selinuxusermap_add_host', [rule1], dict(host=host1)),
expected=dict(
failed=dict(memberhost=dict(hostgroup=[], host=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
memberhost_host = [host1],
memberuser_group = [group1],
dn=fuzzy_selinuxusermapdn,
),
)
),
###############
# Test enabling and disabling
dict(
desc='Disable %r' % rule1,
command=('selinuxusermap_disable', [rule1], {}),
expected=dict(
result=True,
value=rule1,
summary=u'Disabled SELinux User Map "%s"' % rule1,
)
),
dict(
desc='Disable %r again' % rule1,
command=('selinuxusermap_disable', [rule1], {}),
expected=errors.AlreadyInactive(),
),
dict(
desc='Enable %r' % rule1,
command=('selinuxusermap_enable', [rule1], {}),
expected=dict(
result=True,
value=rule1,
summary=u'Enabled SELinux User Map "%s"' % rule1,
)
),
dict(
desc='Re-enable %r again' % rule1,
command=('selinuxusermap_enable', [rule1], {}),
expected=errors.AlreadyActive(),
),
# Point to an HBAC Rule
dict(
desc='Add an HBAC rule to %r that has other members' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(seealso=hbacrule1)
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Remove host from %r' % rule1,
command=('selinuxusermap_remove_host', [rule1], dict(host=host1)),
expected=dict(
failed=dict(memberhost=dict(hostgroup=[], host=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
memberuser_group = [group1],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Remove group from %r' % rule1,
command=('selinuxusermap_remove_user', [rule1], dict(group=group1)),
expected=dict(
failed=dict(memberuser=dict(group=[], user=[])),
completed=1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
)
),
dict(
desc='Add non-existent HBAC rule to %r' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(seealso=u'notfound')
),
expected=errors.NotFound(
reason=u'HBAC rule notfound not found'),
),
dict(
desc='Add an HBAC rule to %r' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(seealso=hbacrule1)
),
expected=dict(
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser2],
ipaenabledflag = [u'TRUE'],
seealso = hbacrule1,
),
summary=u'Modified SELinux User Map "%s"' % rule1,
value=rule1,
),
),
dict(
desc='Add user to %r that has HBAC' % rule1,
command=('selinuxusermap_add_user', [rule1], dict(user=user1)),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Add host to %r that has HBAC' % rule1,
command=('selinuxusermap_add_host', [rule1], dict(host=host1)),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Try to delete HBAC rule pointed to by %r' % rule1,
command=('hbacrule_del', [hbacrule1], {}),
expected=errors.DependentEntry(key=hbacrule1, label=u'SELinux User Map', dependent=rule1)
),
# This tests selinuxusermap-find --hbacrule=<foo> returns an
# exact match
dict(
desc='Try to delete similarly named HBAC rule %r' % hbacrule2,
command=('hbacrule_del', [hbacrule2], {}),
expected=dict(
result=dict(failed=u''),
value=hbacrule2,
summary=u'Deleted HBAC rule "%s"' % hbacrule2,
)
),
# Test clean up
dict(
desc='Delete %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=dict(
result=dict(failed=u''),
value=rule1,
summary=u'Deleted SELinux User Map "%s"' % rule1,
)
),
dict(
desc='Try to delete non-existent %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=errors.NotFound(
reason=u'%s: SELinux User Map rule not found' % rule1),
),
# Some negative tests
dict(
desc='Create rule with unknown user %r' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=u'notfound:s0:c0')
),
expected=errors.NotFound(reason=u'SELinux user notfound:s0:c0 not ' +
u'found in ordering list (in config)'),
),
dict(
desc='Create rule with invalid user bad+user',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=u'bad+user')
),
expected=errors.ValidationError(name='selinuxuser',
error=u'Invalid SELinux user name, only a-Z and _ are allowed'),
),
dict(
desc='Create rule with invalid MCS xguest_u:s999',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=u'xguest_u:s999')
),
expected=errors.ValidationError(name='selinuxuser',
error=u'Invalid MLS value, must match s[0-15](-s[0-15])'),
),
dict(
desc='Create rule with invalid MLS xguest_u:s0:p88',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=u'xguest_u:s0:p88')
),
expected=errors.ValidationError(name='selinuxuser',
error=u'Invalid MCS value, must match c[0-1023].c[0-1023] ' +
u'and/or c[0-1023]-c[0-c0123]'),
),
dict(
desc='Create rule with invalid MLS xguest_u:s0:c0.c1028',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=u'xguest_u:s0-s0:c0.c1028')
),
expected=errors.ValidationError(name='selinuxuser',
error=u'Invalid MCS value, must match c[0-1023].c[0-1023] ' +
u'and/or c[0-1023]-c[0-c0123]'),
),
dict(
desc='Create rule with invalid user via setattr',
command=(
'selinuxusermap_mod', [rule1], dict(setattr=u'ipaselinuxuser=deny')
),
expected=errors.ValidationError(name='ipaselinuxuser',
error=u'Invalid MLS value, must match s[0-15](-s[0-15])'),
),
dict(
desc='Create rule with both --hbacrule and --usercat set',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,seealso=hbacrule1,usercategory=u'all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Create rule with both --hbacrule and --hostcat set',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,seealso=hbacrule1,hostcategory=u'all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Create rule with both --hbacrule and --usercat set via setattr',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,seealso=hbacrule1,setattr=u'usercategory=all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Create rule with both --hbacrule and --hostcat set via setattr',
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,seealso=hbacrule1,setattr=u'hostcategory=all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Create rule %r with --hbacrule' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,seealso=hbacrule1)
),
expected=dict(
value=rule1,
summary=u'Added SELinux User Map "%s"' % rule1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser1],
objectclass=objectclasses.selinuxusermap,
ipauniqueid=[fuzzy_uuid],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
seealso=hbacrule1
),
),
),
dict(
desc='Add an --usercat to %r that has HBAC set' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(usercategory=u'all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Add an --hostcat to %r that has HBAC set' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(hostcategory=u'all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Add an usercat via setattr to %r that has HBAC set' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(setattr=u'usercategory=all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Add an hostcat via setattr to %r that has HBAC set' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(setattr=u'hostcategory=all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Delete %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=dict(
result=dict(failed=u''),
value=rule1,
summary=u'Deleted SELinux User Map "%s"' % rule1,
)
),
dict(
desc='Create rule %r with usercat and hostcat set' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1,usercategory=u'all',hostcategory=u'all')
),
expected=dict(
value=rule1,
summary=u'Added SELinux User Map "%s"' % rule1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser1],
objectclass=objectclasses.selinuxusermap,
ipauniqueid=[fuzzy_uuid],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
usercategory = [u'all'],
hostcategory = [u'all']
),
),
),
dict(
desc='Add HBAC rule to %r that has usercat and hostcat' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(seealso=hbacrule1)
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Delete %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=dict(
result=dict(failed=u''),
value=rule1,
summary=u'Deleted SELinux User Map "%s"' % rule1,
)
),
dict(
desc='Create rule %r' % rule1,
command=(
'selinuxusermap_add', [rule1], dict(ipaselinuxuser=selinuxuser1)
),
expected=dict(
value=rule1,
summary=u'Added SELinux User Map "%s"' % rule1,
result=dict(
cn=[rule1],
ipaselinuxuser=[selinuxuser1],
objectclass=objectclasses.selinuxusermap,
ipauniqueid=[fuzzy_uuid],
ipaenabledflag = [u'TRUE'],
dn=fuzzy_selinuxusermapdn,
),
),
),
dict(
desc='Add HBAC rule, hostcat and usercat to %r' % rule1,
command=(
'selinuxusermap_mod', [rule1], dict(seealso=hbacrule1,usercategory=u'all',hostcategory=u'all')
),
expected=errors.MutuallyExclusiveError(
reason=u'HBAC rule and local members cannot both be set'),
),
dict(
desc='Delete %r' % rule1,
command=('selinuxusermap_del', [rule1], {}),
expected=dict(
result=dict(failed=u''),
value=rule1,
summary=u'Deleted SELinux User Map "%s"' % rule1,
)
),
]
| gpl-3.0 |
esaunders/autopsy | pythonExamples/Aug2015DataSourceTutorial/RunExe.py | 5 | 7492 | # Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Runs img_stat tool from The Sleuth Kit on each data source, saves the
# output, and adds a report to the Case for the output
import jarray
import inspect
import os
import java.util.ArrayList as ArrayList
from java.lang import Class
from java.lang import System
from java.lang import ProcessBuilder
from java.io import File
from java.util.logging import Level
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Image
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import DataSourceIngestModuleProcessTerminator
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import PlatformUtil
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.coreutils import ExecUtil
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class RunExeIngestModuleFactory(IngestModuleFactoryAdapter):
    """Describes this module to Autopsy and builds instances of it."""

    moduleName = "Run EXE Module"

    def getModuleDisplayName(self):
        # Name shown in the ingest-module selection UI.
        return RunExeIngestModuleFactory.moduleName

    def getModuleDescription(self):
        return "Sample module that runs img_stat on each disk image."

    def getModuleVersionNumber(self):
        return "1.0"

    def isDataSourceIngestModuleFactory(self):
        # This factory produces data-source-level (not file-level) modules.
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        # A fresh module instance per ingest job.
        return RunExeIngestModule()
# Data Source-level ingest module. One gets created per data source.
class RunExeIngestModule(DataSourceIngestModule):
    """Data source-level ingest module; one instance is created per data source.

    Runs the bundled img_stat.exe on the disk image, captures its output in
    the case Reports folder, and registers that file as a case report.
    """

    _logger = Logger.getLogger(RunExeIngestModuleFactory.moduleName)

    def log(self, level, msg):
        # Include the calling method's name (from the stack) in the log record.
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self):
        self.context = None

    # Where any setup and configuration is done.
    # 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
    def startUp(self, context):
        self.context = context

        # Get path to EXE based on where this script is run from.
        # Assumes EXE is in same folder as script.
        # Verify it is there before any ingest starts.
        exe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "img_stat.exe")
        self.pathToEXE = File(exe_path)
        if not self.pathToEXE.exists():
            raise IngestModuleException("EXE was not found in module folder")

    # Where the analysis is done.
    # The 'dataSource' object being passed in is of type org.sleuthkit.datamodel.Content.
    # 'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress.
    def process(self, dataSource, progressBar):
        # we don't know how much work there will be
        progressBar.switchToIndeterminate()

        # Example has only a Windows EXE, so bail if we aren't on Windows
        if not PlatformUtil.isWindowsOS():
            self.log(Level.INFO, "Ignoring data source. Not running on Windows")
            return IngestModule.ProcessResult.OK

        # Verify we have a disk image and not a folder of files
        if not isinstance(dataSource, Image):
            self.log(Level.INFO, "Ignoring data source. Not an image")
            return IngestModule.ProcessResult.OK

        # Get disk image paths
        imagePaths = dataSource.getPaths()

        # We'll save our output to a file in the reports folder,
        # named based on EXE and data source ID
        reportFile = File(Case.getCurrentCase().getCaseDirectory() + "\\Reports" + "\\img_stat-" + str(dataSource.getId()) + ".txt")

        # Run the EXE, saving output to reportFile.
        # We use ExecUtil because it will deal with the user cancelling the job.
        self.log(Level.INFO, "Running program on data source")
        cmd = ArrayList()
        cmd.add(self.pathToEXE.toString())
        # Add each argument in its own line. I.e. "-f foo" would be two calls to .add()
        cmd.add(imagePaths[0])

        processBuilder = ProcessBuilder(cmd)
        processBuilder.redirectOutput(reportFile)
        ExecUtil.execute(processBuilder, DataSourceIngestModuleProcessTerminator(self.context))

        # Add the report to the case, so it shows up in the tree.
        # Do not add report to the case tree if the ingest is cancelled before finish.
        if not self.context.dataSourceIngestIsCancelled():
            Case.getCurrentCase().addReport(reportFile.toString(), "Run EXE", "img_stat output")
        else:
            if reportFile.exists():
                if not reportFile.delete():
                    # BUG FIX: original used the undefined name 'LEVEL.warning',
                    # which raised NameError on this path; the imported
                    # java.util.logging constant is Level.WARNING.
                    self.log(Level.WARNING, "Error deleting the incomplete report file")

        return IngestModule.ProcessResult.OK
| apache-2.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/bsddb/test/test_fileid.py | 111 | 1830 | """TestCase for reseting File ID.
"""
import os
import shutil
import unittest
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class FileidResetTestCase(unittest.TestCase):
    """Exercise DBEnv.fileid_reset() on a byte-for-byte copied database file."""

    def setUp(self):
        # Fresh scratch paths for two database files and one environment.
        self.db_path_1 = get_new_database_path()
        self.db_path_2 = get_new_database_path()
        self.db_env_path = get_new_environment_path()

    def test_fileid_reset(self):
        # create DB 1 with a marker record
        self.db1 = db.DB()
        self.db1.open(self.db_path_1, dbtype=db.DB_HASH,
                      flags=(db.DB_CREATE | db.DB_EXCL))
        self.db1.put('spam', 'eggs')
        self.db1.close()

        # Clone the file, then overwrite the marker in the clone so the
        # two files are distinguishable by content.
        shutil.copy(self.db_path_1, self.db_path_2)
        self.db2 = db.DB()
        self.db2.open(self.db_path_2, dbtype=db.DB_HASH)
        self.db2.put('spam', 'spam')
        self.db2.close()

        self.db_env = db.DBEnv()
        self.db_env.open(self.db_env_path, db.DB_CREATE | db.DB_INIT_MPOOL)

        # use fileid_reset() here
        self.db_env.fileid_reset(self.db_path_2)

        # Both files must open in the same environment and keep their
        # own contents after the reset.
        self.db1 = db.DB(self.db_env)
        self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
        self.assertEqual(self.db1.get('spam'), 'eggs')
        self.db2 = db.DB(self.db_env)
        self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
        self.assertEqual(self.db2.get('spam'), 'spam')

        self.db1.close()
        self.db2.close()
        self.db_env.close()

    def tearDown(self):
        # Remove everything the test created.
        test_support.unlink(self.db_path_1)
        test_support.unlink(self.db_path_2)
        test_support.rmtree(self.db_env_path)
def test_suite():
    """Return the suite; DBEnv.fileid_reset() requires Berkeley DB >= 4.4."""
    suite = unittest.TestSuite()
    if db.version() >= (4, 4):
        suite.addTest(unittest.makeSuite(FileidResetTestCase))
    return suite
# Allow running this file directly; delegates test discovery to test_suite().
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| gpl-2.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/numpy/distutils/system_info.py | 15 | 83925 | #!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <pearu@cens.ioc.ee>, February 2002
David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError, ConfigParser
else:
from configparser import NoOptionError, ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
# Map platform.architecture() strings to integer word sizes.
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]  # 32 or 64 for this interpreter
def libpaths(paths, bits):
    """Return a list of library paths valid on 32 or 64 bit systems.

    Inputs:
      paths : sequence
        A sequence of strings (typically paths)
      bits : int
        An integer, the only valid values are 32 or 64.  A ValueError
        exception is raised otherwise.

    On 32-bit platforms the input is returned unchanged; on 64-bit
    platforms each path is preceded by its '64'-suffixed variant:

    >>> np.distutils.system_info.libpaths(['/usr/X11/lib'], 64)
    ['/usr/X11/lib64', '/usr/X11/lib']
    """
    # 32-bit paths are already valid as given.
    if bits == 32:
        return paths
    if bits != 64:
        raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
    # 64-bit: prepend the '64'-suffixed variant of every path.
    return [variant for p in paths for variant in (p + '64', p)]
# Platform-specific default search paths for libraries, headers and sources.
# These feed the ConfigParser defaults in system_info.__init__ below.
if sys.platform == 'win32':
    default_lib_dirs = ['C:\\',
                        os.path.join(distutils.sysconfig.EXEC_PREFIX,
                                     'libs')]
    default_runtime_dirs = []
    default_include_dirs = []
    default_src_dirs = ['.']
    default_x11_lib_dirs = []
    default_x11_include_dirs = []
else:
    default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
                                 '/opt/local/lib', '/sw/lib'], platform_bits)
    default_runtime_dirs = []
    default_include_dirs = ['/usr/local/include',
                            '/opt/include', '/usr/include',
                            # path of umfpack under macports
                            '/opt/local/include/ufsparse',
                            '/opt/local/include', '/sw/include',
                            '/usr/include/suitesparse']
    default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
    default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
                                     '/usr/lib'], platform_bits)
    default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
                                '/usr/include']
    # Debian-style multiarch layout: libX11 may live under /usr/lib/<triplet>.
    if os.path.exists('/usr/lib/X11'):
        globbed_x11_dir = glob('/usr/lib/*/libX11.so')
        if globbed_x11_dir:
            x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
            default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
            default_x11_include_dirs.extend(['/usr/lib/X11/include',
                                             '/usr/include/X11'])
    # Ask gcc for the multiarch triplet (e.g. x86_64-linux-gnu) and add
    # the corresponding /usr/lib/<triplet> directory when available.
    import subprocess as sp
    tmp = None
    try:
        # Explicitly open/close file to avoid ResourceWarning when
        # tests are run in debug mode Python 3.
        tmp = open(os.devnull, 'w')
        p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
                stderr=tmp)
    except (OSError, DistutilsError):
        # OSError if gcc is not installed, or SandboxViolation (DistutilsError
        # subclass) if an old setuptools bug is triggered (see gh-3160).
        pass
    else:
        triplet = str(p.communicate()[0].decode().strip())
        if p.returncode == 0:
            # gcc supports the "-print-multiarch" option
            default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
            default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
    finally:
        if tmp is not None:
            tmp.close()

# Give the interpreter's own prefix first priority.
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
    default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
    default_include_dirs.append(os.path.join(sys.prefix, 'include'))
    default_src_dirs.append(os.path.join(sys.prefix, 'src'))

# Keep only directories that actually exist on this machine.
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]

# Shared-library suffix for this platform (e.g. '.so', '.dylib', '.pyd').
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
    """Returns a list of files named 'fname' from
    1) System-wide directory (directory-location of this module)
    2) Users HOME directory (os.environ['HOME'])
    3) Local directory
    """
    filenames = []

    # 1) Next to this module.  Fall back silently when __file__ is not
    #    defined (e.g. frozen interpreters) -- in that case nothing is
    #    appended for this location.
    try:
        this_file = __file__
    except NameError:
        this_file = sys.argv[0]
    else:
        sysfile = os.path.join(os.path.split(os.path.abspath(this_file))[0],
                               fname)
        if os.path.isfile(sysfile):
            filenames.append(sysfile)

    # 2) The user's home directory (expanduser may raise KeyError when no
    #    home can be determined from the environment).
    try:
        home = os.path.expanduser('~')
    except KeyError:
        pass
    else:
        user_file = os.path.join(home, fname)
        if os.path.isfile(user_file):
            filenames.append(user_file)

    # 3) The current working directory.
    if os.path.isfile(fname):
        filenames.append(os.path.abspath(fname))

    return filenames
def get_info(name, notfound_action=0):
    """
    Return an info dictionary for resource `name`, compatible with
    numpy.distutils.setup keyword arguments ({} when not found).

    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    """
    # Map resource names to their system_info subclasses; unknown names
    # fall back to the base system_info class (which finds nothing).
    cl = {'atlas': atlas_info,  # use lapack_opt or blas_opt instead
          'atlas_threads': atlas_threads_info,                # ditto
          'atlas_blas': atlas_blas_info,
          'atlas_blas_threads': atlas_blas_threads_info,
          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
          'atlas_3_10_threads': atlas_3_10_threads_info,                # ditto
          'atlas_3_10_blas': atlas_3_10_blas_info,
          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
          'mkl': mkl_info,
          # openblas which may or may not have embedded lapack
          'openblas': openblas_info,          # use blas_opt instead
          # openblas with embedded lapack
          'openblas_lapack': openblas_lapack_info, # use blas_opt instead
          'lapack_mkl': lapack_mkl_info,      # use lapack_opt instead
          'blas_mkl': blas_mkl_info,          # use blas_opt instead
          'x11': x11_info,
          'fft_opt': fft_opt_info,
          'fftw': fftw_info,
          'fftw2': fftw2_info,
          'fftw3': fftw3_info,
          'dfftw': dfftw_info,
          'sfftw': sfftw_info,
          'fftw_threads': fftw_threads_info,
          'dfftw_threads': dfftw_threads_info,
          'sfftw_threads': sfftw_threads_info,
          'djbfft': djbfft_info,
          'blas': blas_info,                  # use blas_opt instead
          'lapack': lapack_info,              # use lapack_opt instead
          'lapack_src': lapack_src_info,
          'blas_src': blas_src_info,
          'numpy': numpy_info,
          'f2py': f2py_info,
          'Numeric': Numeric_info,
          'numeric': Numeric_info,
          'numarray': numarray_info,
          'numerix': numerix_info,
          'lapack_opt': lapack_opt_info,
          'blas_opt': blas_opt_info,
          'boost_python': boost_python_info,
          'agg2': agg2_info,
          'wx': wx_info,
          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
          'gdk_pixbuf_2': gdk_pixbuf_2_info,
          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
          'gdk': gdk_info,
          'gdk_2': gdk_2_info,
          'gdk-2.0': gdk_2_info,
          'gdk_x11_2': gdk_x11_2_info,
          'gdk-x11-2.0': gdk_x11_2_info,
          'gtkp_x11_2': gtkp_x11_2_info,
          'gtk+-x11-2.0': gtkp_x11_2_info,
          'gtkp_2': gtkp_2_info,
          'gtk+-2.0': gtkp_2_info,
          'xft': xft_info,
          'freetype2': freetype2_info,
          'umfpack': umfpack_info,
          'amd': amd_info,
          }.get(name.lower(), system_info)
    return cl().get_info(notfound_action)
class NotFoundError(DistutilsError):
    """Some third-party program or library is not found."""


# NOTE: the docstrings of the classes below double as user-facing messages --
# system_info.get_info() passes notfounderror.__doc__ to warnings.warn() or
# to the raised exception -- so their wording is part of runtime behavior.
class AtlasNotFoundError(NotFoundError):
    """
    Atlas (http://math-atlas.sourceforge.net/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [atlas]) or by setting
    the ATLAS environment variable."""


class LapackNotFoundError(NotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [lapack]) or by setting
    the LAPACK environment variable."""


class LapackSrcNotFoundError(LapackNotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) sources not found.
    Directories to search for the sources can be specified in the
    numpy/distutils/site.cfg file (section [lapack_src]) or by setting
    the LAPACK_SRC environment variable."""


class BlasNotFoundError(NotFoundError):
    """
    Blas (http://www.netlib.org/blas/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [blas]) or by setting
    the BLAS environment variable."""


class BlasSrcNotFoundError(BlasNotFoundError):
    """
    Blas (http://www.netlib.org/blas/) sources not found.
    Directories to search for the sources can be specified in the
    numpy/distutils/site.cfg file (section [blas_src]) or by setting
    the BLAS_SRC environment variable."""


class FFTWNotFoundError(NotFoundError):
    """
    FFTW (http://www.fftw.org/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [fftw]) or by setting
    the FFTW environment variable."""


class DJBFFTNotFoundError(NotFoundError):
    """
    DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [djbfft]) or by setting
    the DJBFFT environment variable."""


class NumericNotFoundError(NotFoundError):
    """
    Numeric (http://www.numpy.org/) module not found.
    Get it from above location, install it, and retry setup.py."""


class X11NotFoundError(NotFoundError):
    """X11 libraries not found."""


class UmfpackNotFoundError(NotFoundError):
    """
    UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
    not found. Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [umfpack]) or by setting
    the UMFPACK environment variable."""
class system_info(object):

    """ get_info() is the only public method. Don't use others.
    """
    section = 'ALL'
    dir_env_var = None
    search_static_first = 0  # XXX: disabled by default, may disappear in
                             # future unless it is proved to be useful.
    verbosity = 1
    saved_results = {}

    notfounderror = NotFoundError

    def __init__(self,
                 default_lib_dirs=default_lib_dirs,
                 default_include_dirs=default_include_dirs,
                 verbosity=1,
                 ):
        self.__class__.info = {}
        self.local_prefixes = []
        # Fill ConfigParser defaults from the module-level platform defaults.
        defaults = {}
        defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
        defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
        defaults['runtime_library_dirs'] = os.pathsep.join(default_runtime_dirs)
        defaults['rpath'] = ''
        defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
        defaults['search_static_first'] = str(self.search_static_first)
        defaults['extra_compile_args'] = ''
        defaults['extra_link_args'] = ''
        self.cp = ConfigParser(defaults)
        self.files = []
        self.files.extend(get_standard_file('.numpy-site.cfg'))
        self.files.extend(get_standard_file('site.cfg'))
        self.parse_config_files()
        if self.section is not None:
            self.search_static_first = self.cp.getboolean(
                self.section, 'search_static_first')
        assert isinstance(self.search_static_first, int)

    def parse_config_files(self):
        # Read all site.cfg-style files; make sure our section exists so
        # later cp.get(self.section, ...) calls cannot fail on the section.
        self.cp.read(self.files)
        if not self.cp.has_section(self.section):
            if self.section is not None:
                self.cp.add_section(self.section)

    def calc_libraries_info(self):
        libs = self.get_libraries()
        dirs = self.get_lib_dirs()
        # The extensions use runtime_library_dirs
        r_dirs = self.get_runtime_lib_dirs()
        # Intrinsic distutils use rpath, we simply append both entries
        # as though they were one entry
        r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
        info = {}
        for lib in libs:
            i = self.check_libs(dirs, [lib])
            if i is not None:
                dict_append(info, **i)
            else:
                log.info('Library %s was not found. Ignoring' % (lib))
            i = self.check_libs(r_dirs, [lib])
            if i is not None:
                # Swap library keywords found to runtime_library_dirs
                # the libraries are insisting on the user having defined
                # them using the library_dirs, and not necessarily by
                # runtime_library_dirs
                del i['libraries']
                i['runtime_library_dirs'] = i.pop('library_dirs')
                dict_append(info, **i)
            else:
                log.info('Runtime library %s was not found. Ignoring' % (lib))
        return info

    def set_info(self, **info):
        if info:
            lib_info = self.calc_libraries_info()
            dict_append(info, **lib_info)
            # Update extra information
            extra_info = self.calc_extra_info()
            dict_append(info, **extra_info)
        # Cache per concrete class; an empty dict marks "looked and not found".
        self.saved_results[self.__class__.__name__] = info

    def has_info(self):
        return self.__class__.__name__ in self.saved_results

    def calc_extra_info(self):
        """ Updates the information in the current information with
        respect to these flags:
          extra_compile_args
          extra_link_args
        """
        info = {}
        for key in ['extra_compile_args', 'extra_link_args']:
            # Get values
            opt = self.cp.get(self.section, key)
            if opt:
                tmp = {key: [opt]}
                dict_append(info, **tmp)
        return info

    def get_info(self, notfound_action=0):
        """ Return a dictionary with items that are compatible
        with numpy.distutils.setup keyword arguments.
        """
        flag = 0
        if not self.has_info():
            flag = 1
            log.info(self.__class__.__name__ + ':')
            if hasattr(self, 'calc_info'):
                self.calc_info()
            if notfound_action:
                if not self.has_info():
                    if notfound_action == 1:
                        warnings.warn(self.notfounderror.__doc__)
                    elif notfound_action == 2:
                        raise self.notfounderror(self.notfounderror.__doc__)
                    else:
                        raise ValueError(repr(notfound_action))

            if not self.has_info():
                log.info('  NOT AVAILABLE')
                self.set_info()
            else:
                log.info('  FOUND:')

        res = self.saved_results.get(self.__class__.__name__)
        if self.verbosity > 0 and flag:
            for k, v in res.items():
                v = str(v)
                if k in ['sources', 'libraries'] and len(v) > 270:
                    v = v[:120] + '...\n...\n...' + v[-120:]
                log.info('    %s = %s', k, v)
            log.info('')

        return copy.deepcopy(res)

    def get_paths(self, section, key):
        dirs = self.cp.get(section, key).split(os.pathsep)
        env_var = self.dir_env_var
        if env_var:
            if is_sequence(env_var):
                # When several env vars are acceptable, use the first one
                # that is actually set (the last is the fallback).
                e0 = env_var[-1]
                for e in env_var:
                    if e in os.environ:
                        e0 = e
                        break
                if not env_var[0] == e0:
                    log.info('Setting %s=%s' % (env_var[0], e0))
                env_var = e0
        if env_var and env_var in os.environ:
            d = os.environ[env_var]
            if d == 'None':
                log.info('Disabled %s: %s',
                         self.__class__.__name__, '(%s is None)'
                         % (env_var,))
                return []
            if os.path.isfile(d):
                dirs = [os.path.dirname(d)] + dirs
                l = getattr(self, '_lib_names', [])
                if len(l) == 1:
                    b = os.path.basename(d)
                    b = os.path.splitext(b)[0]
                    if b[:3] == 'lib':
                        log.info('Replacing _lib_names[0]==%r with %r' \
                              % (self._lib_names[0], b[3:]))
                        self._lib_names[0] = b[3:]
            else:
                ds = d.split(os.pathsep)
                ds2 = []
                for d in ds:
                    if os.path.isdir(d):
                        ds2.append(d)
                        for dd in ['include', 'lib']:
                            d1 = os.path.join(d, dd)
                            if os.path.isdir(d1):
                                ds2.append(d1)
                dirs = ds2 + dirs
        default_dirs = self.cp.get(self.section, key).split(os.pathsep)
        dirs.extend(default_dirs)
        ret = []
        # Deduplicate while preserving order; warn on non-existent entries.
        for d in dirs:
            if not os.path.isdir(d):
                warnings.warn('Specified path %s is invalid.' % d)
                continue
            if d not in ret:
                ret.append(d)
        log.debug('( %s = %s )', key, ':'.join(ret))
        return ret

    def get_lib_dirs(self, key='library_dirs'):
        return self.get_paths(self.section, key)

    def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
        return self.get_paths(self.section, key)

    def get_include_dirs(self, key='include_dirs'):
        return self.get_paths(self.section, key)

    def get_src_dirs(self, key='src_dirs'):
        return self.get_paths(self.section, key)

    def get_libs(self, key, default):
        try:
            libs = self.cp.get(self.section, key)
        except NoOptionError:
            if not default:
                return []
            if is_string(default):
                return [default]
            return default
        return [b for b in [a.strip() for a in libs.split(',')] if b]

    def get_libraries(self, key='libraries'):
        return self.get_libs(key, '')

    def library_extensions(self):
        static_exts = ['.a']
        if sys.platform == 'win32':
            static_exts.append('.lib')  # .lib is used by MSVC
        if self.search_static_first:
            exts = static_exts + [so_ext]
        else:
            exts = [so_ext] + static_exts
        if sys.platform == 'cygwin':
            exts.append('.dll.a')
        if sys.platform == 'darwin':
            exts.append('.dylib')
        # Debian and Ubuntu added a g3f suffix to shared library to deal with
        # g77 -> gfortran ABI transition
        # XXX: disabled, it hides more problem than it solves.
        #if sys.platform[:5] == 'linux':
        #    exts.append('.so.3gf')
        return exts

    def check_libs(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks for all libraries as shared libraries first, then
        static (or vice versa if self.search_static_first is True).
        """
        exts = self.library_extensions()
        info = None
        for ext in exts:
            info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
            if info is not None:
                break
        if not info:
            log.info('  libraries %s not found in %s', ','.join(libs),
                     lib_dirs)
        return info

    def check_libs2(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks each library for shared or static.
        """
        exts = self.library_extensions()
        info = self._check_libs(lib_dirs, libs, opt_libs, exts)
        if not info:
            log.info('  libraries %s not found in %s', ','.join(libs),
                     lib_dirs)
        return info

    def _lib_list(self, lib_dir, libs, exts):
        assert is_string(lib_dir)
        liblist = []
        # under windows first try without 'lib' prefix
        if sys.platform == 'win32':
            lib_prefixes = ['', 'lib']
        else:
            lib_prefixes = ['lib']
        # for each library name, see if we can find a file for it.
        for l in libs:
            for ext in exts:
                for prefix in lib_prefixes:
                    p = self.combine_paths(lib_dir, prefix + l + ext)
                    if p:
                        break
                if p:
                    assert len(p) == 1
                    # ??? splitext on p[0] would do this for cygwin
                    # doesn't seem correct
                    if ext == '.dll.a':
                        l += '.dll'
                    liblist.append(l)
                    break
        return liblist

    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
        """Find mandatory and optional libs in expected paths.

        Missing optional libraries are silently forgotten.
        """
        # First, try to find the mandatory libraries
        if is_sequence(lib_dirs):
            found_libs, found_dirs = [], []
            for dir_ in lib_dirs:
                found_libs1 = self._lib_list(dir_, libs, exts)
                # It's possible that we'll find the same library in multiple
                # directories. It's also possible that we'll find some
                # libraries on in directory, and some in another. So the
                # obvious thing would be to use a set instead of a list, but I
                # don't know if preserving order matters (does it?).
                for found_lib in found_libs1:
                    if found_lib not in found_libs:
                        found_libs.append(found_lib)
                    if dir_ not in found_dirs:
                        found_dirs.append(dir_)
        else:
            found_libs = self._lib_list(lib_dirs, libs, exts)
            found_dirs = [lib_dirs]
        if len(found_libs) > 0 and len(found_libs) == len(libs):
            info = {'libraries': found_libs, 'library_dirs': found_dirs}
            # Now, check for optional libraries
            if is_sequence(lib_dirs):
                for dir_ in lib_dirs:
                    opt_found_libs = self._lib_list(dir_, opt_libs, exts)
                    if opt_found_libs:
                        if dir_ not in found_dirs:
                            # BUG FIX: the original called
                            # found_dirs.extend(dir_), which extends with the
                            # individual *characters* of the path string;
                            # append() adds the directory itself.
                            found_dirs.append(dir_)
                        found_libs.extend(opt_found_libs)
            else:
                opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts)
                if opt_found_libs:
                    found_libs.extend(opt_found_libs)
            return info
        else:
            return None

    def combine_paths(self, *args):
        """Return a list of existing paths composed by all combinations
        of items from the arguments.
        """
        return combine_paths(*args, **{'verbosity': self.verbosity})
class fft_opt_info(system_info):
    """Aggregate FFT resources: one FFTW flavour plus optional djbfft."""

    def calc_info(self):
        # Prefer fftw3, then fftw2, then dfftw; djbfft is an optional add-on.
        fftw = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
        djbfft = get_info('djbfft')
        if not fftw:
            # No FFTW at all: record nothing (resource stays unavailable).
            return
        info = {}
        dict_append(info, **fftw)
        if djbfft:
            dict_append(info, **djbfft)
        self.set_info(**info)
        return
class fftw_info(system_info):
    #variables to override
    section = 'fftw'
    dir_env_var = 'FFTW'
    notfounderror = FFTWNotFoundError
    # Candidate FFTW versions, tried in order by calc_info(); first hit wins.
    ver_info = [{'name':'fftw3',
                    'libs':['fftw3'],
                    'includes':['fftw3.h'],
                    'macros':[('SCIPY_FFTW3_H', None)]},
                  {'name':'fftw2',
                    'libs':['rfftw', 'fftw'],
                    'includes':['fftw.h', 'rfftw.h'],
                    'macros':[('SCIPY_FFTW_H', None)]}]

    def calc_ver_info(self, ver_param):
        """Returns True on successful version detection, else False"""
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        incl_dir = None
        libs = self.get_libs(self.section + '_libs', ver_param['libs'])
        info = self.check_libs(lib_dirs, libs)
        if info is not None:
            flag = 0
            # Require every header of this version in one include directory.
            for d in incl_dirs:
                if len(self.combine_paths(d, ver_param['includes'])) \
                   == len(ver_param['includes']):
                    dict_append(info, include_dirs=[d])
                    flag = 1
                    incl_dirs = [d]
                    break
            if flag:
                dict_append(info, define_macros=ver_param['macros'])
            else:
                # Libraries found but headers missing: treat as not found.
                info = None
        if info is not None:
            self.set_info(**info)
            return True
        else:
            log.info('  %s not found' % (ver_param['name']))
            return False

    def calc_info(self):
        # Try each known FFTW flavour until one is successfully detected.
        for i in self.ver_info:
            if self.calc_ver_info(i):
                break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
    """Probe for the threaded FFTW2 libraries."""
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'fftw threads',
                 'libs':['rfftw_threads', 'fftw_threads'],
                 'includes':['fftw_threads.h', 'rfftw_threads.h'],
                 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
    """Probe for the threaded double-precision FFTW2 libraries."""
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'dfftw threads',
                 'libs':['drfftw_threads', 'dfftw_threads'],
                 'includes':['dfftw_threads.h', 'drfftw_threads.h'],
                 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
    """Probe for the threaded single-precision FFTW2 libraries."""
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'sfftw threads',
                 'libs':['srfftw_threads', 'sfftw_threads'],
                 'includes':['sfftw_threads.h', 'srfftw_threads.h'],
                 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
    """Detect the djbfft library, shipped either as a bare object archive
    (djbfft.a) or as a conventional static/shared library (libdjbfft)."""
    section = 'djbfft'
    dir_env_var = 'DJBFFT'
    notfounderror = DJBFFTNotFoundError

    def get_paths(self, section, key):
        # Extend each configured path with a potential 'djbfft' subdirectory.
        candidates = []
        for base in system_info.get_paths(self, section, key):
            candidates.extend(self.combine_paths(base, ['djbfft']) + [base])
        return [c for c in candidates if os.path.isdir(c)]

    def calc_info(self):
        lib_roots = self.get_lib_dirs()
        inc_roots = self.get_include_dirs()
        info = None
        for libdir in lib_roots:
            # Prefer the bare archive; fall back to the lib-prefixed names.
            hit = self.combine_paths(libdir, ['djbfft.a'])
            if hit:
                info = {'extra_objects': hit}
                break
            hit = self.combine_paths(libdir, ['libdjbfft.a', 'libdjbfft' + so_ext])
            if hit:
                info = {'libraries': ['djbfft'], 'library_dirs': [libdir]}
                break
        if info is None:
            return
        # Accept only when both djbfft headers sit in one include directory.
        for incdir in inc_roots:
            if len(self.combine_paths(incdir, ['fftc8.h', 'fftfreq.h'])) == 2:
                dict_append(info, include_dirs=[incdir],
                            define_macros=[('SCIPY_DJBFFT_H', None)])
                self.set_info(**info)
                return
class mkl_info(system_info):
    """Detect Intel MKL via MKLROOT, LD_LIBRARY_PATH or /etc/ld.so.conf."""
    section = 'mkl'
    dir_env_var = 'MKL'
    # Default library set; a platform-specific name may be prepended
    # in __init__.
    _lib_mkl = ['mkl', 'vml', 'guide']

    def get_mkl_rootdir(self):
        # Honour an explicit MKLROOT first.
        mklroot = os.environ.get('MKLROOT', None)
        if mklroot is not None:
            return mklroot
        # Otherwise scan LD_LIBRARY_PATH plus the system loader config
        # for directories that look like an MKL installation.
        paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
        ld_so_conf = '/etc/ld.so.conf'
        if os.path.isfile(ld_so_conf):
            for d in open(ld_so_conf, 'r'):
                d = d.strip()
                if d:
                    paths.append(d)
        intel_mkl_dirs = []
        for path in paths:
            path_atoms = path.split(os.sep)
            for m in path_atoms:
                if m.startswith('mkl'):
                    # Keep the path up to (and including) the component
                    # after the 'mkl*' one, e.g. .../mkl/10.0.1.
                    d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
                    intel_mkl_dirs.append(d)
                    break
        # NOTE(review): intel_mkl_dirs is collected but never consulted
        # below; only the glob scan decides the result.
        for d in paths:
            dirs = glob(os.path.join(d, 'mkl', '*'))
            dirs += glob(os.path.join(d, 'mkl*'))
            for d in dirs:
                # First candidate containing a lib/ subdirectory wins.
                if os.path.isdir(os.path.join(d, 'lib')):
                    return d
        return None

    def __init__(self):
        mklroot = self.get_mkl_rootdir()
        if mklroot is None:
            system_info.__init__(self)
        else:
            from .cpuinfo import cpu
            l = 'mkl'  # use shared library
            # Pick the platform-specific lib subdirectory name.
            if cpu.is_Itanium():
                plt = '64'
                #l = 'mkl_ipf'
            elif cpu.is_Xeon():
                plt = 'intel64'
                #l = 'mkl_intel64'
            else:
                plt = '32'
                #l = 'mkl_ia32'
            if l not in self._lib_mkl:
                self._lib_mkl.insert(0, l)
            system_info.__init__(
                self,
                default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
                default_include_dirs=[os.path.join(mklroot, 'include')])

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
        info = self.check_libs2(lib_dirs, mkl_libs)
        if info is None:
            return
        dict_append(info,
                    define_macros=[('SCIPY_MKL_H', None),
                                   ('HAVE_CBLAS', None)],
                    include_dirs=incl_dirs)
        if sys.platform == 'win32':
            pass  # win32 has no pthread library
        else:
            dict_append(info, libraries=['pthread'])
        self.set_info(**info)
class lapack_mkl_info(mkl_info):
    """LAPACK through Intel MKL: reuse the detected MKL configuration and
    prepend the MKL LAPACK library names."""

    def calc_info(self):
        mkl = get_info('mkl')
        if not mkl:
            return
        # Windows MKL ships a single LAPACK library; other platforms
        # split it by integer width.
        default_names = (['mkl_lapack'] if sys.platform == 'win32'
                         else ['mkl_lapack32', 'mkl_lapack64'])
        lapack_libs = self.get_libs('lapack_libs', default_names)
        info = {'libraries': lapack_libs}
        dict_append(info, **mkl)
        self.set_info(**info)
class blas_mkl_info(mkl_info):
    """BLAS through Intel MKL; plain MKL detection already suffices."""
    pass
class atlas_info(system_info):
    """Detect ATLAS (optimized BLAS/LAPACK) libraries and headers.

    Also checks whether the LAPACK library found alongside ATLAS looks
    complete (by file size) and only warns when it does not.
    """
    section = 'atlas'
    dir_env_var = 'ATLAS'
    _lib_names = ['f77blas', 'cblas']
    # FreeBSD packages the re-entrant variants under different names.
    if sys.platform[:7] == 'freebsd':
        _lib_atlas = ['atlas_r']
        _lib_lapack = ['alapack_r']
    else:
        _lib_atlas = ['atlas']
        _lib_lapack = ['lapack']
    notfounderror = AtlasNotFoundError

    def get_paths(self, section, key):
        # Extend configured paths with typical ATLAS subdirectory names.
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
                                               'sse', '3dnow', 'sse2']) + [d])
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        info = {}
        atlas_libs = self.get_libs('atlas_libs',
                                   self._lib_names + self._lib_atlas)
        lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
        atlas = None
        lapack = None
        atlas_1 = None  # remembers ATLAS found without LAPACK, as a fallback
        for d in lib_dirs:
            atlas = self.check_libs2(d, atlas_libs, [])
            # NOTE(review): lapack_atlas is probed here but the local result
            # is unused; the decision below keys off atlas['libraries'].
            lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
            if atlas is not None:
                lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
                lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
                if lapack is not None:
                    break  # found ATLAS and LAPACK together
            if atlas:
                atlas_1 = atlas
        log.info(self.__class__)
        if atlas is None:
            atlas = atlas_1
        if atlas is None:
            return
        include_dirs = self.get_include_dirs()
        # Locate cblas.h near the libraries or in the include path.
        h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
        h = h[0]
        if h:
            h = os.path.dirname(h)
            dict_append(info, include_dirs=[h])
        info['language'] = 'c'
        if lapack is not None:
            dict_append(info, **lapack)
            dict_append(info, **atlas)
        elif 'lapack_atlas' in atlas['libraries']:
            # LAPACK entry points live inside liblapack_atlas.
            dict_append(info, **atlas)
            dict_append(info,
                        define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
            self.set_info(**info)
            return
        else:
            # BLAS-only ATLAS installation.
            dict_append(info, **atlas)
            dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
            message = """
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
            warnings.warn(message)
            self.set_info(**info)
            return
        # Check if lapack library is complete, only warn if it is not.
        lapack_dir = lapack['library_dirs'][0]
        lapack_name = lapack['libraries'][0]
        lapack_lib = None
        lib_prefixes = ['lib']
        if sys.platform == 'win32':
            lib_prefixes.append('')  # MSVC libs carry no 'lib' prefix
        for e in self.library_extensions():
            for prefix in lib_prefixes:
                fn = os.path.join(lapack_dir, prefix + lapack_name + e)
                if os.path.exists(fn):
                    lapack_lib = fn
                    break
            if lapack_lib:
                break
        if lapack_lib is not None:
            sz = os.stat(lapack_lib)[6]  # st_size
            # A full LAPACK build is several MB; a small file usually
            # means only the ATLAS subset of LAPACK got built.
            if sz <= 4000 * 1024:
                message = """
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
                warnings.warn(message)
            else:
                info['language'] = 'f77'
        atlas_version, atlas_extra_info = get_atlas_version(**atlas)
        dict_append(info, **atlas_extra_info)
        self.set_info(**info)
class atlas_blas_info(atlas_info):
    """BLAS-only ATLAS detection (skips the LAPACK completeness check)."""
    _lib_names = ['f77blas', 'cblas']

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        info = {}
        wanted = self.get_libs('atlas_libs',
                               self._lib_names + self._lib_atlas)
        atlas = self.check_libs2(lib_dirs, wanted, [])
        if atlas is None:
            return
        include_dirs = self.get_include_dirs()
        # Pick the first directory that actually contains cblas.h.
        hits = self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]
        header = hits[0]
        if header:
            dict_append(info, include_dirs=[os.path.dirname(header)])
        info['language'] = 'c'
        info['define_macros'] = [('HAVE_CBLAS', None)]
        # Fold in the detected ATLAS version macros before publishing.
        version, extra = get_atlas_version(**atlas)
        dict_append(atlas, **extra)
        dict_append(info, **atlas)
        self.set_info(**info)
class atlas_threads_info(atlas_info):
    """Threaded ATLAS (pt-prefixed libraries); PTATLAS env var preferred."""
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
    """Threaded BLAS-only ATLAS (pt-prefixed libraries)."""
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
    """ATLAS detection that also requires the lapack_atlas library."""
    _lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
    """Threaded ATLAS detection that also requires lapack_atlas."""
    _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
    """ATLAS >= 3.10, which ships everything in a single 'satlas' library."""
    _lib_names = ['satlas']
    _lib_atlas = _lib_names
    _lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
    """BLAS via the single-library ATLAS >= 3.10 layout."""
    _lib_names = ['satlas']

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        info = {}
        wanted = self.get_libs('atlas_libs', self._lib_names)
        atlas = self.check_libs2(lib_dirs, wanted, [])
        if atlas is None:
            return
        include_dirs = self.get_include_dirs()
        # Pick the first directory that actually contains cblas.h.
        hits = self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]
        header = hits[0]
        if header:
            dict_append(info, include_dirs=[os.path.dirname(header)])
        info['language'] = 'c'
        info['define_macros'] = [('HAVE_CBLAS', None)]
        # Fold in the detected ATLAS version macros before publishing.
        version, extra = get_atlas_version(**atlas)
        dict_append(atlas, **extra)
        dict_append(info, **atlas)
        self.set_info(**info)
class atlas_3_10_threads_info(atlas_3_10_info):
    """Threaded ATLAS >= 3.10 ('tatlas' single library)."""
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['tatlas']
    # No freebsd-specific names here ("if sys.platform[:7] == 'freebsd'"):
    # FreeBSD was not believed to support ATLAS 3.10 as of 2014.
    _lib_atlas = _lib_names
    _lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
    """Threaded BLAS via ATLAS >= 3.10 ('tatlas')."""
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
    """LAPACK via ATLAS >= 3.10; base detection already covers it."""
    pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
    """Threaded LAPACK via ATLAS >= 3.10; base detection already covers it."""
    pass
class lapack_info(system_info):
    """Locate a plain (reference) LAPACK library."""
    section = 'lapack'
    dir_env_var = 'LAPACK'
    _lib_names = ['lapack']
    notfounderror = LapackNotFoundError

    def calc_info(self):
        dirs = self.get_lib_dirs()
        wanted = self.get_libs('lapack_libs', self._lib_names)
        found = self.check_libs(dirs, wanted, [])
        if found is not None:
            found['language'] = 'f77'  # reference LAPACK is Fortran 77
            self.set_info(**found)
class lapack_src_info(system_info):
    """Locate a LAPACK source tree and enumerate the .f files to compile.

    The routine name lists below mirror the LAPACK 3.0 SRC/Makefile with
    additions from lapack-lite 3.1.1; files that do not exist in the
    found tree are filtered out at the end.
    """
    section = 'lapack_src'
    dir_env_var = 'LAPACK_SRC'
    notfounderror = LapackSrcNotFoundError

    def get_paths(self, section, key):
        # Also look inside LAPACK*/SRC and SRC subdirectories.
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        src_dirs = self.get_src_dirs()
        src_dir = ''
        # dgesv.f is used as the marker of a genuine LAPACK source tree.
        for d in src_dirs:
            if os.path.isfile(os.path.join(d, 'dgesv.f')):
                src_dir = d
                break
        if not src_dir:
            #XXX: Get sources from netlib. May be ask first.
            return
        # The following is extracted from LAPACK-3.0/SRC/Makefile.
        # Added missing names from lapack-lite-3.1.1/SRC/Makefile
        # while keeping removed names for Lapack-3.0 compatibility.
        allaux = '''
        ilaenv ieeeck lsame lsamen xerbla
        iparmq
        '''  # *.f
        laux = '''
        bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
        laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
        lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
        larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
        lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
        lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
        stebz stedc steqr sterf

        larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
        lazq3 lazq4
        '''  # [s|d]*.f
        lasrc = '''
        gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
        gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
        gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
        geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
        gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
        gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
        ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
        hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
        lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
        lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
        laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
        lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
        latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
        pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
        potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
        pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
        spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
        sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
        tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
        trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
        tzrqf tzrzf

        lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
        '''  # [s|c|d|z]*.f
        sd_lasrc = '''
        laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
        org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
        orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
        ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
        sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
        stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
        sygvx sytd2 sytrd
        '''  # [s|d]*.f
        cz_lasrc = '''
        bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
        heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
        hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
        hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
        hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
        laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
        laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
        spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
        ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
        unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
        '''  # [c|z]*.f
        #######
        # Compose the per-precision source lists from the shared names.
        sclaux = laux + ' econd '  # s*.f
        dzlaux = laux + ' secnd '  # d*.f
        slasrc = lasrc + sd_lasrc  # s*.f
        dlasrc = lasrc + sd_lasrc  # d*.f
        clasrc = lasrc + cz_lasrc + ' srot srscl '  # c*.f
        zlasrc = lasrc + cz_lasrc + ' drot drscl '  # z*.f
        oclasrc = ' icmax1 scsum1 '  # *.f
        ozlasrc = ' izmax1 dzsum1 '  # *.f
        sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
                  + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
                  + ['c%s.f' % f for f in (clasrc).split()] \
                  + ['z%s.f' % f for f in (zlasrc).split()] \
                  + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
        sources = [os.path.join(src_dir, f) for f in sources]
        # Lapack 3.1:
        src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
        sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
        # Lapack 3.2.1:
        sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
        sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
        sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
        # Should we check here actual existence of source files?
        # Yes, the file listing is different between 3.0 and 3.1
        # versions.
        sources = [f for f in sources if os.path.isfile(f)]
        info = {'sources': sources, 'language': 'f77'}
        self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
    """Determine the version of a detected ATLAS installation.

    Compiles and runs a tiny C program that calls ATL_buildinfo() and
    parses its output.  Returns ``(version_string, info_dict)`` where the
    info dict carries the macros to define (ATLAS_INFO or NO_ATLAS_INFO,
    plus ATLAS_REQUIRES_GFORTRAN when a gfortran link was needed).
    Results are memoised in _cached_atlas_version.
    """
    libraries = config.get('libraries', [])
    library_dirs = config.get('library_dirs', [])
    key = (tuple(libraries), tuple(library_dirs))
    if key in _cached_atlas_version:
        return _cached_atlas_version[key]
    c = cmd_config(Distribution())
    atlas_version = None
    info = {}
    try:
        s, o = c.get_output(atlas_version_c_text,
                            libraries=libraries, library_dirs=library_dirs,
                            use_tee=(system_info.verbosity > 0))
        # Retry with gfortran when the link failed on gfortran symbols
        # (ATLAS built with gfortran needs it at link time).
        if s and re.search(r'undefined reference to `_gfortran', o, re.M):
            s, o = c.get_output(atlas_version_c_text,
                                libraries=libraries + ['gfortran'],
                                library_dirs=library_dirs,
                                use_tee=(system_info.verbosity > 0))
            if not s:
                warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""")
                dict_append(info, language='f90',
                            define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
    except Exception:  # failed to get version from file -- maybe on Windows
        # look at directory name
        for o in library_dirs:
            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
            if m:
                atlas_version = m.group('version')
            if atlas_version is not None:
                break

        # final choice --- look at ATLAS_VERSION environment
        # variable
        if atlas_version is None:
            atlas_version = os.environ.get('ATLAS_VERSION', None)
        if atlas_version:
            dict_append(info, define_macros=[(
                'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
            ])
        else:
            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
        # NOTE: this early return means the parsing below only runs when
        # the compile/run probe did not raise.
        return atlas_version or '?.?.?', info

    if not s:
        # Program ran: parse the printed version string.
        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
        if m:
            atlas_version = m.group('version')
    if atlas_version is None:
        # Very old ATLAS (pre-3.3.6) lacks ATL_buildinfo entirely.
        if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
            atlas_version = '3.2.1_pre3.3.6'
        else:
            log.info('Status: %d', s)
            log.info('Output: %s', o)

    if atlas_version == '3.2.1_pre3.3.6':
        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
    else:
        dict_append(info, define_macros=[(
            'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
        ])
    result = _cached_atlas_version[key] = atlas_version, info
    return result
class lapack_opt_info(system_info):
    """Pick the best available LAPACK: OpenBLAS, MKL, ATLAS, Apple
    Accelerate/vecLib, and finally plain LAPACK (binary or sources)."""
    notfounderror = LapackNotFoundError

    def calc_info(self):
        # Preference order: OpenBLAS-with-LAPACK, then MKL.
        openblas_info = get_info('openblas_lapack')
        if openblas_info:
            self.set_info(**openblas_info)
            return

        lapack_mkl_info = get_info('lapack_mkl')
        if lapack_mkl_info:
            self.set_info(**lapack_mkl_info)
            return

        # ATLAS, newest layout first.
        atlas_info = get_info('atlas_3_10_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_3_10')
        if not atlas_info:
            atlas_info = get_info('atlas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas')

        if sys.platform == 'darwin' and not atlas_info:
            # Use the system lapack from Accelerate or vecLib under OSX
            args = []
            link_args = []
            if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
               'x86_64' in get_platform() or \
               'i386' in platform.platform():
                intel = 1
            else:
                intel = 0
            if os.path.exists('/System/Library/Frameworks'
                              '/Accelerate.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])  # PowerPC vector unit
                link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
            elif os.path.exists('/System/Library/Frameworks'
                                '/vecLib.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
            if args:
                self.set_info(extra_compile_args=args,
                              extra_link_args=link_args,
                              define_macros=[('NO_ATLAS_INFO', 3),
                                             ('HAVE_CBLAS', None)])
                return

        #atlas_info = {}  ## uncomment for testing
        need_lapack = 0
        need_blas = 0
        info = {}
        if atlas_info:
            l = atlas_info.get('define_macros', [])
            # ATLAS without a real LAPACK still needs one from elsewhere.
            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
               or ('ATLAS_WITHOUT_LAPACK', None) in l:
                need_lapack = 1
            info = atlas_info
        else:
            warnings.warn(AtlasNotFoundError.__doc__)
            need_blas = 1
            need_lapack = 1
            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])

        if need_lapack:
            lapack_info = get_info('lapack')
            #lapack_info = {}  ## uncomment for testing
            if lapack_info:
                dict_append(info, **lapack_info)
            else:
                warnings.warn(LapackNotFoundError.__doc__)
                # Last resort: compile LAPACK from sources.
                lapack_src_info = get_info('lapack_src')
                if not lapack_src_info:
                    warnings.warn(LapackSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('flapack_src', lapack_src_info)])

        if need_blas:
            blas_info = get_info('blas')
            #blas_info = {}  ## uncomment for testing
            if blas_info:
                dict_append(info, **blas_info)
            else:
                warnings.warn(BlasNotFoundError.__doc__)
                # Last resort: compile BLAS from sources.
                blas_src_info = get_info('blas_src')
                if not blas_src_info:
                    warnings.warn(BlasSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('fblas_src', blas_src_info)])

        self.set_info(**info)
        return
class blas_opt_info(system_info):
    """Pick the best available BLAS: MKL, OpenBLAS, ATLAS, Apple
    Accelerate/vecLib, and finally plain BLAS (binary or sources)."""
    notfounderror = BlasNotFoundError

    def calc_info(self):
        blas_mkl_info = get_info('blas_mkl')
        if blas_mkl_info:
            self.set_info(**blas_mkl_info)
            return

        openblas_info = get_info('openblas')
        if openblas_info:
            self.set_info(**openblas_info)
            return

        # ATLAS, newest layout first.
        atlas_info = get_info('atlas_3_10_blas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_3_10_blas')
        if not atlas_info:
            atlas_info = get_info('atlas_blas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_blas')

        if sys.platform == 'darwin' and not atlas_info:
            # Use the system BLAS from Accelerate or vecLib under OSX
            args = []
            link_args = []
            if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
               'x86_64' in get_platform() or \
               'i386' in platform.platform():
                intel = 1
            else:
                intel = 0
            if os.path.exists('/System/Library/Frameworks'
                              '/Accelerate.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])  # PowerPC vector unit
                args.extend([
                    '-I/System/Library/Frameworks/vecLib.framework/Headers'])
                link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
            elif os.path.exists('/System/Library/Frameworks'
                                '/vecLib.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                args.extend([
                    '-I/System/Library/Frameworks/vecLib.framework/Headers'])
                link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
            if args:
                self.set_info(extra_compile_args=args,
                              extra_link_args=link_args,
                              define_macros=[('NO_ATLAS_INFO', 3),
                                             ('HAVE_CBLAS', None)])
                return

        need_blas = 0
        info = {}
        if atlas_info:
            info = atlas_info
        else:
            warnings.warn(AtlasNotFoundError.__doc__)
            need_blas = 1
            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])

        if need_blas:
            blas_info = get_info('blas')
            if blas_info:
                dict_append(info, **blas_info)
            else:
                warnings.warn(BlasNotFoundError.__doc__)
                # Last resort: compile BLAS from sources.
                blas_src_info = get_info('blas_src')
                if not blas_src_info:
                    warnings.warn(BlasSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('fblas_src', blas_src_info)])

        self.set_info(**info)
        return
class blas_info(system_info):
    """Locate a plain (reference) BLAS library."""
    section = 'blas'
    dir_env_var = 'BLAS'
    _lib_names = ['blas']
    notfounderror = BlasNotFoundError

    def calc_info(self):
        dirs = self.get_lib_dirs()
        wanted = self.get_libs('blas_libs', self._lib_names)
        found = self.check_libs(dirs, wanted, [])
        if found is not None:
            found['language'] = 'f77'  # XXX: is it generally true?
            self.set_info(**found)
class openblas_info(blas_info):
    """Locate OpenBLAS.

    The 'libraries' config key is preferred; 'openblas_libs' is still
    honoured for backward compatibility with numpy 1.8.0 config files.
    """
    section = 'openblas'
    dir_env_var = 'OPENBLAS'
    _lib_names = ['openblas']
    notfounderror = BlasNotFoundError

    def check_embedded_lapack(self, info):
        # Plain OpenBLAS does not require embedded LAPACK; always accept.
        return True

    def calc_info(self):
        dirs = self.get_lib_dirs()
        libs = self.get_libs('libraries', self._lib_names)
        if libs == self._lib_names:  # backward compat with 1.8.0
            libs = self.get_libs('openblas_libs', self._lib_names)
        found = self.check_libs(dirs, libs, [])
        if found is None:
            return
        # Add extra info for OpenBLAS
        dict_append(found, **self.calc_extra_info())
        if not self.check_embedded_lapack(found):
            return
        found['language'] = 'c'
        found['define_macros'] = [('HAVE_CBLAS', None)]
        self.set_info(**found)
class openblas_lapack_info(openblas_info):
    """OpenBLAS detection that additionally verifies the detected build
    embeds LAPACK.

    ``check_embedded_lapack`` compiles and links a tiny C program calling
    the LAPACK symbol ``zungqr_``; a successful link means this OpenBLAS
    was built with LAPACK support.
    """
    section = 'openblas'
    dir_env_var = 'OPENBLAS'
    _lib_names = ['openblas']
    notfounderror = BlasNotFoundError

    def check_embedded_lapack(self, info):
        """Return True if a test program calling ``zungqr_`` links against
        the libraries described by *info* (keys: 'libraries',
        'library_dirs', optionally 'extra_link_args')."""
        res = False
        c = distutils.ccompiler.new_compiler()
        tmpdir = tempfile.mkdtemp()
        # FIX: declare ``zungqr_`` (with the Fortran trailing underscore).
        # The previous text declared ``zungqr`` but called ``zungqr_``,
        # leaving the call an implicit declaration that strict C99
        # compilers reject outright.
        s = """void zungqr_();
int main(int argc, const char *argv[])
{
    zungqr_();
    return 0;
}"""
        src = os.path.join(tmpdir, 'source.c')
        out = os.path.join(tmpdir, 'a.out')
        # Extra linker arguments are optional in the info dict; the old
        # bare ``except:`` here hid every kind of error, not just KeyError.
        extra_args = info.get('extra_link_args', [])
        try:
            with open(src, 'wt') as f:
                f.write(s)
            obj = c.compile([src], output_dir=tmpdir)
            try:
                c.link_executable(obj, out, libraries=info['libraries'],
                                  library_dirs=info['library_dirs'],
                                  extra_postargs=extra_args)
                res = True
            except distutils.ccompiler.LinkError:
                res = False
        finally:
            shutil.rmtree(tmpdir)
        if sys.platform == 'win32' and not res:
            # MSVC can fail to link a gfortran-built OpenBLAS even when
            # LAPACK is embedded; retry once with mingw32.
            c = distutils.ccompiler.new_compiler(compiler='mingw32')
            tmpdir = tempfile.mkdtemp()
            src = os.path.join(tmpdir, 'source.c')
            out = os.path.join(tmpdir, 'a.out')
            try:
                with open(src, 'wt') as f:
                    f.write(s)
                obj = c.compile([src], output_dir=tmpdir)
                try:
                    c.link_executable(obj, out, libraries=info['libraries'],
                                      library_dirs=info['library_dirs'],
                                      extra_postargs=extra_args)
                    res = True
                except distutils.ccompiler.LinkError:
                    res = False
            finally:
                shutil.rmtree(tmpdir)
        return res
class blas_src_info(system_info):
    """Locate a reference BLAS source tree and enumerate the .f files.

    The routine name lists below cover the level 1/2/3 reference BLAS;
    files missing from the found tree are filtered out at the end.
    """
    section = 'blas_src'
    dir_env_var = 'BLAS_SRC'
    notfounderror = BlasSrcNotFoundError

    def get_paths(self, section, key):
        # Also look inside a 'blas' subdirectory of each configured path.
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['blas']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        src_dirs = self.get_src_dirs()
        src_dir = ''
        # daxpy.f is used as the marker of a genuine BLAS source tree.
        for d in src_dirs:
            if os.path.isfile(os.path.join(d, 'daxpy.f')):
                src_dir = d
                break
        if not src_dir:
            #XXX: Get sources from netlib. May be ask first.
            return
        blas1 = '''
        caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
        dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
        srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
        dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
        snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
        scabs1
        '''
        blas2 = '''
        cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
        chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
        dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
        sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
        stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
        zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
        ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
        '''
        blas3 = '''
        cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
        dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
        ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
        '''
        sources = [os.path.join(src_dir, f + '.f') \
                   for f in (blas1 + blas2 + blas3).split()]
        #XXX: should we check here actual existence of source files?
        sources = [f for f in sources if os.path.isfile(f)]
        info = {'sources': sources, 'language': 'f77'}
        self.set_info(**info)
class x11_info(system_info):
    """Locate the X11 client library and headers (not applicable on win32)."""
    section = 'x11'
    notfounderror = X11NotFoundError

    def __init__(self):
        system_info.__init__(self,
                             default_lib_dirs=default_x11_lib_dirs,
                             default_include_dirs=default_x11_include_dirs)

    def calc_info(self):
        if sys.platform in ['win32']:
            return
        lib_dirs = self.get_lib_dirs()
        include_dirs = self.get_include_dirs()
        wanted = self.get_libs('x11_libs', ['X11'])
        found = self.check_libs(lib_dirs, wanted, [])
        if found is None:
            return
        # Record the first include directory actually holding X11/X.h.
        header_dir = next(
            (d for d in include_dirs if self.combine_paths(d, 'X11/X.h')),
            None)
        if header_dir is not None:
            dict_append(found, include_dirs=[header_dir])
        self.set_info(**found)
class _numpy_info(system_info):
    """Detect a numeric-array module (Numeric/numarray/numpy) by import.

    Subclasses set ``modulename``; detection imports the module and
    records its version macros and the include directory containing
    <modulename>/arrayobject.h.
    """
    section = 'Numeric'
    modulename = 'Numeric'
    notfounderror = NumericNotFoundError

    def __init__(self):
        include_dirs = []
        try:
            module = __import__(self.modulename)
            prefix = []
            # Reconstruct the installation prefix from the module path
            # (everything before the first 'lib' path component).
            for name in module.__file__.split(os.sep):
                if name == 'lib':
                    break
                prefix.append(name)

            # Ask numpy for its own include path before attempting
            # anything else
            try:
                include_dirs.append(getattr(module, 'get_include')())
            except AttributeError:
                pass

            include_dirs.append(distutils.sysconfig.get_python_inc(
                prefix=os.sep.join(prefix)))
        except ImportError:
            pass
        py_incl_dir = distutils.sysconfig.get_python_inc()
        include_dirs.append(py_incl_dir)
        py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
        if py_pincl_dir not in include_dirs:
            include_dirs.append(py_pincl_dir)
        for d in default_include_dirs:
            d = os.path.join(d, os.path.basename(py_incl_dir))
            if d not in include_dirs:
                include_dirs.append(d)
        system_info.__init__(self,
                             default_lib_dirs=[],
                             default_include_dirs=include_dirs)

    def calc_info(self):
        try:
            module = __import__(self.modulename)
        except ImportError:
            return
        info = {}
        macros = []
        # Record <MODULENAME>_VERSION from whichever version attribute
        # the module exposes first.
        for v in ['__version__', 'version']:
            vrs = getattr(module, v, None)
            if vrs is None:
                continue
            macros = [(self.modulename.upper() + '_VERSION',
                       '"\\"%s\\""' % (vrs)),
                      (self.modulename.upper(), None)]
            break
##         try:
##             macros.append(
##                 (self.modulename.upper()+'_VERSION_HEX',
##                  hex(vstr2hex(module.__version__))),
##                 )
##         except Exception as msg:
##             print msg
        dict_append(info, define_macros=macros)
        include_dirs = self.get_include_dirs()
        inc_dir = None
        # Find the directory that actually holds the C API header.
        for d in include_dirs:
            if self.combine_paths(d,
                                  os.path.join(self.modulename,
                                               'arrayobject.h')):
                inc_dir = d
                break
        if inc_dir is not None:
            dict_append(info, include_dirs=[inc_dir])
        if info:
            self.set_info(**info)
        return
class numarray_info(_numpy_info):
    """Detect the numarray package."""
    section = 'numarray'
    modulename = 'numarray'
class Numeric_info(_numpy_info):
    """Detect the (legacy) Numeric package."""
    section = 'Numeric'
    modulename = 'Numeric'
class numpy_info(_numpy_info):
    """Detect the numpy package."""
    section = 'numpy'
    modulename = 'numpy'
class numerix_info(system_info):
    """Select the numeric backend (numpy/Numeric/numarray).

    The NUMERIX environment variable wins; otherwise the first of
    numpy, Numeric, numarray that imports is chosen.  The choice is
    written back to os.environ['NUMERIX'].
    """
    section = 'numerix'

    def calc_info(self):
        which = None, None
        if os.getenv("NUMERIX"):
            which = os.getenv("NUMERIX"), "environment var"
        # If all the above fail, default to numpy.
        if which[0] is None:
            which = "numpy", "defaulted"
            try:
                import numpy
                which = "numpy", "defaulted"
            except ImportError:
                msg1 = str(get_exception())
                try:
                    import Numeric
                    which = "numeric", "defaulted"
                except ImportError:
                    msg2 = str(get_exception())
                    try:
                        import numarray
                        which = "numarray", "defaulted"
                    except ImportError:
                        # Nothing importable: log all three failures.
                        msg3 = str(get_exception())
                        log.info(msg1)
                        log.info(msg2)
                        log.info(msg3)
        which = which[0].strip().lower(), which[1]
        if which[0] not in ["numeric", "numarray", "numpy"]:
            raise ValueError("numerix selector must be either 'Numeric' "
                             "or 'numarray' or 'numpy' but the value obtained"
                             " from the %s was '%s'." % (which[1], which[0]))
        os.environ['NUMERIX'] = which[0]
        self.set_info(**get_info(which[0]))
class f2py_info(system_info):
    """Expose f2py's fortranobject.c source and its include directory."""

    def calc_info(self):
        try:
            import numpy.f2py as f2py
        except ImportError:
            return
        # fortranobject.c lives in the 'src' directory next to the package.
        src_root = os.path.join(os.path.dirname(f2py.__file__), 'src')
        self.set_info(
            sources=[os.path.join(src_root, 'fortranobject.c')],
            include_dirs=[src_root])
class boost_python_info(system_info):
    """Locate a Boost.Python source tree and expose it as a build library."""
    section = 'boost_python'
    dir_env_var = 'BOOST'

    def get_paths(self, section, key):
        # Also look inside boost* subdirectories of each configured path.
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['boost*']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        src_dirs = self.get_src_dirs()
        src_dir = ''
        # libs/python/src/module.cpp marks a genuine Boost source tree.
        for d in src_dirs:
            if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
                                           'module.cpp')):
                src_dir = d
                break
        if not src_dir:
            return
        py_incl_dirs = [distutils.sysconfig.get_python_inc()]
        py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
        if py_pincl_dir not in py_incl_dirs:
            py_incl_dirs.append(py_pincl_dir)
        srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
        # Collect all Boost.Python .cpp sources (one level of nesting).
        bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
        bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
        info = {'libraries': [('boost_python_src',
                               {'include_dirs': [src_dir] + py_incl_dirs,
                                'sources':bpl_srcs}
                              )],
                'include_dirs': [src_dir],
                }
        if info:
            self.set_info(**info)
        return
class agg2_info(system_info):
    """Locate an Anti-Grain Geometry (AGG 2.x) source tree."""
    section = 'agg2'
    dir_env_var = 'AGG2'

    def get_paths(self, section, key):
        # Also look inside agg2* subdirectories of each configured path.
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['agg2*']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        src_dirs = self.get_src_dirs()
        src_dir = ''
        # src/agg_affine_matrix.cpp marks a genuine AGG2 source tree.
        for d in src_dirs:
            if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
                src_dir = d
                break
        if not src_dir:
            return
        if sys.platform == 'win32':
            # On Windows only the win32 bitmap platform file is needed.
            agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
                                          'win32', 'agg_win32_bmp.cpp'))
        else:
            # Elsewhere build all sources plus the X11 platform support.
            agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
            agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
                                       'X11',
                                       'agg_platform_support.cpp')]

        info = {'libraries':
                [('agg2_src',
                  {'sources': agg2_srcs,
                   'include_dirs': [os.path.join(src_dir, 'include')],
                  }
                 )],
                'include_dirs': [os.path.join(src_dir, 'include')],
                }
        if info:
            self.set_info(**info)
        return
class _pkg_config_info(system_info):
    """Base class for detection via a pkg-config style executable.

    Subclasses set ``section``, the config executable names/flags, and
    optional version/release macro names.  ``calc_info`` runs the tool,
    parses its --libs and cflags output, and publishes the result.
    """
    section = None
    config_env_var = 'PKG_CONFIG'
    default_config_exe = 'pkg-config'
    append_config_exe = ''
    version_macro_name = None
    release_macro_name = None
    version_flag = '--modversion'
    cflags_flag = '--cflags'

    def get_config_exe(self):
        # An environment variable overrides the default executable name.
        if self.config_env_var in os.environ:
            return os.environ[self.config_env_var]
        return self.default_config_exe

    def get_config_output(self, config_exe, option):
        cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
        s, o = exec_command(cmd, use_tee=0)
        # Returns the output only on success (exit status 0), else None.
        if not s:
            return o

    def calc_info(self):
        config_exe = find_executable(self.get_config_exe())
        if not config_exe:
            log.warn('File not found: %s. Cannot determine %s info.' \
                  % (config_exe, self.section))
            return
        info = {}
        macros = []
        libraries = []
        library_dirs = []
        include_dirs = []
        extra_link_args = []
        extra_compile_args = []
        version = self.get_config_output(config_exe, self.version_flag)
        if version:
            # e.g. WX_INFO macro carrying the quoted version string.
            macros.append((self.__class__.__name__.split('.')[-1].upper(),
                           '"\\"%s\\""' % (version)))
            if self.version_macro_name:
                # e.g. WX_VERSION_2_8 style feature-test macro.
                macros.append((self.version_macro_name + '_%s'
                               % (version.replace('.', '_')), None))
        if self.release_macro_name:
            release = self.get_config_output(config_exe, '--release')
            if release:
                macros.append((self.release_macro_name + '_%s'
                               % (release.replace('.', '_')), None))
        opts = self.get_config_output(config_exe, '--libs')
        if opts:
            # Split -l / -L options; anything else is a raw linker arg.
            for opt in opts.split():
                if opt[:2] == '-l':
                    libraries.append(opt[2:])
                elif opt[:2] == '-L':
                    library_dirs.append(opt[2:])
                else:
                    extra_link_args.append(opt)
        opts = self.get_config_output(config_exe, self.cflags_flag)
        if opts:
            # Split -I / -D options; anything else is a raw compiler arg.
            for opt in opts.split():
                if opt[:2] == '-I':
                    include_dirs.append(opt[2:])
                elif opt[:2] == '-D':
                    if '=' in opt:
                        n, v = opt[2:].split('=')
                        macros.append((n, v))
                    else:
                        macros.append((opt[2:], None))
                else:
                    extra_compile_args.append(opt)
        if macros:
            dict_append(info, define_macros=macros)
        if libraries:
            dict_append(info, libraries=libraries)
        if library_dirs:
            dict_append(info, library_dirs=library_dirs)
        if include_dirs:
            dict_append(info, include_dirs=include_dirs)
        if extra_link_args:
            dict_append(info, extra_link_args=extra_link_args)
        if extra_compile_args:
            dict_append(info, extra_compile_args=extra_compile_args)
        if info:
            self.set_info(**info)
        return
class wx_info(_pkg_config_info):
    """wxWidgets detection via the wx-config tool (not plain pkg-config)."""
    section = 'wx'
    config_env_var = 'WX_CONFIG'
    default_config_exe = 'wx-config'
    append_config_exe = ''
    version_macro_name = 'WX_VERSION'
    release_macro_name = 'WX_RELEASE'
    version_flag = '--version'
    cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
    """Detect gdk-pixbuf-xlib-2.0 via pkg-config."""
    section = 'gdk_pixbuf_xlib_2'
    append_config_exe = 'gdk-pixbuf-xlib-2.0'
    version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
    """Detect gdk-pixbuf-2.0 via pkg-config."""
    section = 'gdk_pixbuf_2'
    append_config_exe = 'gdk-pixbuf-2.0'
    version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
    """Detect gdk-x11-2.0 via pkg-config."""
    section = 'gdk_x11_2'
    append_config_exe = 'gdk-x11-2.0'
    version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
    """Detect gdk-2.0 via pkg-config."""
    section = 'gdk_2'
    append_config_exe = 'gdk-2.0'
    version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
    """Detect gdk (1.x) via pkg-config."""
    section = 'gdk'
    append_config_exe = 'gdk'
    version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
    """Detect gtk+-x11-2.0 via pkg-config."""
    section = 'gtkp_x11_2'
    append_config_exe = 'gtk+-x11-2.0'
    version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
    """Detect gtk+-2.0 via pkg-config."""
    section = 'gtkp_2'
    append_config_exe = 'gtk+-2.0'
    version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
    """Detect the Xft font library via pkg-config."""
    section = 'xft'
    append_config_exe = 'xft'
    version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
    """Detect the FreeType 2 font engine via pkg-config."""
    section = 'freetype2'
    append_config_exe = 'freetype2'
    version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
    """Detect the AMD (Approximate Minimum Degree ordering) library."""
    section = 'amd'
    dir_env_var = 'AMD'
    _lib_names = ['amd']
    def calc_info(self):
        # Bail out unless the amd library itself can be located.
        info = self.check_libs(self.get_lib_dirs(),
                               self.get_libs('amd_libs', self._lib_names),
                               [])
        if info is None:
            return
        # Search the configured include directories for amd.h; the first
        # hit wins.
        header_dir = None
        for candidate in self.get_include_dirs():
            hits = self.combine_paths(candidate, 'amd.h')
            if hits:
                header_dir = os.path.dirname(hits[0])
                break
        if header_dir is not None:
            # Record the header location and advertise it to C and SWIG.
            dict_append(info, include_dirs=[header_dir],
                        define_macros=[('SCIPY_AMD_H', None)],
                        swig_opts=['-I' + header_dir])
        self.set_info(**info)
        return
class umfpack_info(system_info):
    """Detect the UMFPACK sparse LU factorization library.

    The AMD library information is folded into the result, since
    UMFPACK links against AMD.
    """
    section = 'umfpack'
    dir_env_var = 'UMFPACK'
    notfounderror = UmfpackNotFoundError
    _lib_names = ['umfpack']
    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
        info = self.check_libs(lib_dirs, umfpack_libs, [])
        if info is None:
            return
        include_dirs = self.get_include_dirs()
        inc_dir = None
        for d in include_dirs:
            # umfpack.h may live directly in the dir or in an umfpack/ subdir.
            p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
            if p:
                inc_dir = os.path.dirname(p[0])
                break
        if inc_dir is not None:
            dict_append(info, include_dirs=[inc_dir],
                        define_macros=[('SCIPY_UMFPACK_H', None)],
                        swig_opts=['-I' + inc_dir])
        # BUG FIX: get_info('amd') used to be called twice, with the first
        # result bound to an unused local; call it once and merge the result.
        dict_append(info, **get_info('amd'))
        self.set_info(**info)
        return
## def vstr2hex(version):
## bits = []
## n = [24,16,8,4,0]
## r = 0
## for s in version.split('.'):
## r |= int(s) << n[0]
## del n[0]
## return r
#--------------------------------------------------------------------
def combine_paths(*args, **kws):
    """ Return a list of existing paths composed by all combinations of
    items from arguments.

    Each positional argument is a glob pattern or a list of patterns;
    empty arguments are skipped.  Keyword arguments are accepted for
    backward compatibility (a ``verbosity`` option used to be read here)
    but are currently ignored.
    """
    # Normalize arguments: drop empties, wrap bare strings in a list.
    r = []
    for a in args:
        if not a:
            continue
        if is_string(a):
            a = [a]
        r.append(a)
    args = r
    if not args:
        return []
    if len(args) == 1:
        # Single pattern list: just expand each glob and concatenate.
        result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
    elif len(args) == 2:
        # Two lists: expand the cross product of joined paths.
        result = []
        for a0 in args[0]:
            for a1 in args[1]:
                result.extend(glob(os.path.join(a0, a1)))
    else:
        # More than two: fold the first pair, then recurse on the rest.
        result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
    log.debug('(paths: %s)', ','.join(result))
    return result
# Precedence of implementation languages used when merging info dicts:
# a higher value wins, so f90 > f77 > c++ > c.
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
# Inverse of language_map: turns the winning precedence back into a name.
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
    """Merge the keyword values into the lists stored in dict *d*.

    Special handling:
    * ``language``: the highest-precedence language seen (per
      ``language_map``) is stored under ``d['language']``.
    * path/flag-like keys (``library_dirs``, ``include_dirs``, ...) are
      de-duplicated while appending; all other existing keys are simply
      extended.  Keys not yet in *d* are assigned as-is.
    """
    languages = []
    for k, v in kws.items():
        if k == 'language':
            languages.append(v)
            continue
        if k in d:
            if k in ['library_dirs', 'include_dirs',
                     'extra_compile_args', 'extra_link_args',
                     'runtime_library_dirs', 'define_macros']:
                # De-duplicate while preserving order.  (Was a side-effect
                # list comprehension; a plain loop states the intent.)
                for item in v:
                    if item not in d[k]:
                        d[k].append(item)
            else:
                d[k].extend(v)
        else:
            d[k] = v
    if languages:
        # Keep the most capable language mentioned (f90 beats f77 ...).
        best = max(language_map.get(lang, 0) for lang in languages)
        d['language'] = inv_language_map[best]
    return
def parseCmdLine(argv=(None,)):
    """Parse the command line; return an ``(options, args)`` pair.

    The first element of *argv* is treated as the program name and
    skipped; ``options.verbose`` reflects the -v/--verbose flag and
    *args* holds the remaining positional arguments.
    """
    import optparse
    cmd_parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
    cmd_parser.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='be verbose and print more messages')
    return cmd_parser.parse_args(args=argv[1:])
def show_all(argv=None):
    """Run detection for every system_info subclass defined in this
    module and log what each one finds.

    Positional command line arguments restrict the report to the named
    info classes; the trailing ``_info`` suffix may be omitted.
    """
    import inspect
    if argv is None:
        argv = sys.argv
    opts, args = parseCmdLine(argv)
    if opts.verbose:
        log.set_threshold(log.DEBUG)
    else:
        log.set_threshold(log.INFO)
    show_only = []
    for n in args:
        if n[-5:] != '_info':
            n = n + '_info'
        show_only.append(n)
    # Renamed from ``show_all`` to avoid shadowing this function's own name.
    show_everything = not show_only
    _gdict_ = globals().copy()
    for name, c in _gdict_.items():
        if not inspect.isclass(c):
            continue
        if not issubclass(c, system_info) or c is system_info:
            continue
        if not show_everything:
            if name not in show_only:
                continue
            # More idiomatic than ``del show_only[show_only.index(name)]``.
            show_only.remove(name)
        conf = c()
        conf.verbosity = 2
        # get_info() performs the detection and logs the result; the
        # return value is not needed here (was bound to an unused local).
        conf.get_info()
    if show_only:
        log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
    # Invoked as a script: print a detection report for every *_info class.
    show_all()
| mit |
pabloborrego93/edx-platform | lms/djangoapps/instructor/views/coupons.py | 61 | 6574 | """
E-commerce Tab Instructor Dashboard Coupons Operations views
"""
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from util.json_request import JsonResponse
from shoppingcart.models import Coupon, CourseRegistrationCode
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import datetime
import pytz
import logging
log = logging.getLogger(__name__)
@require_POST
@login_required
def remove_coupon(request, course_id):  # pylint: disable=unused-argument
    """
    Deactivate the coupon matching the POSTed ``id``.

    The row is kept for history; only the ``is_active`` flag is flipped
    to False.  Every failure path answers with HTTP 400.
    """
    def bad_request(message):
        # All error responses share the same JSON shape and status code.
        return JsonResponse({'message': message}, status=400)  # status code 400: Bad Request

    coupon_id = request.POST.get('id', None)
    if not coupon_id:
        return bad_request(_('coupon id is None'))
    try:
        coupon = Coupon.objects.get(id=coupon_id)
    except ObjectDoesNotExist:
        return bad_request(
            _('coupon with the coupon id ({coupon_id}) DoesNotExist').format(coupon_id=coupon_id))
    if not coupon.is_active:
        return bad_request(
            _('coupon with the coupon id ({coupon_id}) is already inactive').format(coupon_id=coupon_id))
    coupon.is_active = False
    coupon.save()
    return JsonResponse({
        'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
    })  # status code 200: OK by default
@require_POST
@login_required
def add_coupon(request, course_id):
    """
    add coupon in the Coupons Table

    Control flow: if an active coupon with this code already exists for
    the course, the trailing ``if coupon:`` block answers with a 400;
    otherwise the Coupon.DoesNotExist branch validates the POSTed fields
    and creates the coupon, returning from inside the except block.
    """
    code = request.POST.get('code')
    # check if the code is already in the Coupons Table and active
    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        coupon = Coupon.objects.get(is_active=True, code=code, course_id=course_id)
    except Coupon.DoesNotExist:
        # check if the coupon code is in the CourseRegistrationCode Table
        course_registration_code = CourseRegistrationCode.objects.filter(code=code)
        if course_registration_code:
            return JsonResponse(
                {'message': _("The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)},
                status=400)  # status code 400: Bad Request
        description = request.POST.get('description')
        # NOTE(review): this rebinds course_id from the parsed CourseKey back
        # to the raw POSTed string before storing it on the Coupon --
        # presumably intentional; confirm the POSTed value matches the URL's
        # course_id.
        course_id = request.POST.get('course_id')
        try:
            discount = int(request.POST.get('discount'))
        except ValueError:
            return JsonResponse({
                'message': _("Please Enter the Integer Value for Coupon Discount")
            }, status=400)  # status code 400: Bad Request
        if discount > 100 or discount < 0:
            return JsonResponse({
                'message': _("Please Enter the Coupon Discount Value Less than or Equal to 100")
            }, status=400)  # status code 400: Bad Request
        expiration_date = None
        if request.POST.get('expiration_date'):
            expiration_date = request.POST.get('expiration_date')
            try:
                # Stored as UTC midnight of the day AFTER the entered date --
                # presumably so the coupon stays valid through that day.
                expiration_date = datetime.datetime.strptime(expiration_date, "%m/%d/%Y").replace(tzinfo=pytz.UTC) + datetime.timedelta(days=1)
            except ValueError:
                return JsonResponse({
                    'message': _("Please enter the date in this format i-e month/day/year")
                }, status=400)  # status code 400: Bad Request
        coupon = Coupon(
            code=code, description=description,
            course_id=course_id,
            percentage_discount=discount,
            created_by_id=request.user.id,
            expiration_date=expiration_date
        )
        coupon.save()
        return JsonResponse(
            {'message': _("coupon with the coupon code ({code}) added successfully").format(code=code)}
        )
    # Reached only when the initial lookup succeeded (no exception raised):
    # an active coupon with this code already exists for this course.
    if coupon:
        return JsonResponse(
            {'message': _("coupon with the coupon code ({code}) already exists for this course").format(code=code)},
            status=400)  # status code 400: Bad Request
@require_POST
@login_required
def update_coupon(request, course_id):  # pylint: disable=unused-argument
    """
    Replace the description of an existing coupon with the POSTed value.

    Missing or unknown coupon ids are answered with HTTP 400.
    """
    coupon_id = request.POST.get('coupon_id', None)
    if not coupon_id:
        return JsonResponse({'message': _("coupon id not found")}, status=400)  # status code 400: Bad Request
    try:
        coupon = Coupon.objects.get(pk=coupon_id)
    except ObjectDoesNotExist:
        return JsonResponse(
            {'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)},
            status=400)  # status code 400: Bad Request
    # Only the description is mutable through this endpoint.
    coupon.description = request.POST.get('description')
    coupon.save()
    return JsonResponse(
        {'message': _("coupon with the coupon id ({coupon_id}) updated Successfully").format(coupon_id=coupon_id)}
    )
@require_POST
@login_required
def get_coupon_info(request, course_id):  # pylint: disable=unused-argument
    """
    get the coupon information to display in the pop up form

    Read-only endpoint: returns the coupon's code, description, course
    id, discount and expiry date, or HTTP 400 when the id is missing,
    unknown, or the coupon is inactive.
    """
    coupon_id = request.POST.get('id', None)
    if not coupon_id:
        return JsonResponse({
            'message': _("coupon id not found")
        }, status=400)  # status code 400: Bad Request
    try:
        coupon = Coupon.objects.get(id=coupon_id)
    except ObjectDoesNotExist:
        return JsonResponse({
            'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request
    if not coupon.is_active:
        return JsonResponse({
            'message': _("coupon with the coupon id ({coupon_id}) is already inactive").format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request
    expiry_date = coupon.display_expiry_date
    return JsonResponse({
        'coupon_code': coupon.code,
        'coupon_description': coupon.description,
        'coupon_course_id': coupon.course_id.to_deprecated_string(),
        'coupon_discount': coupon.percentage_discount,
        'expiry_date': expiry_date,
        # BUG FIX: the success message claimed an update; this view only reads.
        'message': _('coupon with the coupon id ({coupon_id}) fetched successfully').format(coupon_id=coupon_id)
    })  # status code 200: OK by default
| agpl-3.0 |
jose-raul-barreras/letscode | textJustification.py | 1 | 2695 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 12:04:09 2017
@author: jrb
"""
def justify(strings, L):
    """Pad each pre-grouped line of words out to exactly *L* characters.

    ``strings`` is a list of lines, each a list of words whose combined
    length (plus one space between neighbours) fits within ``L``.
    Surplus spaces are handed out round-robin over the gaps from the
    left, so left gaps receive at least as many spaces as right gaps.
    A line holding a single word is left justified and padded on the
    right.  When the whole text is one line it is left justified with
    single spaces between words and padded on the right.

    Note: the inner word lists of *strings* are mutated in place.
    """
    res = []
    if len(strings) == 1:
        # Single line of output: left justify with single separating
        # spaces and pad the remainder on the right.
        # BUG FIX: exactly one trailing space used to be appended
        # regardless of L, producing a line of the wrong width.
        line = ' '.join(strings[0])
        res.append(line + ' ' * (L - len(line)))
    else:
        for s in strings:
            rest = L - sum([len(w) for w in s])
            while rest > 0:
                if len(s) > 1:
                    # Hand one space to each gap, left to right, until
                    # the surplus runs out.
                    for i in range(len(s) - 1):
                        if rest > 0:
                            s[i] = s[i] + ' '
                            rest -= 1
                        else:
                            break
                else:
                    # Single-word line: pad on the right.
                    s[0] += ' ' * rest
                    rest = 0
            res.append(''.join(s))
    return res
def textJustification(words, L):
    """Greedily pack *words* into fully justified lines of width *L*.

    Based on https://codefights.com/interview/ibANT8ZGc3shACBRF/description

    As many words as possible (counting one separating space between
    neighbours) are packed onto each line, then justify() pads every
    line out to exactly L characters, distributing surplus spaces as
    evenly as possible with the extras biased to the left-hand gaps.

    Example: for words = ["This", "is", "an", "example", "of", "text",
    "justification."] and L = 16 the multi-line result is
    "This    is    an" / "example  of text" / "justification.  ".

    NOTE: assumes every word is at most L characters long; a longer
    word would never be consumed and the packing loop would not
    terminate -- confirm callers validate their input.
    """
    res = []
    while words:
        s = []
        # len(s) equals the number of separator spaces one more word needs.
        while len(s) + sum([len(w) for w in s]) + len(words[0]) <= L:
            s.append(words[0])
            words = words[1:]
            if not words:
                break
        res.append(s)
    # BUG FIX: removed a stray debugging print(res) left in the original,
    # and dropped the docstring doctests whose expected output it corrupted.
    return justify(res, L)
if __name__ == "__main__":
    # Ad-hoc smoke run; the doctest hook below is intentionally disabled.
    words = ["Two", "words."]
    L = 10
    textJustification(words, L)
#    import doctest
#    doctest.testmod()
| mit |
wshallum/ansible | lib/ansible/utils/module_docs_fragments/aws.py | 16 | 3156 | # (c) 2014, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment listing the common AWS options."""
    # AWS only documentation fragment.  DOCUMENTATION is YAML consumed by the
    # Ansible doc tooling; its content is data and must not be reformatted.
    DOCUMENTATION = """
options:
  ec2_url:
    description:
      - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
    required: false
    default: null
    aliases: []
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
  security_token:
    description:
      - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
    required: false
    default: null
    aliases: [ 'access_token' ]
    version_added: "1.6"
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  profile:
    description:
      - Uses a boto profile. Only works with boto >= 2.24.0.
    required: false
    default: null
    aliases: []
    version_added: "1.6"
requirements:
  - "python >= 2.6"
  - boto
notes:
  - If parameters are not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(AWS_URL) or C(EC2_URL),
    C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
    C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
    C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
    C(AWS_REGION) or C(EC2_REGION)
  - Ansible uses the boto configuration file (typically ~/.boto) if no
    credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
    AWS region, when required, but this can also be configured in the boto config file
  """
| gpl-3.0 |
ferabra/edx-platform | lms/djangoapps/teams/models.py | 8 | 9825 | """Django models related to teams functionality."""
from datetime import datetime
from uuid import uuid4
import pytz
from datetime import datetime
from model_utils import FieldTracker
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy
from django_countries.fields import CountryField
from django_comment_common.signals import (
thread_created,
thread_edited,
thread_deleted,
thread_voted,
comment_created,
comment_edited,
comment_deleted,
comment_voted,
comment_endorsed
)
from xmodule_django.models import CourseKeyField
from util.model_utils import slugify
from student.models import LanguageField, CourseEnrollment
from .errors import AlreadyOnTeamInCourse, NotEnrolledInCourseForTeam, ImmutableMembershipFieldException
from teams.utils import emit_team_event
from teams import TEAM_DISCUSSION_CONTEXT
@receiver(thread_voted)
@receiver(thread_created)
@receiver(comment_voted)
@receiver(comment_created)
def post_create_vote_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon creating or voting for a
    post.

    The signal kwargs carry the acting ``user`` and the ``post`` object.
    """
    handle_activity(kwargs['user'], kwargs['post'])
@receiver(thread_edited)
@receiver(thread_deleted)
@receiver(comment_edited)
@receiver(comment_deleted)
def post_edit_delete_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon editing or deleting a
    post.

    Only counts when the acting user is the post's original author;
    ``post.user_id`` is coerced with long() -- presumably it arrives as
    a string from the comments service (confirm upstream).
    """
    post = kwargs['post']
    handle_activity(kwargs['user'], post, long(post.user_id))
@receiver(comment_endorsed)
def comment_endorsed_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon endorsing a comment.

    Note the authorship check compares against the parent thread's
    author (``comment.thread.user_id``), not the comment's own author.
    """
    comment = kwargs['post']
    handle_activity(kwargs['user'], comment, long(comment.thread.user_id))
def handle_activity(user, post, original_author_id=None):
    """Record forum activity performed by *user* against a team discussion.

    Delegates to CourseTeamMembership.update_last_activity, but only
    when the acting user is the post's original author (when an author
    id is supplied) and the discussion carries the team context.
    """
    acted_on_own_post = (original_author_id is None
                         or user.id == original_author_id)
    post_context = getattr(post, "context", "course")
    if acted_on_own_post and post_context == TEAM_DISCUSSION_CONTEXT:
        CourseTeamMembership.update_last_activity(user, post.commentable_id)
class CourseTeam(models.Model):
    """This model represents team related info.

    A team belongs to a single course, is optionally tied to a topic,
    and owns a dedicated discussion topic (``discussion_topic_id``).
    """
    team_id = models.CharField(max_length=255, unique=True)
    discussion_topic_id = models.CharField(max_length=255, unique=True)
    name = models.CharField(max_length=255, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    topic_id = models.CharField(max_length=255, db_index=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    description = models.CharField(max_length=300)
    country = CountryField(blank=True)
    language = LanguageField(
        blank=True,
        help_text=ugettext_lazy("Optional language the team uses as ISO 639-1 code."),
    )
    last_activity_at = models.DateTimeField(db_index=True)  # indexed for ordering
    users = models.ManyToManyField(User, db_index=True, related_name='teams', through='CourseTeamMembership')
    # Denormalized membership count, kept in sync by
    # CourseTeamMembership.save()/delete() via reset_team_size().
    team_size = models.IntegerField(default=0, db_index=True)  # indexed for ordering
    field_tracker = FieldTracker()
    # Don't emit changed events when these fields change.
    FIELD_BLACKLIST = ['last_activity_at', 'team_size']
    @classmethod
    def create(cls, name, course_id, description, topic_id=None, country=None, language=None):
        """Create a complete CourseTeam object.

        Note: the returned instance is NOT saved to the database; the
        caller is responsible for persisting it.

        Args:
            name (str): The name of the team to be created.
            course_id (str): The ID string of the course associated
                with this team.
            description (str): A description of the team.
            topic_id (str): An optional identifier for the topic the
                team formed around.
            country (str, optional): An optional country where the team
                is based, as ISO 3166-1 code.
            language (str, optional): An optional language which the
                team uses, as ISO 639-1 code.
        """
        # The team_id combines a slug of the name with a fresh UUID, so it
        # is both readable and unique; the same UUID names the discussion
        # topic.
        unique_id = uuid4().hex
        team_id = slugify(name)[0:20] + '-' + unique_id
        discussion_topic_id = unique_id
        course_team = cls(
            team_id=team_id,
            discussion_topic_id=discussion_topic_id,
            name=name,
            course_id=course_id,
            topic_id=topic_id if topic_id else '',
            description=description,
            country=country if country else '',
            language=language if language else '',
            last_activity_at=datetime.utcnow().replace(tzinfo=pytz.utc)
        )
        return course_team
    def add_user(self, user):
        """Adds the given user to the CourseTeam.

        Raises:
            NotEnrolledInCourseForTeam: if the user is not enrolled in
                this team's course.
            AlreadyOnTeamInCourse: if the user already belongs to a team
                in this course.
        """
        if not CourseEnrollment.is_enrolled(user, self.course_id):
            raise NotEnrolledInCourseForTeam
        if CourseTeamMembership.user_in_team_for_course(user, self.course_id):
            raise AlreadyOnTeamInCourse
        return CourseTeamMembership.objects.create(
            user=user,
            team=self
        )
    def reset_team_size(self):
        """Reset team_size to reflect the current membership count."""
        self.team_size = CourseTeamMembership.objects.filter(team=self).count()
        self.save()
class CourseTeamMembership(models.Model):
    """This model represents the membership of a single user in a single team."""
    class Meta(object):
        """Stores meta information for the model."""
        # A user may appear at most once per team.
        unique_together = (('user', 'team'),)
    user = models.ForeignKey(User)
    team = models.ForeignKey(CourseTeam, related_name='membership')
    date_joined = models.DateTimeField(auto_now_add=True)
    last_activity_at = models.DateTimeField()
    # Fields that may only be written once (on load/creation); enforced by
    # __setattr__ below.
    immutable_fields = ('user', 'team', 'date_joined')
    def __setattr__(self, name, value):
        """Memberships are immutable, with the exception of last activity
        date.

        Raises:
            ImmutableMembershipFieldException: on any attempt to overwrite
                an already-set immutable field.
        """
        if name in self.immutable_fields:
            # Check the current value -- if it is None, then this
            # model is being created from the database and it's fine
            # to set the value. Otherwise, we're trying to overwrite
            # an immutable field.
            current_value = getattr(self, name, None)
            if current_value is not None:
                raise ImmutableMembershipFieldException
        super(CourseTeamMembership, self).__setattr__(name, value)
    def save(self, *args, **kwargs):
        """Customize save method to set the last_activity_at if it does not
        currently exist. Also resets the team's size if this model is
        being created.
        """
        # self.pk is None only before the initial INSERT, i.e. on creation.
        should_reset_team_size = False
        if self.pk is None:
            should_reset_team_size = True
        if not self.last_activity_at:
            self.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
        super(CourseTeamMembership, self).save(*args, **kwargs)
        if should_reset_team_size:
            self.team.reset_team_size()  # pylint: disable=no-member
    def delete(self, *args, **kwargs):
        """Recompute the related team's team_size after deleting a membership"""
        super(CourseTeamMembership, self).delete(*args, **kwargs)
        self.team.reset_team_size()  # pylint: disable=no-member
    @classmethod
    def get_memberships(cls, username=None, course_ids=None, team_id=None):
        """
        Get a queryset of memberships.

        Filters compose; omitted arguments leave the queryset unrestricted.

        Args:
            username (unicode, optional): The username to filter on.
            course_ids (list of unicode, optional) Course IDs to filter on.
            team_id (unicode, optional): The team_id to filter on.
        """
        queryset = cls.objects.all()
        if username is not None:
            queryset = queryset.filter(user__username=username)
        if course_ids is not None:
            queryset = queryset.filter(team__course_id__in=course_ids)
        if team_id is not None:
            queryset = queryset.filter(team__team_id=team_id)
        return queryset
    @classmethod
    def user_in_team_for_course(cls, user, course_id):
        """
        Checks whether or not a user is already in a team in the given course.

        Args:
            user: the user that we want to query on
            course_id: the course_id of the course we're interested in

        Returns:
            True if the user is on a team in the course already
            False if not
        """
        return cls.objects.filter(user=user, team__course_id=course_id).exists()
    @classmethod
    def update_last_activity(cls, user, discussion_topic_id):
        """Set the `last_activity_at` for both this user and their team in the
        given discussion topic. No-op if the user is not a member of
        the team for this discussion.
        """
        try:
            membership = cls.objects.get(user=user, team__discussion_topic_id=discussion_topic_id)
        # If a privileged user is active in the discussion of a team
        # they do not belong to, do not update their last activity
        # information.
        except ObjectDoesNotExist:
            return
        # Stamp both the membership and the team with the same instant.
        now = datetime.utcnow().replace(tzinfo=pytz.utc)
        membership.last_activity_at = now
        membership.team.last_activity_at = now
        membership.team.save()
        membership.save()
        emit_team_event('edx.team.activity_updated', membership.team.course_id, {
            'team_id': membership.team_id,
        })
| agpl-3.0 |
OWASP/django-DefectDojo | dojo/tools/openscap/parser.py | 2 | 6339 | from xml.dom import NamespaceErr
import hashlib
from urllib.parse import urlparse
import re
from defusedxml import ElementTree as ET
from dojo.models import Endpoint, Finding
__author__ = 'dr3dd589'
class OpenscapXMLParser(object):
    """Parse an OpenSCAP XCCDF result file into DefectDojo findings.

    Walks the failed rule-results of the report, builds one Finding per
    unique check-content reference (de-duplicated via an MD5 key over
    the references text) and attaches every scanned target address as
    an endpoint.  The resulting findings are exposed as ``self.items``.
    """
    def __init__(self, file, test):
        self.dupes = dict()
        self.items = ()
        if file is None:
            return
        tree = ET.parse(file)
        # get root of tree.
        root = tree.getroot()
        namespace = self.get_namespace(root)
        # go to test result
        test_result = tree.find('./{0}TestResult'.format(namespace))
        ips = []
        # append all targets in a list.
        for ip in test_result.findall('./{0}target-address'.format(namespace)):
            ips.append(ip.text)
        # check if the xml file has the correct root or not.
        if 'Benchmark' not in root.tag:
            raise NamespaceErr("This doesn't seem to be a valid Openscap vulnerability scan xml file.")
        # run both rule, and rule-result in parallel so that we can get title for failed test from rule.
        for rule, rule_result in zip(root.findall('./{0}Rule'.format(namespace)), test_result.findall('./{0}rule-result'.format(namespace))):
            cves = []
            # BUG FIX: reset cve for every rule.  Previously the variable
            # survived across loop iterations, so a failed rule without
            # <ident> entries either raised NameError (first iteration)
            # or silently inherited an earlier rule's CVE.
            cve = None
            result = rule_result.find('./{0}result'.format(namespace)).text
            # find only failed report.
            if "fail" in result:
                # get title of Rule corresponding to the rule-result.
                title = rule.find('./{0}title'.format(namespace)).text
                description = "**Title** : " + title + "\n\n"
                mitigation = "N/A"
                impact = "N/A"
                for ident in rule_result.findall('./{0}ident'.format(namespace)):
                    cves.append(ident.text)
                # A single CVE goes into the cve field; multiple CVEs are
                # listed in the description instead.
                if len(cves) > 1:
                    # BUG FIX: the loop variable used to shadow ``cve``, so
                    # multi-CVE findings carried a raw XML Element in their
                    # cve field; they now keep cve=None and list all CVEs
                    # in the description.
                    cve_desc = ""
                    for cve_id in cves:
                        cve_desc += '[{0}](https://cve.mitre.org/cgi-bin/cvename.cgi?name={0})'.format(cve_id) + ", "
                    description += "**Releted CVE's** : " + cve_desc[:-2]
                elif cves:
                    # BUG FIX: was ``cve = cves[0]`` guarded by a bare
                    # try/except that hid the empty-list case.
                    cve = cves[0]
                # get severity.
                severity = rule_result.attrib['severity'].lower().capitalize()
                check_content = rule_result.find('./{0}check/{0}check-content-ref'.format(namespace)).attrib
                # get references.
                references = "**name** : " + check_content['name'] + "\n" + \
                             "**href** : " + check_content['href'] + "\n"
                # Findings are de-duplicated on their references block.
                dupe_key = hashlib.md5(references.encode('utf-8')).hexdigest()
                if dupe_key in self.dupes:
                    # Already seen: only attach the endpoints again.
                    finding = self.dupes[dupe_key]
                    for ip in ips:
                        self.process_endpoints(finding, ip)
                else:
                    finding = Finding(title=title,
                                      test=test,
                                      active=False,
                                      verified=False,
                                      cve=cve,
                                      description=description,
                                      severity=severity,
                                      numerical_severity=Finding.get_numerical_severity(
                                          severity),
                                      mitigation=mitigation,
                                      impact=impact,
                                      references=references,
                                      dynamic_finding=True)
                    self.dupes[dupe_key] = finding
                    for ip in ips:
                        self.process_endpoints(finding, ip)
        self.items = list(self.dupes.values())
    def get_namespace(self, element):
        """Return the ``{uri}`` namespace prefix of *element*'s tag, or ''."""
        m = re.match(r'\{.*\}', element.tag)
        return m.group(0) if m else ''
    def process_endpoints(self, finding, host):
        """Attach *host* to *finding* as an Endpoint, reusing a matching
        Endpoint row from the database when one exists."""
        protocol = "http"
        query = ""
        fragment = ""
        path = ""
        url = urlparse(host)
        if url:
            path = url.path
            if path == host:
                path = ""
        rhost = re.search(
            r"(http|https|ftp)\://([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.(com|edu|gov|int|mil|net|org|biz|arpa|info|name|pro|aero|coop|museum|[a-zA-Z]{2}))[\:]*([0-9]+)*([/]*($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+)).*?$",
            host)
        # BUG FIX: was a bare try/except pass around the group() calls;
        # test the match object instead so unrelated errors are not hidden.
        if rhost is not None:
            protocol = rhost.group(1)
            host = rhost.group(4)
        try:
            dupe_endpoint = Endpoint.objects.get(protocol=protocol,
                                                 host=host,
                                                 query=query,
                                                 fragment=fragment,
                                                 path=path,
                                                 )
        except Endpoint.DoesNotExist:
            dupe_endpoint = None
        if not dupe_endpoint:
            endpoint = Endpoint(protocol=protocol,
                                host=host,
                                query=query,
                                fragment=fragment,
                                path=path,
                                )
        else:
            endpoint = dupe_endpoint
        # NOTE(review): when a DB duplicate exists, both the reused endpoint
        # and the duplicate are appended (preserved from the original code);
        # confirm the double entry is intentional.
        if not dupe_endpoint:
            endpoints = [endpoint]
        else:
            endpoints = [endpoint, dupe_endpoint]
        finding.unsaved_endpoints = finding.unsaved_endpoints + endpoints
| bsd-3-clause |
neumerance/deploy | .venv/lib/python2.7/site-packages/nose_exclude.py | 3 | 3293 | import os
import logging
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.nose_exclude')
class NoseExclude(Plugin):
    """Nose plugin that excludes configured directories from test discovery."""
    def options(self, parser, env=os.environ):
        """Define the command line options for the plugin."""
        super(NoseExclude, self).options(parser, env)
        # Seed the defaults from NOSE_EXCLUDE_DIRS (a ';'-separated list).
        env_dirs = []
        if 'NOSE_EXCLUDE_DIRS' in env:
            exclude_dirs = env.get('NOSE_EXCLUDE_DIRS', '')
            env_dirs.extend(exclude_dirs.split(';'))
        parser.add_option(
            "--exclude-dir", action="append",
            dest="exclude_dirs",
            default=env_dirs,
            help="Directory to exclude from test discovery. \
                Path can be relative to current working directory \
                or an absolute path. May be specified multiple \
                times. [NOSE_EXCLUDE_DIRS]")
        parser.add_option(
            "--exclude-dir-file", type="string",
            dest="exclude_dir_file",
            default=env.get('NOSE_EXCLUDE_DIRS_FILE', False),
            help="A file containing a list of directories to exclude \
                from test discovery. Paths can be relative to current \
                working directory or an absolute path. \
                [NOSE_EXCLUDE_DIRS_FILE]")
    def _force_to_abspath(self, pathname):
        """Return *pathname* as an absolute path, or None (after logging a
        warning) when the resulting path does not exist."""
        if os.path.isabs(pathname):
            abspath = pathname
        else:
            abspath = os.path.abspath(pathname)
        if os.path.exists(abspath):
            return abspath
        else:
            log.warn('The following path was not found: %s' % pathname)
    def _load_from_file(self, filename):
        """Return the stripped, non-empty, non-comment lines of *filename*.

        BUG FIX: the file handle used to be opened and never closed; a
        context manager now guarantees it is released.
        """
        with open(filename) as infile:
            return [line.strip() for line in infile
                    if line.strip() and not line.startswith('#')]
    def configure(self, options, conf):
        """Configure plugin based on command line options"""
        super(NoseExclude, self).configure(options, conf)
        self.exclude_dirs = {}
        # preload directories from file
        if options.exclude_dir_file:
            if not options.exclude_dirs:
                options.exclude_dirs = []
            new_dirs = self._load_from_file(options.exclude_dir_file)
            options.exclude_dirs.extend(new_dirs)
        # The plugin only enables itself when there is something to exclude.
        if not options.exclude_dirs:
            self.enabled = False
            return
        self.enabled = True
        root = os.getcwd()
        log.debug('cwd: %s' % root)
        # Normalize excluded directory names for lookup
        for exclude_param in options.exclude_dirs:
            # when using setup.cfg, you can specify only one 'exclude-dir'
            # separated by some character (new line is good enough)
            for d in exclude_param.split('\n'):
                d = d.strip()
                abs_d = self._force_to_abspath(d)
                if abs_d:
                    self.exclude_dirs[abs_d] = True
        exclude_str = "excluding dirs: %s" % ",".join(self.exclude_dirs.keys())
        log.debug(exclude_str)
    def wantDirectory(self, dirname):
        """Check if directory is eligible for test discovery"""
        if dirname in self.exclude_dirs:
            log.debug("excluded: %s" % dirname)
            return False
        else:
            # None lets other plugins / nose defaults decide.
            return None
| apache-2.0 |
morrillo/l10n_ar_chart_generic | __init__.py | 2 | 1024 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hypernicon/pyec | pyec/distribution/nn/net_gpu.py | 1 | 5196 | """
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
try:
import theano
except ImportError:
print "Could not find Theano. Falling back on CPU-based RNN instance. If you want",
print "to use a GPU, please `easy_install Theano`"
raise
else:
import theano.tensor as T
import numpy as np
TIDENTITY = 0
TLOGISTIC = 1
TTHRESHOLD = 2
TBIAS = 3
TRADIAL = 4
THYPERBOLIC = 5
def RnnEvaluator(weights):
    """Build a Theano function that computes the internal state of the network
    when called.

    ``weights`` is a sequence of layer tuples
    ``(neurons, activator, isInput, isOutput, weightFrame)`` where
    ``weightFrame`` lists ``(srcIdx, w)`` incoming connections.

    Returns a callable ``fix_inputs(inputs, times=5)`` that iterates the
    recurrence at most ``times`` steps (stopping early at a fixed point)
    and returns the activations of the output layers.
    """
    numInputs = 0
    numOutputs = 0
    for neurons, activator, isInput, isOutput, weightFrame in weights:
        if isInput:
            numInputs += 1
        if isOutput:
            numOutputs += 1

    def evaluate_net(*states):
        # One recurrence step: compute every layer's next activation from
        # the current states; scan stops early when nothing changes.
        activations = T.fvectors(len(weights))
        idx = 0
        for neurons, activator, isInput, isOutput, weightFrame in weights:
            sumParts = []
            for i, info in enumerate(weightFrame):
                srcIdx, w = info
                sumParts.append(T.dot(states[srcIdx], w.transpose()))
            if len(sumParts):
                sumParts = T.stack(*sumParts)
                activity = T.sum(sumParts, axis=0)
                if activator == TIDENTITY:
                    activation = activity
                elif activator == TLOGISTIC:
                    activation = 1. / (1. + T.exp(-activity))
                elif activator == THYPERBOLIC:
                    activation = T.tanh(activity)
                elif activator == TTHRESHOLD:
                    activation = T.sgn(activity)
                elif activator == TBIAS:
                    activation = T.ones_like(activity, dtype='float32')
                elif activator == TRADIAL:
                    activation = T.exp(-activity*activity/2.0)
                else:
                    # BUG FIX: the original concatenated a format template with
                    # an undefined ``layer`` variable; report the layer index.
                    raise Exception("Unknown activation function for layer {0}".format(idx))
            else:
                # Layer with no incoming connections keeps a zero state.
                activation = T.zeros_like(states[idx])
            activations[idx] = activation
            idx += 1
        # Fixed-point check: stop scanning once the state is unchanged.
        checklist = [T.all(T.eq(a,s)) for a,s in zip(activations, states)]
        condition = T.all(T.as_tensor_variable(checklist))
        return activations, {}, theano.scan_module.until(condition)

    def make_states(*inputs):
        # Initial scan state: input layers take the provided matrices,
        # all other layers start as ones.
        states = []
        idx = 0
        numPoints = len(inputs) and inputs[0].shape[0] or 1
        for neurons, activator, isInput, isOutput, weightFrame in weights:
            if isInput:
                states.append(inputs[idx])
                idx += 1
            else:
                states.append(T.ones((numPoints,neurons), dtype='float32'))
        return states

    def project_output(states):
        # Select the states belonging to output layers, in layer order.
        outputs = []
        idx = 0
        for neurons, activator, isInput, isOutput, weightFrame in weights:
            if isOutput:
                outputs.append(states[idx])
            idx += 1
        return outputs

    inputs = T.fmatrices(numInputs)
    times = T.iscalar()
    netValue, updates = theano.scan(
        fn=evaluate_net,
        outputs_info=make_states(*inputs),
        n_steps=times
    )
    # Keep only the final step of every layer's trajectory.
    result = [n[-1] for n in netValue]
    outputs = project_output(result)
    net = theano.function(inputs + [times], outputs)

    def fix_inputs(inputs, times=5):
        # Accept 1-D input vectors by promoting them to single-row
        # matrices and un-promoting the outputs afterwards.
        reshape = False
        if len(inputs) and (len(numpy.shape(inputs[0])) == 1):
            reshape = True
            # BUG FIX: the original referenced ``np``, which is never
            # imported in this module; use the existing ``numpy`` import.
            inputs = [numpy.reshape(i, (1,i.shape[0])) for i in inputs]
        args = list(inputs) + [times]
        outputs = net(*args)
        if reshape:
            return [o[0] for o in outputs]
        return outputs

    return fix_inputs
Biles430/FPF_PIV | piv_outer.py | 1 | 3559 | import pandas as pd
from pandas import DataFrame
import numpy as np
import PIV
import h5py
import matplotlib.pyplot as plt
import hotwire as hw
################################################
# PURPOSE
# 1. Compute Integral Parameters
# 2. Outer Normalize
# 3. Plot
##################################################
#note- vel and axis are flipped to properlly calc delta
def _plot_profiles(y, profiles, markers, ylabel, ylabel_fontsize, legend1, num_tests):
    """Plot one wall-normal profile per test on a single figure.

    Shared helper for the five repeated profile plots in piv_outer.
    """
    plt.figure()
    for j in range(0, num_tests):
        plt.plot(y[j], profiles[j], markers[j])
    plt.ylabel(ylabel, fontsize=ylabel_fontsize)
    plt.xlabel('Wall Normal Position (m)', fontsize=14)
    plt.legend(legend1, loc=0)
    plt.show()


def piv_outer(date, num_tests, legend1):
    """Read PIV statistics for *num_tests* runs, outer-normalize, and plot.

    Reads per-test mean/rms profiles and mean fields from
    ``data/PIV_<date>_<j>.h5``, plots wall-normal profiles of each
    statistic, then draws the mean velocity contour/vector field of the
    first test.  *legend1* supplies per-test legend labels.
    """
    # initialize per-test containers keyed by test index
    umean_fov = dict()
    vmean_fov = dict()
    umean = dict()
    vmean = dict()
    urms = dict()
    vrms = dict()
    uvprime = dict()
    x = dict()
    y = dict()
    for j in range(0, num_tests):
        # read in variables
        name = 'data/PIV_' + date + '_' + str(j) + '.h5'
        umean_fov[j] = np.array(pd.read_hdf(name, 'umean'))
        vmean_fov[j] = np.array(pd.read_hdf(name, 'vmean'))
        umean[j] = np.array(pd.read_hdf(name, 'umean_profile_avg'))
        vmean[j] = np.array(pd.read_hdf(name, 'vmean_profile_avg'))
        urms[j] = np.array(pd.read_hdf(name, 'urms_profile_avg'))
        vrms[j] = np.array(pd.read_hdf(name, 'vrms_profile_avg'))
        uvprime[j] = np.array(pd.read_hdf(name, 'uvprime_profile_avg'))
        x[j] = np.array(pd.read_hdf(name, 'xaxis'))
        y[j] = np.array(pd.read_hdf(name, 'yaxis'))

    ###2. Outer Normalize #############
    ###################################

    ###3. PLOTS ######################
    ###################################
    marker_u = ['-xr', '-or', '-sr']
    marker_v = ['-xb', '-ob', '-sb']
    # mean and fluctuation profiles (one figure per statistic)
    _plot_profiles(y, umean, marker_u, 'U (m/sec)', 14, legend1, num_tests)
    _plot_profiles(y, vmean, marker_v, 'V (m/sec)', 14, legend1, num_tests)
    _plot_profiles(y, urms, marker_u, '$U_{rms}$ (m/sec)', 20, legend1, num_tests)
    _plot_profiles(y, vrms, marker_v, '$V_{rms}$ (m/sec)', 20, legend1, num_tests)
    _plot_profiles(y, uvprime, marker_u, '$u^,v^,$', 20, legend1, num_tests)

    ### Mean Vector plot (first test only)
    skip_num = 5
    umean_fov2 = umean_fov[0]
    vmean_fov2 = vmean_fov[0]
    x2 = x[0]
    # subsample columns so the quiver plot stays readable
    umean_fov2 = umean_fov2[:, 0:-1:skip_num]
    vmean_fov2 = vmean_fov2[:, 0:-1:skip_num]
    x2 = x2[0:-1:skip_num]
    y2 = y[0]
    Y = np.tile(y2, (len(x2), 1))
    Y = np.transpose(Y)
    X = np.tile(x2-.0543, (len(y2), 1))
    # BUG FIX: the original used **(1/2), which is integer division under
    # Python 2 and made the magnitude identically 1; use **0.5.
    mean_fov2 = (umean_fov2**2 + vmean_fov2**2)**0.5
    contour_levels = np.arange(0, 5, .05)
    plt.figure()
    c = plt.contourf(X, Y, mean_fov2, levels=contour_levels, linewidth=40, alpha=.6)
    cbar = plt.colorbar(c)
    cbar.ax.set_ylabel('Velocity (m/sec)')
    # NOTE: plt.hold(True) was removed here; holding is the default and the
    # call no longer exists in matplotlib >= 3.0.
    q = plt.quiver(X, Y, umean_fov2, vmean_fov2, angles='xy', scale=50, width=.0025)
    p = plt.quiverkey(q, .11, -.025, 4, "4 m/s", coordinates='data', color='r')
    plt.axis([0, .1, 0, .2])
    plt.ylabel('Wall Normal Position, $y/\delta$', fontsize=18)
    plt.xlabel('Streamwise Position, x (m)', fontsize=14)
    plt.title('Mean PIV Vector Field', fontsize=14)
    plt.show()
    print('Done!')
    return
| mit |
abenzbiria/clients_odoo | addons/l10n_hr/__init__.py | 432 | 1164 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
skirsdeda/django | django/contrib/sitemaps/__init__.py | 26 | 6010 | from django.apps import apps as django_apps
from django.conf import settings
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
from django.utils import translation
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.six.moves.urllib.request import urlopen
PING_URL = "http://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
    """Raised by ping_google when no sitemap URL is given and none can be reversed."""
    pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urlresolvers.reverse().
    """
    if sitemap_url is None:
        # Try the "index" sitemap view first, then the "global" one.
        for view_name in ('django.contrib.sitemaps.views.index',
                          'django.contrib.sitemaps.views.sitemap'):
            try:
                sitemap_url = urlresolvers.reverse(view_name)
            except urlresolvers.NoReverseMatch:
                continue
            else:
                break

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    if not django_apps.is_installed('django.contrib.sites'):
        raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")
    site_model = django_apps.get_model('sites.Site')
    domain = site_model.objects.get_current().domain
    params = urlencode({'sitemap': "http://%s%s" % (domain, sitemap_url)})
    urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
    """Base class describing one section of a sitemap.

    Subclasses typically override items() / location() / lastmod() /
    changefreq() / priority(); each may be a plain attribute or a
    per-item callable (resolved via __get below).
    """

    # This limit is defined by Google. See the index documentation at
    # http://sitemaps.org/protocol.php#index.
    limit = 50000

    # If protocol is None, the URLs in the sitemap will use the protocol
    # with which the sitemap was requested.
    protocol = None

    def __get(self, name, obj, default=None):
        # Resolve attribute *name* on self: call it with the item if it
        # is callable, otherwise return the attribute (or *default* if
        # the attribute does not exist at all).
        try:
            attr = getattr(self, name)
        except AttributeError:
            return default
        if callable(attr):
            return attr(obj)
        return attr

    def items(self):
        """Return the iterable of objects in this sitemap (override in subclasses)."""
        return []

    def location(self, obj):
        """Return the URL path of *obj*; defaults to its get_absolute_url()."""
        return obj.get_absolute_url()

    def _get_paginator(self):
        # Paginate items() into pages of at most `limit` entries.
        return paginator.Paginator(self.items(), self.limit)
    paginator = property(_get_paginator)

    def get_urls(self, page=1, site=None, protocol=None):
        """Return the list of URL-info dicts for *page*.

        Protocol resolution order: self.protocol, then the *protocol*
        argument (the request's protocol), then 'http'.  Requires either
        the sites framework or an explicit *site* for the domain.
        """
        # Determine protocol
        if self.protocol is not None:
            protocol = self.protocol
        if protocol is None:
            protocol = 'http'

        # Determine domain
        if site is None:
            if django_apps.is_installed('django.contrib.sites'):
                Site = django_apps.get_model('sites.Site')
                try:
                    site = Site.objects.get_current()
                except Site.DoesNotExist:
                    pass
            if site is None:
                raise ImproperlyConfigured(
                    "To use sitemaps, either enable the sites framework or pass "
                    "a Site/RequestSite object in your view."
                )
        domain = site.domain

        if getattr(self, 'i18n', False):
            # Emit one set of URLs per configured language, restoring the
            # active language afterwards.
            urls = []
            current_lang_code = translation.get_language()
            for lang_code, lang_name in settings.LANGUAGES:
                translation.activate(lang_code)
                urls += self._urls(page, protocol, domain)
            translation.activate(current_lang_code)
        else:
            urls = self._urls(page, protocol, domain)
        return urls

    def _urls(self, page, protocol, domain):
        # Build the url_info dict for every item on *page*; also records
        # self.latest_lastmod when every item carries a lastmod.
        urls = []
        latest_lastmod = None
        all_items_lastmod = True  # track if all items have a lastmod
        for item in self.paginator.page(page).object_list:
            loc = "%s://%s%s" % (protocol, domain, self.__get('location', item))
            priority = self.__get('priority', item, None)
            lastmod = self.__get('lastmod', item, None)
            if all_items_lastmod:
                all_items_lastmod = lastmod is not None
                if (all_items_lastmod and
                        (latest_lastmod is None or lastmod > latest_lastmod)):
                    latest_lastmod = lastmod
            url_info = {
                'item': item,
                'location': loc,
                'lastmod': lastmod,
                'changefreq': self.__get('changefreq', item, None),
                'priority': str(priority if priority is not None else ''),
            }
            urls.append(url_info)
        if all_items_lastmod and latest_lastmod:
            # Only meaningful when every item had a lastmod value.
            self.latest_lastmod = latest_lastmod
        return urls
class FlatPageSitemap(Sitemap):
    """Sitemap listing every public flatpage of the current site."""

    def items(self):
        if not django_apps.is_installed('django.contrib.sites'):
            raise ImproperlyConfigured("FlatPageSitemap requires django.contrib.sites, which isn't installed.")
        site_model = django_apps.get_model('sites.Site')
        current_site = site_model.objects.get_current()
        # Pages that require login are excluded from the sitemap.
        return current_site.flatpage_set.filter(registration_required=False)
class GenericSitemap(Sitemap):
    """Sitemap backed by an arbitrary queryset described by an info dict."""

    priority = None
    changefreq = None

    def __init__(self, info_dict, priority=None, changefreq=None):
        self.queryset = info_dict['queryset']
        self.date_field = info_dict.get('date_field')
        self.priority = priority
        self.changefreq = changefreq

    def items(self):
        # Make sure to return a clone; we don't want premature evaluation.
        return self.queryset.filter()

    def lastmod(self, item):
        # Only meaningful when a date_field was configured.
        return getattr(item, self.date_field) if self.date_field is not None else None


default_app_config = 'django.contrib.sitemaps.apps.SiteMapsConfig'
| bsd-3-clause |
sbidoul/buildbot | worker/buildbot_worker/null.py | 15 | 1618 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot_worker.base import WorkerBase
class LocalWorker(WorkerBase):
    """A worker that runs inside the master process and talks to it over
    the in-memory "null" protocol instead of a network connection."""

    @defer.inlineCallbacks
    def startService(self):
        # importing here to avoid dependency on buildbot master package
        # requires buildbot version >= 0.9.0b5
        from buildbot.worker.protocols.null import Connection
        yield WorkerBase.startService(self)
        self.workername = self.name
        conn = Connection(self.parent, self)
        # I don't have a master property, but my parent has.
        master = self.parent.master
        # Register the in-process connection with the master; only attach
        # to the parent if the master accepted it.
        res = yield master.workers.newConnection(conn, self.name)
        if res:
            yield self.parent.attached(conn)

    @defer.inlineCallbacks
    def stopService(self):
        # Detach from the parent before the base class tears down.
        yield self.parent.detached()
        yield WorkerBase.stopService(self)
| gpl-2.0 |
darktears/chromium-crosswalk | tools/telemetry/telemetry/testing/disabled_cases.py | 17 | 1242 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
# These are not real unittests.
# They are merely to test our Enable/Disable annotations.
class DisabledCases(unittest.TestCase):
    """Dummy test cases exercising the Enabled/Disabled decorators.

    These are not real unittests; each empty method only carries a
    decorator combination so the annotation logic itself can be tested.
    """

    def testAllEnabled(self):
        pass

    @decorators.Disabled('all')
    def testAllDisabled(self):
        pass

    @decorators.Enabled('mavericks')
    def testMavericksOnly(self):
        pass

    @decorators.Disabled('mavericks')
    def testNoMavericks(self):
        pass

    @decorators.Enabled('mac')
    def testMacOnly(self):
        pass

    @decorators.Disabled('mac')
    def testNoMac(self):
        pass

    @decorators.Enabled('chromeos')
    def testChromeOSOnly(self):
        pass

    @decorators.Disabled('chromeos')
    def testNoChromeOS(self):
        pass

    @decorators.Enabled('win', 'linux')
    def testWinOrLinuxOnly(self):
        pass

    @decorators.Disabled('win', 'linux')
    def testNoWinLinux(self):
        pass

    @decorators.Enabled('system')
    def testSystemOnly(self):
        pass

    @decorators.Disabled('system')
    def testNoSystem(self):
        pass
| bsd-3-clause |
tracierenea/gnuradio | gr-trellis/examples/python/test_cpm.py | 24 | 5571 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: CPM test
# Author: Achilleas Anastasopoulos
# Description: gnuradio flow graph
# Generated: Thu Feb 19 23:16:23 2009
##################################################
from gnuradio import gr
from gnuradio import trellis, digital, filter, blocks
from grc_gnuradio import blks2 as grc_blks2
import math
import numpy
from gnuradio import trellis
from gnuradio.trellis import fsm_utils
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy.stats
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
def run_test(seed,blocksize):
    """Run one Monte Carlo trial of the CPM (GMSK) modem chain.

    Builds a GNU Radio flow graph that modulates a random bit block,
    adds AWGN at 10 dB Es/N0, matched-filters, and Viterbi-decodes it,
    then compares decoded symbols against the transmitted ones.

    Returns (err, perr): the number of symbol errors in the block and a
    0/1 flag for whether the block had any error at all.
    """
    tb = gr.top_block()

    ##################################################
    # Variables
    ##################################################
    M = 2
    K = 1
    P = 2
    h = (1.0*K)/P
    L = 3
    Q = 4
    frac = 0.99
    f = trellis.fsm(P,M,L)

    # CPFSK signals
    #p = numpy.ones(L*Q)
    #p = p/sum(p)*Q/2.0;
    #q = numpy.cumsum(p)
    #q = q/q[-1]/2.0;

    # GMSK signals: Gaussian frequency pulse p and phase response q
    BT=0.3;
    tt=numpy.arange(0,L*Q)/(1.0*Q)-L/2.0;
    #print tt
    p=(0.5*scipy.special.erfc(2*math.pi*BT*(tt-0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0))-0.5*scipy.special.erfc(2*math.pi*BT*(tt+0.5)/math.sqrt(math.log(2.0))/math.sqrt(2.0)))/2.0;
    p=p/sum(p)*Q/2.0;
    #print p
    q=numpy.cumsum(p);
    q=q/q[-1]/2.0;
    #print q

    (f0T,SS,S,F,Sf,Ff,N) = fsm_utils.make_cpm_signals(K,P,M,L,q,frac)
    #print N
    #print Ff
    Ffa = numpy.insert(Ff,Q,numpy.zeros(N),axis=0)
    #print Ffa
    MF = numpy.fliplr(numpy.transpose(Ffa))
    #print MF
    E = numpy.sum(numpy.abs(Sf)**2,axis=0)
    Es = numpy.sum(E)/f.O()
    #print Es
    constellation = numpy.reshape(numpy.transpose(Sf),N*f.O())
    #print Ff
    #print Sf
    #print constellation
    #print numpy.max(numpy.abs(SS - numpy.dot(Ff , Sf)))

    EsN0_db = 10.0
    N0 =  Es * 10.0**(-(1.0*EsN0_db)/10.0)
    #N0 = 0.0
    #print N0
    # head/tail of known zeros flush the trellis at both block edges
    head = 4
    tail = 4
    numpy.random.seed(seed*666)
    data = numpy.random.randint(0, M, head+blocksize+tail+1)
    #data = numpy.zeros(blocksize+1+head+tail,'int')
    for i in range(head):
        data[i]=0
    for i in range(tail+1):
        data[-i]=0

    ##################################################
    # Blocks
    ##################################################
    random_source_x_0 = blocks.vector_source_b(data.tolist(), False)
    digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bf((-1, 1), 1)
    filter_interp_fir_filter_xxx_0 = filter.interp_fir_filter_fff(Q, p)
    analog_frequency_modulator_fc_0 = analog.frequency_modulator_fc(2*math.pi*h*(1.0/Q))

    blocks_add_vxx_0 = blocks.add_vcc(1)
    analog_noise_source_x_0 = analog.noise_source_c(analog.GR_GAUSSIAN, (N0/2.0)**0.5, -long(seed))

    blocks_multiply_vxx_0 = blocks.multiply_vcc(1)
    analog_sig_source_x_0 = analog.sig_source_c(Q, analog.GR_COS_WAVE, -f0T, 1, 0)
    # only works for N=2, do it manually for N>2...
    filter_fir_filter_xxx_0_0 = filter.fir_filter_ccc(Q, MF[0].conjugate())
    filter_fir_filter_xxx_0_0_0 = filter.fir_filter_ccc(Q, MF[1].conjugate())
    blocks_streams_to_stream_0 = blocks.streams_to_stream(gr.sizeof_gr_complex*1, int(N))
    blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, int(N*(1+0)))
    viterbi = trellis.viterbi_combined_cb(f, head+blocksize+tail, 0, -1, int(N),
                                          constellation, digital.TRELLIS_EUCLIDEAN)

    blocks_vector_sink_x_0 = blocks.vector_sink_b()

    ##################################################
    # Connections
    ##################################################
    tb.connect((random_source_x_0, 0), (digital_chunks_to_symbols_xx_0, 0))
    tb.connect((digital_chunks_to_symbols_xx_0, 0), (filter_interp_fir_filter_xxx_0, 0))
    tb.connect((filter_interp_fir_filter_xxx_0, 0), (analog_frequency_modulator_fc_0, 0))
    tb.connect((analog_frequency_modulator_fc_0, 0), (blocks_add_vxx_0, 0))
    tb.connect((analog_noise_source_x_0, 0), (blocks_add_vxx_0, 1))
    tb.connect((blocks_add_vxx_0, 0), (blocks_multiply_vxx_0, 0))
    tb.connect((analog_sig_source_x_0, 0), (blocks_multiply_vxx_0, 1))
    tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0, 0))
    tb.connect((blocks_multiply_vxx_0, 0), (filter_fir_filter_xxx_0_0_0, 0))
    tb.connect((filter_fir_filter_xxx_0_0, 0), (blocks_streams_to_stream_0, 0))
    tb.connect((filter_fir_filter_xxx_0_0_0, 0), (blocks_streams_to_stream_0, 1))
    tb.connect((blocks_streams_to_stream_0, 0), (blocks_skiphead_0, 0))
    tb.connect((blocks_skiphead_0, 0), (viterbi, 0))
    tb.connect((viterbi, 0), (blocks_vector_sink_x_0, 0))

    tb.run()
    dataest = blocks_vector_sink_x_0.data()
    #print data
    #print numpy.array(dataest)
    # Count symbol errors over the payload (head/tail excluded).
    perr = 0
    err = 0
    for i in range(blocksize):
        if data[head+i] != dataest[head+i]:
            #print i
            err += 1
    if err != 0 :
        perr = 1

    return (err,perr)


if __name__ == '__main__':
    # Run 10000 independent trials and print cumulative symbol/block
    # error rates every 100 trials (Python 2 print statements).
    blocksize = 1000
    ss=0
    ee=0
    for i in range(10000):
        (s,e) = run_test(i,blocksize)
        ss += s
        ee += e
        if (i+1) % 100 == 0:
            print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
    print i+1,ss,ee,(1.0*ss)/(i+1)/(1.0*blocksize),(1.0*ee)/(i+1)
| gpl-3.0 |
tfmorris/cc-mrjob | tag_counter.py | 4 | 1852 | import re
#
from collections import Counter
#
from mrcc import CCJob
def get_tag_count(data, ctr=None):
    """Extract the names and total usage count of all the opening HTML tags in the document"""
    counts = Counter() if ctr is None else ctr
    # HTML tag names are case insensitive, so normalise before matching.
    counts.update(m.group(1) for m in HTML_TAG_PATTERN.finditer(data.lower()))
    return counts

# Compile the pattern a single time up front: a tag is '<', a name made of
# (lowercased) letters and digits, then optional attributes up to the '>'.
HTML_TAG_PATTERN = re.compile('<([a-z0-9]+)[^>]*>')

# Sanity check that the counter behaves as expected
assert get_tag_count('<html><a href="..."></a><h1 /><br/><p><p></p></p>') == {'html': 1, 'a': 1, 'p': 2, 'h1': 1, 'br': 1}
class TagCounter(CCJob):
    """Common Crawl MapReduce job emitting (tag_name, count) per HTML page."""

    def process_record(self, record):
        # WARC records have three different types:
        #  ["application/warc-fields", "application/http; msgtype=request", "application/http; msgtype=response"]
        # We're only interested in the HTTP responses
        if record['Content-Type'] == 'application/http; msgtype=response':
            payload = record.payload.read()
            # The HTTP response is defined by a specification: first part is headers (metadata)
            # and then following two CRLFs (newlines) has the data for the response
            headers, body = payload.split('\r\n\r\n', 1)
            if 'Content-Type: text/html' in headers:
                # We avoid creating a new Counter for each page as that's actually quite slow
                tag_count = get_tag_count(body)
                for tag, count in tag_count.items():
                    yield tag, count
                self.increment_counter('commoncrawl', 'processed_pages', 1)


if __name__ == '__main__':
    TagCounter.run()
| mit |
JamesMura/sentry | src/sentry/filters/web_crawlers.py | 3 | 1518 | from __future__ import absolute_import
import re
from .base import Filter
# not all of these agents are guaranteed to execute JavaScript, but to avoid
# overhead of identifying which ones do, and which ones will over time we simply
# target all of the major ones
CRAWLERS = re.compile(r'|'.join((
# various Google services
r'AdsBot',
# Google Adsense
r'Mediapartners',
# Google+ and Google web search
r'Google',
# Bing search
r'BingBot',
# Baidu search
r'Baiduspider',
# Yahoo
r'Slurp',
# Sogou
r'Sogou',
# facebook
r'facebook',
# Alexa
r'ia_archiver',
# Generic bot
r'bot[\/\s\)\;]',
# Generic spider
r'spider[\/\s\)\;]',
)), re.I)
class WebCrawlersFilter(Filter):
    """Filter that drops events whose HTTP User-Agent matches a known crawler."""

    id = 'web-crawlers'
    name = 'Filter out known web crawlers'
    description = 'Some crawlers may execute pages in incompatible ways which then cause errors that are unlikely to be seen by a normal user.'
    default = True

    def get_user_agent(self, data):
        """Return the User-Agent header from the event payload, or '' if absent.

        BUG FIX: previously the function fell through and returned None when
        the headers were present but contained no User-Agent entry; it now
        returns '' in every "not found" case for a consistent return type.
        """
        try:
            for key, value in data['sentry.interfaces.Http']['headers']:
                if key.lower() == 'user-agent':
                    return value
        except LookupError:
            return ''
        return ''

    def test(self, data):
        """Return True if the event comes from a known crawler and should be filtered."""
        # TODO(dcramer): we could also look at UA parser and use the 'Spider'
        # device type
        user_agent = self.get_user_agent(data)
        if not user_agent:
            return False
        return bool(CRAWLERS.search(user_agent))
| bsd-3-clause |
loicdubois/pdm_crazyflie | catkin_old/build_isolated/crazyflie/catkin_generated/installspace/_setup_util.py | 5 | 12465 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# A file with this name marks a directory as the root of a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'

system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    # OS X uses DYLD_LIBRARY_PATH instead of LD_LIBRARY_PATH
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    """Generate shell code that resets environment variables by unrolling
    modifications based on all workspaces in CMAKE_PREFIX_PATH.

    Mutates *environ* in place with the rolled-back values and returns the
    corresponding shell lines.  Modifications performed by environment
    hooks are not covered.
    """
    snapshot = copy.copy(environ)
    shell_lines = []
    for name in sorted(env_var_subfolders):
        folders = env_var_subfolders[name]
        if not isinstance(folders, list):
            folders = [folders]
        rolled_back = _rollback_env_variable(snapshot, name, folders)
        if rolled_back is None:
            continue
        environ[name] = rolled_back
        shell_lines.append(assignment(name, rolled_back))
    if shell_lines:
        shell_lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return shell_lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable, or None if nothing was removed.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # Normalize the subfolder: drop a single leading and a single
            # trailing path separator (either os.sep or os.altsep).
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Compare ignoring a single trailing separator on the env entry.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                # Remove only the first matching entry per workspace.
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    """Return all catkin workspaces listed in CMAKE_PREFIX_PATH.

    :param include_fuerte: also treat paths under '/opt/ros/fuerte' as
        workspaces, ``bool``
    :param include_non_existing: also keep paths that do not exist on disk
    """
    prefix_value = environ.get('CMAKE_PREFIX_PATH', '')
    prefix_paths = [p for p in prefix_value.split(os.pathsep) if p]

    def _is_workspace(path):
        # A workspace is marked by the catkin marker file; the other two
        # cases are opt-in via the keyword arguments.
        if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and path.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(path)

    return [p for p in prefix_paths if _is_workspace(p)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    workspace_paths = [p for p in workspaces.split(os.pathsep) if p]
    lines = [comment('prepend folders of workspaces to environment variables')]
    # CMAKE_PREFIX_PATH itself is handled first and without any subfolder
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH',
                         _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', workspace_paths, '')))
    # then every other variable, each with its configured subfolder(s)
    for name in sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH'):
        lines.append(prepend(environ, name,
                             _prefix_env_variable(environ, name, workspace_paths, env_var_subfolders[name])))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
    '''
    Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.

    :param environ: mapping of current environment variables
    :param name: the environment variable to compute a prefix for
    :param paths: workspace paths to consider
    :param subfolders: a subfolder name or list of subfolder names ('' means
        the workspace root itself)
    :returns: str prefix ('' when nothing needs to be prepended); a trailing
        ``os.pathsep`` is appended when the variable already has a value
    '''
    value = environ[name] if name in environ else ''
    environ_paths = [path for path in value.split(os.pathsep) if path]
    # normalize once (loop-invariant): a single subfolder may be passed as a
    # plain string instead of a list
    if not isinstance(subfolders, list):
        subfolders = [subfolders]
    checked_paths = []
    for path in paths:
        for subfolder in subfolders:
            path_tmp = os.path.join(path, subfolder) if subfolder else path
            # skip nonexistent paths
            if not os.path.exists(path_tmp):
                continue
            # exclude any path already in env and any path we already added
            if path_tmp not in environ_paths and path_tmp not in checked_paths:
                checked_paths.append(path_tmp)
    prefix_str = os.pathsep.join(checked_paths)
    # the separator glues the new prefix onto the existing value
    if prefix_str != '' and environ_paths:
        prefix_str += os.pathsep
    return prefix_str
def assignment(key, value):
    # emit a shell-appropriate variable assignment line
    template = 'set %s=%s' if IS_WINDOWS else 'export %s="%s"'
    return template % (key, value)
def comment(msg):
    # emit a shell-appropriate comment line
    marker = 'REM' if IS_WINDOWS else '#'
    return '%s %s' % (marker, msg)
def prepend(environ, key, prefix):
    # when the variable is unset or empty a plain assignment suffices
    if not environ.get(key):
        return assignment(key, prefix)
    # otherwise re-export with the prefix glued in front of the old value
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.

    Workspaces earlier in CMAKE_PREFIX_PATH have higher priority: a hook
    file with the same basename replaces one collected from a lower
    priority workspace (hence the iteration over reversed(workspaces)).
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))

    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None

    def _register(hooks, hooks_workspace, hooks_by_filename, filename, env_hook_dir, workspace):
        # remove previous env hook with same name if present, then append
        # the new one and remember it by basename for later replacement
        if filename in hooks_by_filename:
            i = hooks.index(hooks_by_filename[filename])
            hooks.pop(i)
            hooks_workspace.pop(i)
        hooks.append(os.path.join(env_hook_dir, filename))
        hooks_workspace.append(workspace)
        hooks_by_filename[filename] = hooks[-1]

    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    _register(generic_env_hooks, generic_env_hooks_workspace, generic_env_hooks_by_filename, filename, env_hook_dir, workspace)
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    _register(specific_env_hooks, specific_env_hooks_workspace, specific_env_hooks_by_filename, filename, env_hook_dir, workspace)

    # generic hooks run before shell-specific ones
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
    """Parse the known command line options; unknown arguments are ignored."""
    parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
    parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
    known, _unknown = parser.parse_known_args(args=args)
    return known
# Entry point: emit on stdout the shell code that setup.SHELL will eval.
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        CMAKE_PREFIX_PATH = '/home/loic_dubois/Documents/pdm_crazyflie/catkin/devel;/opt/ros/kinetic'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        if not args.extend:
            # undo the effect of previously sourced workspaces first
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| mit |
likr/causalnet | bin/json_to_neo4j.py | 1 | 2689 | import csv
import json
import os.path
import argparse
# CSV column headers in the "name:TYPE" style (e.g. ``:ID(...)``, ``:INT``,
# ``:DOUBLE[]``) — presumably the Neo4j bulk-import header format, given the
# script name; verify against the import tool in use.
variables_header = [
    'name:ID(Phenotype)',
    'description',
    'timeGroup',
    'timeGroupDetail',
    'timeOrder:INT',
    'unit',
    'type',
    'cells:STRING[]',
    'data:DOUBLE[]',
]
# edge files: start node, end node and the edge weight
edges_header = [
    ':START_ID(Phenotype)',
    ':END_ID(Phenotype)',
    'value:DOUBLE',
]
# headers for texts.csv; kept sorted so rows can be built by key lookup
# in the same column order (see main()).
texts_header = sorted([
    'id:ID(Text)',
    'cell_P0:INT',
    'word_sphericity:INT',
    'word_position:INT',
    'cell_E:INT',
    'cell_ABal:INT',
    'count_word:INT',
    'count_cell:INT',
    'word_axis:INT',
    'arg1',
    'cell_ABpr:INT',
    'cell_C:INT',
    'arg2',
    'cell_ABpl:INT',
    'line:INT',
    'cell_P3:INT',
    'word_period:INT',
    'word_distance:INT',
    'text',
    'cell_ABa:INT',
    'cell_EMS:INT',
    'cell_AB:INT',
    'cell_MS:INT',
    'type',
    'word_volume:INT',
    'authors',
    'doc_line',
    'arg3',
    'cell_P1:INT',
    'cell_P2:INT',
    'published',
    'word_angle:INT',
    'doc_id',
    'title',
    'cell_ABar:INT',
    'cell_ABp:INT',
    'verb',
])
def opencsv(outpath, filename):
    """Return a csv.writer over a newly created *filename* inside *outpath*.

    NOTE(review): the underlying file object is not retained by the caller,
    so it is only closed when the writer is garbage collected — acceptable
    for this short-lived conversion script.
    """
    # os.path.join instead of manual '/' concatenation keeps this portable
    path = os.path.join(os.path.abspath(outpath), filename)
    return csv.writer(open(path, 'w'))
def main():
    """Convert the causalnet JSON dump into CSV files for bulk import."""
    argp = argparse.ArgumentParser()
    argp.add_argument('-f', dest='infile', required=True)
    argp.add_argument('-d', dest='dest', default='.')
    options = argp.parse_args()
    data = json.load(open(options.infile))

    # variables.csv: one row per phenotype vertex
    var_writer = opencsv(options.dest, 'variables.csv')
    var_writer.writerow(variables_header)
    for vertex in data['vertices']:
        attrs = vertex['d']
        var_writer.writerow([
            attrs['name'],
            attrs['description'],
            attrs['layer'].split('|')[0].strip(),
            attrs['layer'],
            attrs['layerOrder'],
            attrs['unit'],
            attrs['variableType'],
            ';'.join(attrs['cells']),
            ';'.join(map(str, attrs['data'])),
        ])

    # map internal vertex ids to node names for the edge files
    indices = {vertex['u']: vertex['d']['name'] for vertex in data['vertices']}

    # one edge file per weight attribute stored on each edge
    for weight_key, outname in (('r', 'correlation.csv'),
                                ('l', 'lasso.csv'),
                                ('bl', 'baysian-lasso.csv')):
        edge_writer = opencsv(options.dest, outname)
        edge_writer.writerow(edges_header)
        for edge in data['edges']:
            edge_writer.writerow([
                indices[edge['u']],
                indices[edge['v']],
                edge['d'][weight_key],
            ])

    # texts.csv: the ":TYPE" suffix is stripped from each header to obtain
    # the JSON key, keeping rows in header order
    text_writer = opencsv(options.dest, 'texts.csv')
    text_writer.writerow(texts_header)
    for text in data['texts']:
        text_writer.writerow([text[col.split(':')[0]] for col in texts_header])
# run the converter only when executed as a script
if __name__ == '__main__':
    main()
| mit |
upibhalla/moose-core | python/moose/chemMerge/merge.py | 4 | 35192 | # -*- coding: utf-8 -*-
#*******************************************************************
# * File: merge.py
# * Description:
# * Author: HarshaRani
# * E-mail: hrani@ncbs.res.in
# ********************************************************************/
# **********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment,
#** also known as GENESIS 3 base code.
#** copyright (C) 2003-2017 Upinder S. Bhalla. and NCBS
#Created : Friday Dec 16 23:19:00 2016(+0530)
#Version
#Last-Updated: Wed May 21 11:52:33 2018(+0530)
# By: Harsha
#**********************************************************************/
# This program is used to merge chem models from src to destination
#Rules are :
# -- If Compartment from the src model doesn't exist in destination model,
# then entire compartment and its children are copied over including groups
# -- Models are merged at group level (if exists)
# (Group is Neutral object in moose, which may represent pathway in network model)
# -- Pool's are copied from source to destination if it doesn't exist, if exist nothing is done
# -- Reaction (Reac), Enzyme (Enz) are copied
# --- if any dangling Reac or Enz exist then that is not copied
#
# --- if Reac Name's is different for a given path (group level)
# then copy the entire Reac along with substrate/product
# --- if same Reac Name and same sub and prd then nothing is copied
# --- if same Reac Name but sub or prd is different then duplicated and copied
#
# --- if Enz Name's is different for a given parent pool path
# then copy the entire Enz along with substrate/product
# --- if same Enz Name and same sub and prd then nothing is copied
# --- if same Enz Name but sub or prd is different then duplicated and copied
# -- Function are copied only if destination pool to which its suppose to connect doesn't exist with function of its own
# which is a limitation in moose that no function can be connected to same pool
#
'''
Change log
May 21 : Instead of A and B changed to S and D (source and destination),
- at Pool level Neutral and its objected where copied, but again at Enz and Reac level copying was done which caused duplicates which is taken care
- If Neutral object name is changed for destination, then change made to source file which would be easy to copy else path issue will come
Oct 25 : line to load SBML file was commented, which is uncommented now and also while copying MMenz had a problem which also cleaned up
Oct 14 : absolute import with mtypes just for python3
Oct 12 : clean way of cheking the type of path provided, filepath,moose obj, moose path are taken,
if source is empty then nothing to copy,
if destination was empty list is update with new object
Oct 11 : missing group are copied instead of creating one in new path which also copies Annotator info
earlier if user asked to save the model, it was saving default to kkit format, now user need to run the command to save (if this is run in command)
To check: When Gui is allowed to merge 2 models, need to see what happens
'''
import sys
import os
#from . import _moose as moose
import moose
from moose.chemMerge import mtypes
from moose.chemUtil.chemConnectUtil import *
from moose.chemUtil.graphUtils import *
#from moose.genesis import mooseWriteKkit
def checkFile_Obj_str(file_Obj_str):
    """Resolve *file_Obj_str* into a loaded moose model path.

    Accepts a filename (loaded via loadModels), an existing moose path
    string, or a moose.Neutral object.
    :returns: (model, loaded) where loaded is False when nothing was found.
    """
    model = moose.element('/')
    loaded = False
    found = False
    if isinstance(file_Obj_str, str):
        if os.path.isfile(file_Obj_str) == True:
            model,loaded = loadModels(file_Obj_str)
            found = True
        elif file_Obj_str.find('/') != -1 :
            # a string can never be a moose.Neutral, so the original
            # isinstance checks inside this branch were dead code
            if moose.exists(file_Obj_str):
                model = file_Obj_str
                loaded = True
                found = True
    elif isinstance(file_Obj_str, moose.Neutral):
        if moose.exists(file_Obj_str.path):
            model = file_Obj_str.path
            loaded = True
            found = True
    if not found:
        print ("%s path or filename doesnot exist. " % (file_Obj_str))
    return model,loaded
def mergeChemModel(src,des):
    """ Merges two model or the path

    *src* and *des* may each be a filename (kkit/cspace/SBML), a moose path
    or a moose object.  The chemical content (compartments, pools,
    reactions, enzymes, functions) of the source model is merged into the
    destination model following the rules documented at the top of this
    file.
    :returns: the destination model path on success, the moose root
        element otherwise.
    """
    A = src
    B = des
    sfile = src
    dfile = des
    loadedA = False
    loadedB = False
    modelA = moose.element('/')
    modelB = moose.element('/')
    modelA,loadedA = checkFile_Obj_str(A)
    modelB,loadedB = checkFile_Obj_str(B)
    if loadedA and loadedB:
        ## deleteSolver is called to make sure all the moose objects are off the solver
        deleteSolver(modelA)
        deleteSolver(modelB)

        global poolListina
        poolListina = {}
        dictComptA = dict( [ (i.name,i) for i in moose.wildcardFind(modelA+'/##[ISA=ChemCompt]') ] )
        dictComptB = dict( [ (i.name,i) for i in moose.wildcardFind(modelB+'/##[ISA=ChemCompt]') ] )
        poolNotcopiedyet = []
        if len(dictComptA):
            for key in list(dictComptA.keys()):
                if key not in dictComptB:
                    # compartment from source missing in destination: copy wholesale
                    copy = moose.copy(dictComptA[key],moose.element(modelB))
                    dictComptB[key]=moose.element(copy)
                else:
                    # compartment exists in both: force the source volume to
                    # match the destination's
                    if abs(dictComptB[key].volume - dictComptA[key].volume):
                        #hack for now
                        while (abs(dictComptB[key].volume - dictComptA[key].volume) != 0.0):
                            dictComptA[key].volume = float(dictComptB[key].volume)
            dictComptB = dict( [ (i.name,i) for i in moose.wildcardFind(modelB+'/##[ISA=ChemCompt]') ] )

            # Merging pools for every compartment.  BUGFIX: this call used to
            # sit after the loop above and therefore only processed the last
            # iterated key; it is idempotent for freshly copied compartments.
            for key in list(dictComptA.keys()):
                poolMerge(dictComptA[key],dictComptB[key],poolNotcopiedyet)

        comptBdict = comptList(modelB)
        poolListinb = {}
        poolListinb = updatePoolList(comptBdict)
        R_Duplicated, R_Notcopiedyet,R_Dangling = [], [], []
        E_Duplicated, E_Notcopiedyet,E_Dangling = [], [], []
        for key in list(dictComptA.keys()):
            funcExist, funcNotallowed = [], []
            funcExist,funcNotallowed = functionMerge(dictComptB,dictComptA,key)
            poolListinb = updatePoolList(dictComptB)
            # NOTE(review): these result lists are overwritten on every
            # iteration, so the report below only covers the last compartment.
            R_Duplicated,R_Notcopiedyet,R_Dangling = reacMerge(dictComptA,dictComptB,key,poolListinb)
            poolListinb = updatePoolList(dictComptB)
            E_Duplicated,E_Notcopiedyet,E_Dangling = enzymeMerge(dictComptB,dictComptA,key,poolListinb)

        print("\nThe content of %s (src) model is merged to %s (des)." %(sfile, dfile))
        # Below: report everything that could not be merged cleanly.
        if funcExist:
            print( "\nIn model \"%s\" pool already has connection from a function, these function from model \"%s\" is not allowed to connect to same pool,\n since no two function are allowed to connect to same pool:"%(dfile, sfile))
            for fl in list(funcExist):
                print("\t [Pool]: %s [Function]: %s \n" %(str(fl.parent.name), str(fl.path)))
        if funcNotallowed:
            print( "\nThese functions is not to copied, since pool connected to function input are from different compartment:")
            for fl in list(funcNotallowed):
                print("\t [Pool]: %s [Function]: %s \n" %(str(fl.parent.name), str(fl.path)))
        if R_Duplicated or E_Duplicated:
            print ("These Reaction / Enzyme are \"Duplicated\" into destination file \"%s\", due to "
                    "\n 1. If substrate / product name's are different for a give reaction/Enzyme name "
                    "\n 2. If product belongs to different compartment "
                    "\n Models have to decide to keep or delete these reaction/enzyme in %s" %(dfile, dfile))
            if R_Duplicated:
                # BUGFIX: this condition used to test E_Duplicated, so the
                # reaction list was printed under the wrong guard.
                print("Reaction: ")
                for rd in list(R_Duplicated):
                    print ("%s " %str(rd.name))
            if E_Duplicated:
                print ("Enzyme:")
                for ed in list(E_Duplicated):
                    print ("%s " %str(ed.name))
        if R_Notcopiedyet or E_Notcopiedyet:
            print ("\nThese Reaction/Enzyme in model are not dagging but while copying the associated substrate or product is missing")
            if R_Notcopiedyet:
                print("Reaction: ")
                for rd in list(R_Notcopiedyet):
                    print ("%s " %str(rd.name))
            if E_Notcopiedyet:
                print ("Enzyme:")
                for ed in list(E_Notcopiedyet):
                    print ("%s " %str(ed.name))
        if R_Dangling or E_Dangling:
            print ("\n Dangling reaction/enzyme are not allowed in moose, these are not merged to %s from %s" %(dfile, sfile))
            if R_Dangling:
                print("Reaction: ")
                for rd in list(R_Dangling):
                    print ("%s " %str(rd.name))
            if E_Dangling:
                print ("Enzyme:")
                for ed in list(E_Dangling):
                    print ("%s " %str(ed.name))

        ## Model is saved
        print ("\n ")
        print ('\nMerged model is available under moose.element(\'%s\')' %(modelB))
        print (' From the python terminal itself \n to save the model in to genesis format use \n >moose.mooseWriteKkit(\'%s\',\'filename.g\')' %(modelB))
        print (' to save into SBML format \n >moose.mooseWriteSBML(\'%s\',\'filename.xml\')' %(modelB))
        return modelB
    else:
        print ('\nSource file has no objects to copy(\'%s\')' %(modelA))
        return moose.element('/')
def functionMerge(comptA,comptB,key):
    """Copy moose Functions for compartment *key* from model B into model A.

    A function is copied only when the destination pool it would drive has
    no existing setN/setConc/increment connection (moose allows at most one
    function per pool) and all of its inputs live in a single compartment.
    :returns: (funcExist, funcNotallowed) — functions skipped because the
        destination pool already has a function, and functions whose inputs
        span compartments.
    """
    funcNotallowed, funcExist = [], []
    comptApath = moose.element(comptA[key]).path
    comptBpath = moose.element(comptB[key]).path
    objA = moose.element(comptApath).parent.name
    objB = moose.element(comptBpath).parent.name
    #This will give us all the function which exist in modelB
    funcListinb = moose.wildcardFind(comptBpath+'/##[ISA=Function]')
    for fb in funcListinb:
        #This will give us all the pools that its connected to, for this function
        fvalueOut = moose.element(fb).neighbors['valueOut']
        for poolinB in fvalueOut:
            poolinBpath = poolinB.path
            # re-root the pool path from model B into model A
            poolinA = poolinBpath.replace(objB,objA)
            connectionexist = []
            if moose.exists(poolinA):
                #This is give us if pool which is to be connected already exist any connection
                connectionexist = moose.element(poolinA).neighbors['setN']+moose.element(poolinA).neighbors['setConc']+ moose.element(poolinA).neighbors['increment']
                if len(connectionexist) == 0:
                    #This pool in model A doesnot exist with any function
                    inputs = moose.element(fb.path+'/x').neighbors['input']
                    volumes = []
                    for ins in inputs:
                        volumes.append((findCompartment(moose.element(ins))).volume)
                    if len(set(volumes)) == 1:
                        # If all the input connected belongs to one compartment then copy
                        createFunction(fb,poolinA,objB,objA)
                    else:
                        # moose doesn't allow function's input to come from different compartment
                        funcNotallowed.append(fb)
                else:
                    #Pool in model 'A' already exist function "
                    funcExist.append(fb)
            else:
                print(" Path in model A doesn't exists %s" %(poolinA))
    return funcExist,funcNotallowed
def createFunction(fb,setpool,objB,objA):
    """Recreate function *fb* (from model B) in model A and wire it to *setpool*.

    The destination function path is derived by re-rooting fb.path from
    objB to objA.  Inputs are reconnected only when the destination
    function has none yet; the expression is rebuilt incrementally as a
    sum of input variables ("x0+x1+...").
    """
    fapath1 = fb.path.replace(objB,objA)
    fapath = fapath1.replace('[0]','')
    if not moose.exists(fapath):
        # if fb.parent.className in ['CubeMesh','CyclMesh']:
        #     des = moose.Function('/'+objA+'/'+fb.parent.name+'/'+fb.name)
        # elif fb.parent.className in ['Pool','ZombiePool','BufPool','ZombieBufPool']:
        #     for akey in list(poolListina[findCompartment(fb).name]):
        #         if fb.parent.name == akey.name:
        #             des = moose.Function(akey.path+'/'+fb.name)
        des = moose.Function(fapath)
    else:
        des = moose.element(fapath)
    inputB = moose.element(fb.path+'/x').neighbors["input"]
    moose.connect(des, 'valueOut', moose.element(setpool),'setN' )
    inputA = []
    inputA = moose.element(fapath+'/x').neighbors["input"]
    # only wire inputs when the destination function has none yet
    if not inputA:
        for src in inputB:
            pool = ((src.path).replace(objB,objA)).replace('[0]','')
            numVariables = des.numVars
            expr = ""
            # append the next input variable, then normalize the expression
            expr = (des.expr+'+'+'x'+str(numVariables))
            expr = expr.lstrip("0 +")
            expr = expr.replace(" ","")
            des.expr = expr
            moose.connect( pool, 'nOut', des.x[numVariables], 'input' )
def comptList(modelpath):
    """Return a {name: element} dict of every ChemCompt under *modelpath*."""
    return dict((compt.name, compt)
                for compt in moose.wildcardFind(modelpath+'/##[ISA=ChemCompt]'))
def loadModels(filepath):
    """ load models into moose if file, if moosepath itself it passes back the path and
    delete solver if exist

    kkit/cspace files go through moose.loadModel, SBML through
    moose.mooseReadSBML; an existing moose path is returned unchanged.
    :returns: (modelpath, loaded) — loaded is False for unsupported input.
    """
    modelpath = '/'
    loaded = False
    if os.path.isfile(filepath) :
        fpath, filename = os.path.split(filepath)
        # print " head and tail ",head,  " ",tail
        # modelpath = filename[filename.rfind('/'): filename.rfind('.')]
        # print "modelpath ",modelpath
        # ext = os.path.splitext(filename)[1]
        # filename = filename.strip()
        # the model is loaded under /<basename-without-extension>
        modelpath = '/'+filename[:filename.rfind('.')]
        modeltype = mtypes.getType(filepath)
        subtype = mtypes.getSubtype(filepath, modeltype)
        if subtype == 'kkit' or modeltype == "cspace":
            # replace any previously loaded model of the same name
            if moose.exists(modelpath):
                moose.delete(modelpath)
            moose.loadModel(filepath,modelpath)
            loaded = True
        elif subtype == 'sbml':
            if moose.exists(modelpath):
                moose.delete(modelpath)
            moose.mooseReadSBML(filepath,modelpath)
            loaded = True
        else:
            print("This file is not supported for mergering")
            modelpath = moose.Shell('/')
    elif moose.exists(filepath):
        # already a moose path — nothing to load
        modelpath = filepath
        loaded = True
    return modelpath,loaded
def deleteSolver(modelRoot):
    """Remove stoich (and its ksolve) from every compartment of *modelRoot*."""
    for compartment in moose.wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
        stoich_path = compartment.path+'/stoich'
        if not moose.exists(stoich_path):
            continue
        stoich = moose.element(stoich_path)
        # remember the attached solver before deleting the stoich object
        solver = stoich.ksolve
        moose.delete(stoich)
        if moose.exists(solver.path):
            moose.delete(solver)
def poolMerge(comptS,comptD,poolNotcopiedyet):
    """Copy pools, group by group, from source compartment *comptS* into *comptD*.

    Missing Neutral groups are created in the destination first; pools that
    could not be copied (enzyme complexes) are appended to
    *poolNotcopiedyet* and handled later by enzymeMerge.
    """
    #Here from source file all the groups are check if exist, if doesn't exist then create those groups
    #Then for that group pools are copied
    SCmptGrp = moose.wildcardFind(comptS.path+'/#[TYPE=Neutral]')
    SCmptGrp = SCmptGrp +(moose.element(comptS.path),)

    DCmptGrp = moose.wildcardFind(comptD.path+'/#[TYPE=Neutral]')
    DCmptGrp = DCmptGrp +(moose.element(comptD.path),)
    objS = moose.element(comptS.path).parent.name
    objD = moose.element(comptD.path).parent.name
    for spath in SCmptGrp:
        grp_cmpt = ((spath.path).replace(objS,objD)).replace('[0]','')
        if moose.exists(grp_cmpt):
            #If path exist, but its not the Neutral obj then creating a neutral obj with _grp
            #It has happened that pool, reac, enz name might exist with the same name, which when tried to create a group
            # it silently ignored the path and object copied under that pool instead of Group
            if moose.element(grp_cmpt).className != spath.className:
                grp_cmpt = grp_cmpt+'_grp'
                moose.Neutral(grp_cmpt)
                # If group name is changed while creating in destination, then in source file the same is changed,
                # so that later path issue doens't come
                spath.name = spath.name+'_grp'
        else:
            #Neutral obj from src if doesn't exist in destination,then create src's Neutral obj in des
            src = spath
            srcpath = (spath.parent).path
            des = srcpath.replace(objS,objD)
            moose.Neutral(moose.element(des).path+'/'+spath.name)

        dpath = moose.element(spath.path.replace(objS,objD))
        spoollist = moose.wildcardFind(spath.path+'/#[ISA=PoolBase]')
        dpoollist = moose.wildcardFind(dpath.path+'/#[ISA=PoolBase]')
        #check made, for a given Neutral or group if pool doesn't exist then copied
        # but some pool if enzyme cplx then there are holded untill that enzyme is copied in
        # `enzymeMerge` function
        for spool in spoollist:
            if spool.name not in [dpool.name for dpool in dpoollist]:
                copied = copy_deleteUnlyingPoolObj(spool,dpath)
                if copied == False:
                    #hold it for later, this pool may be under enzyme, as cplx
                    poolNotcopiedyet.append(spool)
def copy_deleteUnlyingPoolObj(pool,path):
    """Copy *pool* under *path*, stripping functions/enzymes copied with it.

    :returns: True when the pool was copied; False when it is an enzyme
        complex (copied later together with its enzyme).
    """
    # check if this pool is under compartement or under enzyme?(which is enzyme_cplx)
    # if enzyme_cplx then don't copy untill this perticular enzyme is copied
    # case: This enzyme_cplx might exist in modelA if enzyme exist then this
    # will automatically copie's the pool
    copied = False

    if pool.parent.className not in ["Enz","ZombieEnz","MMenz","ZombieMMenz"]:
        if path.className in ["Neutral","CubeMesh","CyclMesh"]:
            poolcopied = moose.copy(pool,path)
            copied = True
            # deleting function and enzyme which gets copied if exist under pool
            # This is done to ensure Dangling function / enzyme not copied.
            funclist = []
            for types in ['setConc','setN','increment']:
                funclist.extend(moose.element(poolcopied).neighbors[types])
            for fl in funclist:
                moose.delete(fl)
            enzlist = moose.element(poolcopied).neighbors['reac']
            for el in list(set(enzlist)):
                moose.delete(el.path)
    return copied
def updatePoolList(comptAdict):
    """Refresh the module-level poolListina map (compartment name -> pools)."""
    for compt_name, compt in list(comptAdict.items()):
        poolListina[compt_name] = moose.wildcardFind(compt.path+'/##[ISA=PoolBase]')
    return poolListina
def enzymeMerge(comptD,comptS,key,poolListind):
    """Copy enzymes for compartment *key* from source model into destination.

    An enzyme is copied when absent; an enzyme with the same name but a
    different substrate/product set (or differing compartment volumes) is
    duplicated with a "_duplicated" suffix; enzymes without both substrates
    and products are treated as dangling and skipped.
    :returns: (RE_Duplicated, RE_Notcopiedyet, RE_Dangling) lists.
    """
    war_msg = ""
    RE_Duplicated, RE_Notcopiedyet, RE_Dangling = [], [], []
    comptDpath = moose.element(comptD[key]).path
    comptSpath = moose.element(comptS[key]).path
    objD = moose.element(comptDpath).parent.name
    objS = moose.element(comptSpath).parent.name
    #nzyListina => enzyListind
    enzyListind = moose.wildcardFind(comptDpath+'/##[ISA=EnzBase]')
    enzyListins = moose.wildcardFind(comptSpath+'/##[ISA=EnzBase]')
    for es in enzyListins:
        eSsubname, eSprdname = [],[]
        eSsubname = subprdList(es,"sub")
        eSprdname = subprdList(es,"prd")
        allexists, allexistp = False, False
        allclean = False

        poolinDlist = poolListind[findCompartment(es).name]
        for pD in poolinDlist:
            if es.parent.name == pD.name:
                edpath = es.parent.path.replace(objS,objD)

                if not moose.exists(edpath+'/'+es.name):
                    #This will take care
                    #  -- If same enzparent name but different enzyme name
                    #  -- or different parent/enzyme name
                    if eSsubname and eSprdname:
                        allexists = checkexist(eSsubname,objS,objD)
                        allexistp = checkexist(eSprdname,objS,objD)
                        if allexists and allexistp:
                            enzPool = moose.element(pD.path)
                            edpath = es.parent.path.replace(objS,objD)
                            enz = moose.element(moose.copy(es,moose.element(edpath)))
                            enzPool = enz.parent
                            # wire the enzyme to its parent pool per class
                            if es.className in ["ZombieEnz","Enz"]:
                                moose.connect(moose.element(enz),"enz",enzPool,"reac")
                            if es.className in ["ZombieMMenz","MMenz"]:
                                moose.connect(enzPool,"nOut",enz,"enzDest")
                            connectObj(enz,eSsubname,"sub",comptD,war_msg)
                            connectObj(enz,eSprdname,"prd",comptD,war_msg)
                            allclean = True
                        else:
                            # didn't find sub or prd for this Enzyme
                            RE_Notcopiedyet.append(es)
                    else:
                        #  -- it is dagging reaction
                        RE_Dangling.append(es)
                else:
                    #Same Enzyme name
                    #   -- Same substrate and product including same volume then don't copy
                    #   -- different substrate/product or if sub/prd's volume is different then DUPLICATE the Enzyme
                    allclean = False
                    ed = moose.element(es.path.replace(objS,objD))
                    eDsubname = subprdList(ed,"sub")
                    eSsubname = subprdList(es,"sub")
                    hasSamenoofsublen,hasSameS,hasSamevols = same_len_name_vol(eDsubname,eSsubname)

                    eDprdname = subprdList(ed,"prd")
                    eSprdname = subprdList(es,"prd")
                    hasSamenoofprdlen,hasSameP,hasSamevolp = same_len_name_vol(eDprdname,eSprdname)
                    if not all((hasSamenoofsublen,hasSameS,hasSamevols,hasSamenoofprdlen,hasSameP,hasSamevolp)):
                        # May be different substrate or product or volume of Sub/prd may be different,
                        # Duplicating the enzyme
                        if eSsubname and eSprdname:
                            allexists,allexistp = False,False
                            allexists = checkexist(eSsubname,objS,objD)
                            allexistp = checkexist(eSprdname,objS,objD)
                            if allexists and allexistp:
                                es.name = es.name+"_duplicated"
                                if es.className in ["ZombieEnz","Enz"]:
                                    edpath = es.parent.path.replace(objS,objD)
                                    enz = moose.copy(es,moose.element(edpath))
                                    moose.connect(enz, 'enz', edpath, 'reac' )

                                if es.className in ["ZombieMMenz","MMenz"]:
                                    edpath = es.parent.path.replace(objS,objD)
                                    enz = moose.copy(es,moose.element(edpath))
                                    enzinfo = moose.Annotator(enz.path+'/info')
                                    moose.connect(moose.element(enz).parent,"nOut",moose.element(enz),"enzDest")
                                    #moose.connect(moose.element(enz),"enz",moose.element(enz).parent,"reac")

                                connectObj(enz,eSsubname,"sub",comptD,war_msg)
                                connectObj(enz,eSprdname,"prd",comptD,war_msg)
                                RE_Duplicated.append(enz)
                                allclean = True
                            else:
                                allclean = False
                    else:
                        allclean = True

                    if not allclean:
                        # didn't find sub or prd for this enzyme
                        #   --  it may be connected Enzyme cplx
                        if eSsubname and eSprdname:
                            RE_Notcopiedyet.append(es)
                        else:
                            RE_Dangling.append(es)

    return RE_Duplicated,RE_Notcopiedyet,RE_Dangling
def reacMerge(comptS,comptD,key,poolListina):
    """Copy reactions for compartment *key* from source model into destination.

    A reaction is copied when its name is absent; one with the same name
    but a different substrate/product set (or differing compartment
    volumes) is duplicated with a "_duplicated" suffix; reactions without
    both substrates and products are treated as dangling and skipped.
    :returns: (RE_Duplicated, RE_Notcopiedyet, RE_Dangling) lists.
    """
    RE_Duplicated, RE_Notcopiedyet, RE_Dangling = [], [], []
    war_msg = ""
    comptSpath = moose.element(comptS[key]).path
    comptDpath = moose.element(comptD[key]).path
    objS = moose.element(comptSpath).parent.name
    objD = moose.element(comptDpath).parent.name
    reacListins = moose.wildcardFind(comptSpath+'/##[ISA=ReacBase]')
    reacListind = moose.wildcardFind(comptDpath+'/##[ISA=ReacBase]')

    for rs in reacListins:
        rSsubname, rSprdname = [],[]
        rSsubname = subprdList(rs,"sub")
        rSprdname = subprdList(rs,"prd")
        allexists, allexistp = False, False
        allclean = False

        if rs.name not in [rd.name for rd in reacListind]:
            # reaction name not found then copy
            # And assuming that pools are copied earlier EXPECT POOL CPLX
            #To be assured the it takes correct compartment name incase reaction sub's
            #belongs to different compt
            key = findCompartment(rs).name
            if rSsubname and rSprdname:
                allexists =  checkexist(rSsubname,objS,objD)
                allexistp = checkexist(rSprdname,objS,objD)
                if allexists and allexistp:
                    rdpath = rs.parent.path.replace(objS,objD)
                    reac = moose.copy(rs,moose.element(rdpath))
                    connectObj(reac,rSsubname,"sub",comptD,war_msg)
                    connectObj(reac,rSprdname,"prd",comptD,war_msg)
                    allclean = True
                else:
                    # didn't find sub or prd for this reaction
                    #   --  it may be connected Enzyme cplx
                    RE_Notcopiedyet.append(rs)
            else:
                #   -- it is dagging reaction
                RE_Dangling.append(rs)
                #print ("This reaction \""+rb.path+"\" has no substrate/product Dangling reaction are not copied")
                #war_msg = war_msg+"\nThis reaction \""+rb.path+"\" has no substrate/product Dangling reaction are not copied"

        else:
            #Same reaction name
            #   -- Same substrate and product including same volume then don't copy
            #   -- different substrate/product or if sub/prd's volume is different then DUPLICATE the reaction
            allclean = False
            for rd in reacListind:
                if rs.name == rd.name:
                    rSsubname = subprdList(rs,"sub")
                    rDsubname = subprdList(rd,"sub")
                    hasSamenoofsublen,hasSameS,hasSamevols = same_len_name_vol(rSsubname,rDsubname)

                    rSprdname = subprdList(rs,"prd")
                    rDprdname = subprdList(rd,"prd")
                    hasSamenoofprdlen,hasSameP,hasSamevolp = same_len_name_vol(rSprdname,rDprdname)
                    if not all((hasSamenoofsublen,hasSameS,hasSamevols,hasSamenoofprdlen,hasSameP,hasSamevolp)):
                        # May be different substrate or product or volume of Sub/prd may be different,
                        # Duplicating the reaction
                        if rSsubname and rSprdname:
                            allexists,allexistp = False,False
                            allexists = checkexist(rSsubname,objS,objD)
                            allexistp = checkexist(rSprdname,objS,objD)
                            if allexists and allexistp:
                                rs.name = rs.name+"_duplicated"
                                #reac = moose.Reac(comptA[key].path+'/'+rb.name+"_duplicated")
                                rdpath = rs.parent.path.replace(objS,objD)
                                reac = moose.copy(rs,moose.element(rdpath))
                                connectObj(reac,rSsubname,"sub",comptD,war_msg)
                                connectObj(reac,rSprdname,"prd",comptD,war_msg)
                                RE_Duplicated.append(reac)
                                allclean = True
                            else:
                                allclean = False
                    else:
                        allclean = True

                    if not allclean:
                        # didn't find sub or prd for this reaction
                        #   --  it may be connected Enzyme cplx
                        if rSsubname and rSprdname:
                            RE_Notcopiedyet.append(rs)
                        else:
                            RE_Dangling.append(rs)

    return RE_Duplicated,RE_Notcopiedyet,RE_Dangling
def subprdList(reac, subprd):
    """Return the substrate ('sub') or product ('prd') elements of *reac*."""
    neighbors = moose.element(reac).neighbors[subprd]
    return [moose.element(nb) for nb in neighbors]
def same_len_name_vol(rA, rB):
    """Compare two lists of pool elements.

    Returns a 3-tuple of booleans:
      * the lists have the same length,
      * they carry the same set of names,
      * every name-matched pair lives in compartments of identical volume
        (False when either list is empty, i.e. undecidable).
    """
    names_a = set(item.name for item in set(rA))
    names_b = set(item.name for item in set(rB))
    same_length = (len(rA) == len(rB))
    # Identical name sets <=> empty symmetric difference.
    same_names = not (names_a.union(names_b) - names_a.intersection(names_b))
    volume_matches = []
    if rA and rB:
        by_name_a = dict((item.name, item) for item in rA)
        by_name_b = dict((item.name, item) for item in rB)
        for name, elem_b in by_name_b.items():
            elem_a = by_name_a[name]
            compt_a = moose.element(findCompartment(elem_a))
            compt_b = moose.element(findCompartment(elem_b))
            # Volumes count as equal only when they match exactly.
            volume_matches.append(not abs(compt_a.volume - compt_b.volume))
    same_volume = bool(volume_matches) and all(volume_matches)
    return (same_length, same_names, same_volume)
def connectObj(reac, spList, spType, comptA, war_msg):
    """Wire every pool in *spList* to *reac* on the given message slot.

    *spType* is "sub" or "prd".  Returns the connection status of the last
    name match: False when a matching pool does not exist in the destination
    tree (which should only happen for enzyme-complex pools).
    """
    connected = False
    for pool in spList:
        compartment_pools = list(poolListina[findCompartment(pool).name])
        for candidate in compartment_pools:
            if pool.name != candidate.name:
                continue
            if moose.exists(candidate.path):
                moose.connect(moose.element(reac), spType, moose.element(candidate), 'reac', 'OneToOne')
                connected = True
            else:
                # Unreached unless the sub/prd is connected to an enzyme cplx pool.
                connected = False
    return connected
def checkexist(spList, objB, objA):
    """Return True when every element of *spList* has a counterpart path.

    Each element's path is mapped from the *objB* tree onto the *objA*
    tree and probed with moose.exists.  An empty *spList* yields False.
    """
    presence = []
    for pool in spList:
        mirrored_path = pool.path.replace(objB, objA)
        presence.append(bool(moose.exists(mirrored_path)))
    return bool(presence) and all(presence)
if __name__ == "__main__":
    # Usage: python <script> <source-model-file> <destination-model-file>
    try:
        sys.argv[1]
    except IndexError:
        print("Source filename or path not given")
        exit(0)
    else:
        src = sys.argv[1]
        if not os.path.exists(src):
            print("Filename or path does not exist %s." % (src))
        else:
            try:
                sys.argv[2]
            except IndexError:
                print("Destination filename or path not given")
                exit(0)
            else:
                des = sys.argv[2]
                # Bug fix: this check used to re-test `src`, so a missing
                # destination file was never detected or reported.
                if not os.path.exists(des):
                    print("Filename or path does not exist %s." % (des))
                    exit(0)
                else:
                    print("src and des %s, %s." % (src, des))
                    # Merge the source chemical model into the destination.
                    mergered = mergeChemModel(src, des)
| gpl-3.0 |
Manojkumar91/odoo_inresto | addons/hw_proxy/__openerp__.py | 8 | 1608 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Hardware Proxy',
    'version': '1.0',
    'category': 'Point Of Sale',
    'sequence': 6,
    'summary': 'Connect the Web Client to Hardware Peripherals',
    'website': 'https://www.odoo.com/page/point-of-sale',
    # Typo fixes in the user-visible description:
    # "Hardware Poxy" -> "Hardware Proxy", "This modules" -> "This module".
    'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual devices drivers
are found in other modules that must be installed separately.
""",
    'author': 'OpenERP SA',
    'depends': [],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
mj-5/volatility | volatility/plugins/mac/netstat.py | 44 | 2242 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.lsof as lsof
class mac_netstat(lsof.mac_lsof):
    """ Lists active per-process network connections """

    def render_text(self, outfd, data):
        # Column layout: protocol, both endpoints, TCP state, owning process.
        columns = [("Proto", "6"),
                   ("Local IP", "20"),
                   ("Local Port", "6"),
                   ("Remote IP", "20"),
                   ("Remote Port", "6"),
                   ("State", "10"),
                   ("Process", "24")]
        self.table_header(outfd, columns)

        for proc, _fd_num, fd, _path in data:
            # Only socket descriptors are of interest here.
            if fd.f_fglob.fg_type != 'DTYPE_SOCKET':
                continue
            sock = fd.f_fglob.fg_data.dereference_as("socket")
            fam = sock.family
            if fam == 1:
                # AF_UNIX: report the bound filesystem path instead of a row.
                unix_pcb = sock.so_pcb.dereference_as("unpcb")
                outfd.write("UNIX {0}\n".format(unix_pcb.unp_addr.sun_path))
            elif fam in [2, 30]:
                # AF_INET / AF_INET6: one table row per connection.
                lip, lport, rip, rport = sock.get_connection_info()
                owner = "{}/{}".format(proc.p_comm, proc.p_pid)
                self.table_row(outfd, sock.protocol, lip, lport, rip, rport,
                               sock.state, owner)
| gpl-2.0 |
codestersnyc/codesters-graphics | codesters/hitbox.py | 1 | 3277 | import math
from Tkinter import Tk, Canvas
from .manager import Manager
class Hitbox(object):
    """Rotatable rectangular collision box attached to a sprite.

    The base_* attributes keep the untransformed corner offsets; the
    top_right/top_left/bottom_right/bottom_left lists hold the corners
    after applying the sprite's current heading, size and position, and
    are mutated in place by update_corners().
    """

    def __init__(self, top_right, top_left, bottom_right, bottom_left, sprite):
        self.root = Manager.canvas
        self.canvas = Manager.canvas
        self.sprite = sprite
        # Remember the untransformed corner offsets.
        self.base_top_right_x = top_right[0]
        self.base_top_right_y = top_right[1]
        self.base_top_left_x = top_left[0]
        self.base_top_left_y = top_left[1]
        self.base_bottom_right_x = bottom_right[0]
        self.base_bottom_right_y = bottom_right[1]
        self.base_bottom_left_x = bottom_left[0]
        self.base_bottom_left_y = bottom_left[1]
        # Keep references to the caller's lists; they are updated in place.
        self.top_right = top_right
        self.top_left = top_left
        self.bottom_right = bottom_right
        self.bottom_left = bottom_left

    def _transform(self, cos_h, sin_h, base_x, base_y):
        """Rotate/scale one base corner and translate it to the sprite."""
        return (-(cos_h * base_x) + (base_y * sin_h) + self.sprite.xcor,
                -(sin_h * base_x) - (base_y * cos_h) + self.sprite.ycor)

    def update_corners(self):
        """Recompute all four corners from the sprite's current pose."""
        cos_h = math.cos(self.sprite.heading * math.pi/180) * self.sprite.size
        sin_h = math.sin(self.sprite.heading * math.pi/180) * self.sprite.size
        self.top_right[0], self.top_right[1] = self._transform(
            cos_h, sin_h, self.base_top_right_x, self.base_top_right_y)
        self.top_left[0], self.top_left[1] = self._transform(
            cos_h, sin_h, self.base_top_left_x, self.base_top_left_y)
        self.bottom_right[0], self.bottom_right[1] = self._transform(
            cos_h, sin_h, self.base_bottom_right_x, self.base_bottom_right_y)
        self.bottom_left[0], self.bottom_left[1] = self._transform(
            cos_h, sin_h, self.base_bottom_left_x, self.base_bottom_left_y)

    def printCorners(self):
        """Ask the sprite's render loop to print the corner coordinates."""
        self.sprite.modes.append("print_corners")

    def draw(self):
        """Queue the four red outline segments on the sprite's line list."""
        edges = ((self.top_right, self.top_left),
                 (self.top_right, self.bottom_right),
                 (self.top_left, self.bottom_left),
                 (self.bottom_right, self.bottom_left))
        for start, end in edges:
            self.sprite.lines.append([(Manager.width + start[0],
                                       Manager.height - start[1],
                                       Manager.width + end[0],
                                       Manager.height - end[1]), "red", 1.0])
| mit |
theo-l/django | tests/postgres_tests/test_citext.py | 89 | 2781 | """
The citext PostgreSQL extension supports indexing of case-insensitive text
strings and thus eliminates the need for operations such as iexact and other
modifiers to enforce use of an index.
"""
from django.db import IntegrityError
from django.test.utils import modify_settings
from . import PostgreSQLTestCase
from .models import CITestModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class CITextTestCase(PostgreSQLTestCase):
    """Exercise the citext column types through the ORM.

    citext indexes are case-insensitive, so plain lookups must match the
    stored row regardless of the casing used in the query.
    """
    case_sensitive_lookups = ('contains', 'startswith', 'endswith', 'regex')

    @classmethod
    def setUpTestData(cls):
        cls.john = CITestModel.objects.create(
            name='JoHn',
            email='joHn@johN.com',
            description='Average Joe named JoHn',
            array_field=['JoE', 'jOhn'],
        )

    def test_equal_lowercase(self):
        """
        citext removes the need for iexact as the index is case-insensitive.
        """
        for field in ('name', 'email', 'description'):
            lowered = getattr(self.john, field).lower()
            self.assertEqual(CITestModel.objects.filter(**{field: lowered}).count(), 1)

    def test_fail_citext_primary_key(self):
        """
        Creating an entry for a citext field used as a primary key which
        clashes with an existing value isn't allowed.
        """
        with self.assertRaises(IntegrityError):
            CITestModel.objects.create(name='John')

    def test_array_field(self):
        stored = CITestModel.objects.get()
        self.assertEqual(stored.array_field, self.john.array_field)
        self.assertTrue(CITestModel.objects.filter(array_field__contains=['joe']).exists())

    def test_lookups_name_char(self):
        for lookup in self.case_sensitive_lookups:
            with self.subTest(lookup=lookup):
                matches = CITestModel.objects.filter(**{'name__{}'.format(lookup): 'john'})
                self.assertSequenceEqual(matches, [self.john])

    def test_lookups_description_text(self):
        probes = ('average', 'average joe', 'john', 'Joe.named')
        for lookup, probe in zip(self.case_sensitive_lookups, probes):
            with self.subTest(lookup=lookup, string=probe):
                matches = CITestModel.objects.filter(**{'description__{}'.format(lookup): probe})
                self.assertSequenceEqual(matches, [self.john])

    def test_lookups_email(self):
        probes = ('john', 'john', 'john.com', 'john.com')
        for lookup, probe in zip(self.case_sensitive_lookups, probes):
            with self.subTest(lookup=lookup, string=probe):
                matches = CITestModel.objects.filter(**{'email__{}'.format(lookup): probe})
                self.assertSequenceEqual(matches, [self.john])
| bsd-3-clause |
deepesch/scikit-learn | sklearn/tests/test_lda.py | 77 | 6258 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
# Binary labels for X: two clusters of three points each.
y = np.array([1, 1, 1, 2, 2, 2])
# Three-class labels that deliberately do NOT match the spatial clusters in X.
y3 = np.array([1, 1, 2, 2, 3, 3])

# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')

# Every (solver, shrinkage) combination exercised by test_lda_predict.
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
                    ('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
                    ('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
    # Test LDA classification.
    # This checks that LDA implements fit and predict and returns correct
    # values for simple toy data, for every supported solver/shrinkage combo.
    for solver, shrinkage in solver_shrinkage:
        clf = lda.LDA(solver=solver, shrinkage=shrinkage)
        tag = 'solver %s' % solver
        assert_array_equal(clf.fit(X, y).predict(X), y, tag)

        # Assert that it works with 1D data
        assert_array_equal(clf.fit(X1, y).predict(X1), y, tag)

        # Test probability estimates
        proba = clf.predict_proba(X1)
        assert_array_equal((proba[:, 1] > 0.5) + 1, y, tag)
        log_proba = clf.predict_log_proba(X1)
        assert_array_almost_equal(np.exp(log_proba), proba, 8, tag)

        # Primarily test for commit 2f34950 -- "reuse" of priors
        pred3 = clf.fit(X, y3).predict(X)
        # LDA shouldn't be able to separate those
        assert_true(np.any(pred3 != y3), tag)

    # Test invalid shrinkages
    assert_raises(ValueError, lda.LDA(solver="lsqr", shrinkage=-0.2231).fit, X, y)
    assert_raises(ValueError, lda.LDA(solver="eigen", shrinkage="dummy").fit, X, y)
    assert_raises(NotImplementedError, lda.LDA(solver="svd", shrinkage="auto").fit, X, y)
    # Test unknown solver
    assert_raises(ValueError, lda.LDA(solver="dummy").fit, X, y)
def test_lda_coefs():
    # Test if the coefficients of the solvers are approximately the same.
    n_features = 2
    n_classes = 2
    n_samples = 1000
    X, y = make_blobs(n_samples=n_samples, n_features=n_features,
                      centers=n_classes, random_state=11)

    # Fit one classifier per solver on identical data.
    fitted = {}
    for solver in ('svd', 'lsqr', 'eigen'):
        fitted[solver] = lda.LDA(solver=solver).fit(X, y)

    # All three solvers should agree (to one decimal) on the coefficients.
    assert_array_almost_equal(fitted['svd'].coef_, fitted['lsqr'].coef_, 1)
    assert_array_almost_equal(fitted['svd'].coef_, fitted['eigen'].coef_, 1)
    assert_array_almost_equal(fitted['eigen'].coef_, fitted['lsqr'].coef_, 1)
def test_lda_transform():
    # Test LDA transform.
    # Both svd and eigen solvers support dimensionality reduction.
    for solver in ("svd", "eigen"):
        clf = lda.LDA(solver=solver, n_components=1)
        transformed = clf.fit(X, y).transform(X)
        assert_equal(transformed.shape[1], 1)

    # The lsqr solver cannot transform and must say so explicitly.
    clf = lda.LDA(solver="lsqr", n_components=1)
    clf.fit(X, y)
    msg = "transform not implemented for 'lsqr'"
    assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
    # arrange four classes with their means in a kite-shaped pattern
    # the longer distance should be transformed to the first component, and
    # the shorter distance to the second component.
    means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])

    # We construct perfectly symmetric distributions, so the LDA can estimate
    # precise means.
    scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
                        [0, 0, 0.1], [0, 0, -0.1]])

    # Replicate the same scatter around each of the four class means.
    X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
    y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])

    # Fit LDA and transform the means
    clf = lda.LDA(solver="svd").fit(X, y)
    means_transformed = clf.transform(means)

    # Unit vectors along the two between-class directions of the kite.
    d1 = means_transformed[3] - means_transformed[0]
    d2 = means_transformed[2] - means_transformed[1]
    d1 /= np.sqrt(np.sum(d1 ** 2))
    d2 /= np.sqrt(np.sum(d2 ** 2))

    # the transformed within-class covariance should be the identity matrix
    assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))

    # the means of classes 0 and 3 should lie on the first component
    assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)

    # the means of classes 1 and 2 should lie on the second component
    assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
    """LDA classification must cope with very differently scaled features."""
    n_per_class = 100
    rng = np.random.RandomState(1234)
    # Uniformly distributed clouds centred at x = -10 and x = +10, so the
    # classes never overlap.
    cloud_a = rng.uniform(-1, 1, (n_per_class, 3)) + [-10, 0, 0]
    cloud_b = rng.uniform(-1, 1, (n_per_class, 3)) + [10, 0, 0]
    # Blow up the second and third features by several orders of magnitude.
    features = np.vstack((cloud_a, cloud_b)) * [1, 100, 10000]
    labels = [-1] * n_per_class + [1] * n_per_class

    for solver in ('svd', 'lsqr', 'eigen'):
        clf = lda.LDA(solver=solver)
        # should be able to separate the data perfectly
        assert_equal(clf.fit(features, labels).score(features, labels), 1.0,
                     'using covariance: %s' % solver)
def test_covariance():
    x, y = make_blobs(n_samples=100, n_features=5,
                      centers=1, random_state=42)

    # make features correlated
    x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))

    # Both the empirical and the shrunk estimate must be symmetric matrices.
    for method in ('empirical', 'auto'):
        estimate = lda._cov(x, method)
        assert_almost_equal(estimate, estimate.T)
| bsd-3-clause |
candrews/portage | pym/portage/tests/resolver/soname/test_unsatisfiable.py | 10 | 1585 | # Copyright 2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import (
ResolverPlayground, ResolverPlaygroundTestCase)
class SonameUnsatisfiableTestCase(TestCase):
    """Check that an update is skipped when its soname dependency cannot
    be satisfied by any available binary package."""

    def testSonameUnsatisfiable(self):
        # app-misc/B-1 requires libA.so.2, but the only available A
        # provides libA.so.1, so the B-0 -> B-1 update must be skipped.
        binpkgs = {
            "app-misc/A-1": {
                "EAPI": "5",
                "PROVIDES": "x86_32: libA.so.1",
            },
            "app-misc/B-1": {
                "DEPEND": "app-misc/A",
                "RDEPEND": "app-misc/A",
                "REQUIRES": "x86_32: libA.so.2",
            },
            "app-misc/B-0": {
                "DEPEND": "app-misc/A",
                "RDEPEND": "app-misc/A",
                "REQUIRES": "x86_32: libA.so.1",
            },
        }

        installed = {
            "app-misc/A-1": {
                "EAPI": "5",
                "PROVIDES": "x86_32: libA.so.1",
            },
            "app-misc/B-0": {
                "DEPEND": "app-misc/A",
                "RDEPEND": "app-misc/A",
                "REQUIRES": "x86_32: libA.so.1",
            },
        }

        world = ["app-misc/B"]

        test_cases = (
            # Skip update due to unsatisfied soname dependency.
            ResolverPlaygroundTestCase(
                ["@world"],
                options={
                    "--deep": True,
                    "--ignore-soname-deps": "n",
                    "--update": True,
                    "--usepkgonly": True,
                },
                success=True,
                mergelist=[],
            ),
        )

        playground = ResolverPlayground(binpkgs=binpkgs, debug=False,
                                        installed=installed, world=world)
        try:
            for case in test_cases:
                playground.run_TestCase(case)
                self.assertEqual(case.test_success, True, case.fail_msg)
        finally:
            # Disable debug so that cleanup works.
            playground.debug = False
            playground.cleanup()
| gpl-2.0 |
NewpTone/stacklab-nova | debian/tmp/usr/lib/python2.7/dist-packages/nova/virt/hyperv/volumeops.py | 7 | 13426 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from nova import block_device
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import baseops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('hyperv_attaching_volume_retry_count',
default=10,
help='The number of times we retry on attaching volume '),
cfg.IntOpt('hyperv_wait_between_attach_retry',
default=5,
help='The seconds to wait between an volume attachment attempt'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(hyper_volumeops_opts)
class VolumeOps(baseops.BaseOps):
    """
    Management class for Volume-related tasks.

    Attaches/detaches iSCSI volumes to Hyper-V instances through the
    Hyper-V WMI provider and the MS iSCSI initiator service.
    """

    def __init__(self):
        super(VolumeOps, self).__init__()
        self._vmutils = vmutils.VMUtils()
        self._driver = driver
        self._block_device = block_device
        self._time = time
        self._initiator = None
        self._default_root_device = 'vda'
        # Retry knobs for waiting until a freshly logged-in iSCSI disk
        # shows up as a Msvm_DiskDrive.
        self._attaching_volume_retry_count = \
            FLAGS.hyperv_attaching_volume_retry_count
        self._wait_between_attach_retry = \
            FLAGS.hyperv_wait_between_attach_retry
        self._volutils = volumeutils.VolumeUtils()

    def attach_boot_volume(self, block_device_info, vm_name):
        """Attach the boot volume to the IDE controller."""
        LOG.debug(_("block device info: %s"), block_device_info)
        ebs_root = self._driver.block_device_info_get_mapping(
            block_device_info)[0]

        connection_info = ebs_root['connection_info']
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']

        self._volutils.login_storage_target(target_lun, target_iqn,
            target_portal)

        try:
            #Getting the mounted disk
            mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
                target_lun)

            #Attach to IDE controller
            #Find the IDE controller for the vm.
            vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
            vm = vms[0]
            vmsettings = vm.associators(
                wmi_result_class='Msvm_VirtualSystemSettingData')
            rasds = vmsettings[0].associators(
                wmi_result_class='MSVM_ResourceAllocationSettingData')
            ctrller = [r for r in rasds
                if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                and r.Address == "0"]
            #Attaching to the same slot as the VHD disk file
            self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm)
        except Exception as exn:
            LOG.exception(_('Attach boot from volume failed: %s'), exn)
            # Roll back the iSCSI login before propagating the failure.
            self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
            raise vmutils.HyperVException(
                _('Unable to attach boot volume to instance %s')
                % vm_name)

    def volume_in_mapping(self, mount_device, block_device_info):
        """Return True if mount_device appears in the block device mapping."""
        return self._volutils.volume_in_mapping(mount_device,
            block_device_info)

    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach a volume to the SCSI controller."""
        LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
            " %(mountpoint)s") % locals())
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']

        self._volutils.login_storage_target(target_lun, target_iqn,
            target_portal)

        try:
            #Getting the mounted disk
            mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
                target_lun)
            #Find the SCSI controller for the vm
            vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
            vm = vms[0]
            vmsettings = vm.associators(
                wmi_result_class='Msvm_VirtualSystemSettingData')
            rasds = vmsettings[0].associators(
                wmi_result_class='MSVM_ResourceAllocationSettingData')
            ctrller = [r for r in rasds
                if r.ResourceSubType == 'Microsoft Synthetic SCSI Controller']
            self._attach_volume_to_controller(
                ctrller, self._get_free_controller_slot(ctrller[0]),
                mounted_disk, vm)
        except Exception as exn:
            LOG.exception(_('Attach volume failed: %s'), exn)
            # Roll back the iSCSI login before propagating the failure.
            self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
            raise vmutils.HyperVException(
                _('Unable to attach volume to instance %s')
                % instance_name)

    def _attach_volume_to_controller(self, controller, address, mounted_disk,
        instance):
        """Attach a volume to a controller at the given slot address."""
        #Find the default disk drive object for the vm and clone it.
        diskdflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
            AND InstanceID LIKE '%Default%'")[0]
        diskdrive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', diskdflt)
        diskdrive.Address = address
        diskdrive.Parent = controller[0].path_()
        diskdrive.HostResource = [mounted_disk[0].path_()]
        new_resources = self._vmutils.add_virt_resource(self._conn, diskdrive,
            instance)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add volume to VM %s') %
                instance)

    def _get_free_controller_slot(self, scsi_controller):
        """Return the first unused slot number on the given SCSI controller."""
        #Getting volumes mounted in the SCSI controller
        volumes = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
            AND Parent = '" + scsi_controller.path_() + "'")
        #Slots start from 0, so the length of the disks gives us the free slot
        return len(volumes)

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach a volume from the SCSI controller."""
        LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
            " %(mountpoint)s") % locals())
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']

        #Getting the mounted disk
        mounted_disk = self._get_mounted_disk_from_lun(target_iqn, target_lun)
        physical_list = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'")
        # NOTE(review): stays 0 when no resource matches the mounted disk;
        # remove_virt_resource would then receive 0 -- confirm intended.
        physical_disk = 0
        for phydisk in physical_list:
            host_resource_list = phydisk.HostResource
            if host_resource_list is None:
                continue
            host_resource = str(host_resource_list[0].lower())
            mounted_disk_path = str(mounted_disk[0].path_().lower())
            LOG.debug(_("Mounted disk to detach is: %s"), mounted_disk_path)
            LOG.debug(_("host_resource disk detached is: %s"), host_resource)
            if host_resource == mounted_disk_path:
                physical_disk = phydisk

        LOG.debug(_("Physical disk detached is: %s"), physical_disk)
        vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
        vm = vms[0]
        remove_result = self._vmutils.remove_virt_resource(self._conn,
            physical_disk, vm)
        if remove_result is False:
            raise vmutils.HyperVException(
                _('Failed to remove volume from VM %s') %
                instance_name)

        #Sending logout
        self._volutils.logout_storage_target(self._conn_wmi, target_iqn)

    def get_volume_connector(self, instance):
        """Return the iSCSI connector information for this host."""
        if not self._initiator:
            self._initiator = self._get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        return {
            'ip': FLAGS.my_ip,
            'initiator': self._initiator,
        }

    def _get_iscsi_initiator(self):
        """Return the host's iSCSI initiator name (IQN)."""
        return self._volutils.get_iscsi_initiator(self._conn_cimv2)

    def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
        """Return the Msvm_DiskDrive backing the given iSCSI target/LUN.

        Retries up to hyperv_attaching_volume_retry_count times, sleeping
        hyperv_wait_between_attach_retry seconds between attempts, because
        the disk may take a moment to appear after the iSCSI login.
        """
        initiator_session = self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass \
                WHERE TargetName='" + target_iqn + "'")[0]
        devices = initiator_session.Devices
        device_number = None
        for device in devices:
            LOG.debug(_("device.InitiatorName: %s"), device.InitiatorName)
            LOG.debug(_("device.TargetName: %s"), device.TargetName)
            LOG.debug(_("device.ScsiPortNumber: %s"), device.ScsiPortNumber)
            LOG.debug(_("device.ScsiPathId: %s"), device.ScsiPathId)
            # Bug fix: the message text contained a stray ')'.
            LOG.debug(_("device.ScsiTargetId: %s"), device.ScsiTargetId)
            LOG.debug(_("device.ScsiLun: %s"), device.ScsiLun)
            LOG.debug(_("device.DeviceInterfaceGuid :%s"),
                device.DeviceInterfaceGuid)
            LOG.debug(_("device.DeviceInterfaceName: %s"),
                device.DeviceInterfaceName)
            LOG.debug(_("device.LegacyName: %s"), device.LegacyName)
            LOG.debug(_("device.DeviceType: %s"), device.DeviceType)
            LOG.debug(_("device.DeviceNumber %s"), device.DeviceNumber)
            LOG.debug(_("device.PartitionNumber :%s"), device.PartitionNumber)
            scsi_lun = device.ScsiLun
            if scsi_lun == target_lun:
                device_number = device.DeviceNumber
        if device_number is None:
            raise vmutils.HyperVException(
                _('Unable to find a mounted disk for'
                    ' target_iqn: %s') % target_iqn)
        LOG.debug(_("Device number : %s"), device_number)
        LOG.debug(_("Target lun : %s"), target_lun)
        #Finding Mounted disk drive.
        # Bug fix: the loop previously ran range(1, retry_count) -- one
        # attempt short of the configured count -- and then re-queried
        # unconditionally, discarding a result found on the last pass.
        mounted_disk = []
        for _attempt in range(self._attaching_volume_retry_count):
            mounted_disk = self._conn.query(
                "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
                    str(device_number) + "")
            LOG.debug(_("Mounted disk is: %s"), mounted_disk)
            if len(mounted_disk) > 0:
                break
            self._time.sleep(self._wait_between_attach_retry)

        if len(mounted_disk) == 0:
            raise vmutils.HyperVException(
                _('Unable to find a mounted disk for'
                    ' target_iqn: %s') % target_iqn)
        return mounted_disk

    def disconnect_volume(self, physical_drive_path):
        """Log out the iSCSI session backing the given physical drive."""
        #Get the session_id of the ISCSI connection
        session_id = self._get_session_id_from_mounted_disk(
            physical_drive_path)
        #Logging out the target
        self._volutils.execute_log_out(session_id)

    def _get_session_id_from_mounted_disk(self, physical_drive_path):
        """Return the iSCSI session id owning the drive, or None if absent."""
        drive_number = self._get_drive_number_from_disk_path(
            physical_drive_path)
        LOG.debug(_("Drive number to disconnect is: %s"), drive_number)
        initiator_sessions = self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass")
        for initiator_session in initiator_sessions:
            devices = initiator_session.Devices
            for device in devices:
                deviceNumber = str(device.DeviceNumber)
                LOG.debug(_("DeviceNumber : %s"), deviceNumber)
                if deviceNumber == drive_number:
                    return initiator_session.SessionId

    def _get_drive_number_from_disk_path(self, disk_path):
        """Extract the drive number from a WMI disk path's DeviceID field."""
        LOG.debug(_("Disk path to parse: %s"), disk_path)
        start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
        LOG.debug(_("start_device_id: %s"), start_device_id)
        end_device_id = disk_path.find('"', start_device_id + 1)
        LOG.debug(_("end_device_id: %s"), end_device_id)
        deviceID = disk_path[start_device_id + 1:end_device_id]
        # Skip past the leading backslash pair of the DeviceID
        # (e.g. \\.\PHYSICALDRIVE1) -- assumes that prefix shape; confirm.
        return deviceID[deviceID.find("\\") + 2:]

    def get_default_root_device(self):
        """Return the default root device name ('vda')."""
        return self._default_root_device
| apache-2.0 |
soarpenguin/ansible | lib/ansible/modules/packaging/language/pip.py | 4 | 21299 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when C(virtualenv_command) is using C(pyvenv) or
the C(-m venv) module.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag.
required: false
default: false
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+).
- pip:
name: git+http://myrepo/app/MyApp
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install (Bottle) within a user home directory.
- pip:
name: bottle
extra_args: --user
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import os
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
# Keyed by package name; each value is passed to `python -c` by
# _get_package_info() and prints the installed version to stdout.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
                             'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
if rc != 0:
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
def _get_full_name(name, version=None):
if version is None:
resp = name
else:
resp = name + '==' + version
return resp
def _get_packages(module, pip, chdir):
'''Return results of pip command to get packages.'''
# Try 'pip list' command first.
command = '%s list --format=freeze' % pip
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (pip version too old) then use 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
return (command, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
'''Return whether or not package is installed.'''
for pkg in installed_pkgs:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
else:
continue
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
def _get_pip(module, env=None, executable=None):
    """Locate the pip executable to use.

    Resolution order:
    1. An absolute ``executable`` path is used as-is.
    2. A relative ``executable`` becomes the only searched basename.
    3. With a virtualenv ``env``, search ``env/bin`` for the candidates.
    4. Otherwise search the module's binary path for pip2/pip/pip3.

    Fails the module run (fail_json) when nothing suitable is found.
    """
    # Older pip only installed under the "/usr/bin/pip" name. Many Linux
    # distros install it there.
    # By default, we try to use pip required for the current python
    # interpreter, so people can use pip to install modules dependencies
    candidate_pip_basenames = ('pip2', 'pip')
    if PY3:
        # pip under python3 installs the "/usr/bin/pip3" name
        candidate_pip_basenames = ('pip3',)
    pip = None
    if executable is not None:
        if os.path.isabs(executable):
            pip = executable
        else:
            # If you define your own executable that executable should be the only candidate.
            # As noted in the docs, executable doesn't work with virtualenvs.
            candidate_pip_basenames = (executable,)
    if pip is None:
        if env is None:
            opt_dirs = []
            for basename in candidate_pip_basenames:
                pip = module.get_bin_path(basename, False, opt_dirs)
                if pip is not None:
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find any of %s to use. pip'
                                     ' needs to be installed.' % ', '.join(candidate_pip_basenames))
        else:
            # If we're using a virtualenv we must use the pip from the
            # virtualenv
            venv_dir = os.path.join(env, 'bin')
            # Inside a venv, also accept the plain 'pip' name in addition to
            # the version-specific first candidate.
            candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
            for basename in candidate_pip_basenames:
                candidate = os.path.join(venv_dir, basename)
                if os.path.exists(candidate) and is_executable(candidate):
                    pip = candidate
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find pip in the virtualenv,'
                                     ' %s, under any of these names: %s. Make sure pip is'
                                     ' present in the virtualenv.' % (env,
                                                                      ', '.join(candidate_pip_basenames)))
    return pip
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
"""This is only needed for special packages which do not show up in pip freeze
pip and setuptools fall into this category.
:returns: a string containing the version number if the package is
installed. None if the package is not installed.
"""
if env:
opt_dirs = ['%s/bin' % env]
else:
opt_dirs = []
python_bin = module.get_bin_path('python', False, opt_dirs)
if python_bin is None:
formatted_dep = None
else:
rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
if rc:
formatted_dep = None
else:
formatted_dep = '%s==%s' % (package, out.strip())
return formatted_dep
def main():
    """Module entry point: install/remove/upgrade Python packages with pip.

    Parses the module arguments, optionally creates a virtualenv, locates
    the appropriate pip executable, builds and runs the pip command for the
    requested state, and reports changed/failed via exit_json/fail_json.
    """
    # Map of desired state -> pip subcommand that realizes it.
    state_map = dict(
        present='install',
        absent='uninstall -y',
        latest='install -U',
        forcereinstall='install -U --force-reinstall',
    )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=state_map.keys()),
            name=dict(type='list'),
            version=dict(type='str'),
            requirements=dict(type='str'),
            virtualenv=dict(type='path'),
            virtualenv_site_packages=dict(default=False, type='bool'),
            virtualenv_command=dict(default='virtualenv', type='path'),
            virtualenv_python=dict(type='str'),
            use_mirrors=dict(default=True, type='bool'),
            extra_args=dict(type='str'),
            editable=dict(default=False, type='bool'),
            chdir=dict(type='path'),
            executable=dict(type='path'),
            umask=dict(type='str'),
        ),
        required_one_of=[['name', 'requirements']],
        mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
        supports_check_mode=True
    )
    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']
    # umask arrives as a string; require a valid octal literal (e.g. "0077").
    if umask and not isinstance(umask, int):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=to_native(sys.exc_info()[1]))
    # Apply the requested umask for the duration of the run; restored in
    # the finally block below.
    old_umask = None
    if umask is not None:
        old_umask = os.umask(umask)
    try:
        if state == 'latest' and version is not None:
            module.fail_json(msg='version is incompatible with state=latest')
        if chdir is None:
            # this is done to avoid permissions issues with privilege escalation and virtualenvs
            chdir = tempfile.gettempdir()
        err = ''
        out = ''
        env = module.params['virtualenv']
        # Create the virtualenv when requested and not already present
        # (presence is detected via its bin/activate script).
        if env:
            if not os.path.exists(os.path.join(env, 'bin', 'activate')):
                if module.check_mode:
                    module.exit_json(changed=True)
                cmd = module.params['virtualenv_command']
                if os.path.basename(cmd) == cmd:
                    cmd = module.get_bin_path(cmd, True)
                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if '--no-site-packages' in cmd_opts:
                        cmd += ' --no-site-packages'
                # -p is a virtualenv option, not compatible with pyenv or venv
                # this if validates if the command being used is not any of them
                if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
                    if virtualenv_python:
                        cmd += ' -p%s' % virtualenv_python
                    elif PY3:
                        # Ubuntu currently has a patch making virtualenv always
                        # try to use python2. Since Ubuntu16 works without
                        # python2 installed, this is a problem. This code mimics
                        # the upstream behaviour of using the python which invoked
                        # virtualenv to determine which python is used inside of
                        # the virtualenv (when none are specified).
                        cmd += ' -p%s' % sys.executable
                # if venv or pyvenv are used and virtualenv_python is defined, then
                # virtualenv_python is ignored, this has to be acknowledged
                elif module.params['virtualenv_python']:
                    module.fail_json(
                        msg='virtualenv_python should not be used when'
                            ' using the venv module or pyvenv as virtualenv_command'
                    )
                cmd = "%s %s" % (cmd, env)
                rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if rc != 0:
                    _fail(module, cmd, out, err)
        pip = _get_pip(module, env, module.params['executable'])
        cmd = '%s %s' % (pip, state_map[state])
        # If there's a virtualenv we want things we install to be able to use other
        # installations that exist as binaries within this virtualenv. Example: we
        # install cython and then gevent -- gevent needs to use the cython binary,
        # not just a python package that will be found by calling the right python.
        # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
        # in run_command by setting path_prefix here.
        path_prefix = None
        if env:
            path_prefix = "/".join(pip.split('/')[:-1])
        # Automatically apply -e option to extra_args when source is a VCS url. VCS
        # includes those beginning with svn+, git+, hg+ or bzr+
        has_vcs = False
        if name:
            for pkg in name:
                if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
                    has_vcs = True
                    break
        if module.params['editable']:
            args_list = [] # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Ok, we will reconstruct the option string
                extra_args = ' '.join(args_list)
        if extra_args:
            cmd += ' %s' % extra_args
        if name:
            for pkg in name:
                cmd += ' %s' % _get_full_name(pkg, version)
        else:
            if requirements:
                cmd += ' -r %s' % requirements
        # In check mode, predict whether a change would occur without
        # actually running the install/uninstall command.
        if module.check_mode:
            if extra_args or requirements or state == 'latest' or not name:
                module.exit_json(changed=True)
            pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
            out += out_pip
            err += err_pip
            changed = False
            if name:
                pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
                if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
                    # Older versions of pip (pre-1.3) do not have pip list.
                    # pip freeze does not list setuptools or pip in its output
                    # So we need to get those via a specialcase
                    for pkg in ('setuptools', 'pip'):
                        if pkg in name:
                            formatted_dep = _get_package_info(module, pkg, env)
                            if formatted_dep is not None:
                                pkg_list.append(formatted_dep)
                                out += '%s\n' % formatted_dep
                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (state == 'present' and not is_present) or (state == 'absent' and is_present):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
        # Snapshot the package list beforehand when pip's own output cannot
        # be relied upon to signal a change (requirements files, VCS urls).
        out_freeze_before = None
        if requirements or has_vcs:
            _, out_freeze_before, _ = _get_packages(module, pip, chdir)
        rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        if rc == 1 and state == 'absent' and \
                ('not installed' in out_pip or 'not installed' in err_pip):
            pass # rc is 1 when attempting to uninstall non-installed package
        elif rc != 0:
            _fail(module, cmd, out, err)
        if state == 'absent':
            changed = 'Successfully uninstalled' in out_pip
        else:
            if out_freeze_before is None:
                changed = 'Successfully installed' in out_pip
            else:
                # Compare before/after freeze output to detect changes.
                _, out_freeze_after, _ = _get_packages(module, pip, chdir)
                changed = out_freeze_before != out_freeze_after
        module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                         state=state, requirements=requirements, virtualenv=env,
                         stdout=out, stderr=err)
    finally:
        if old_umask is not None:
            os.umask(old_umask)
if __name__ == '__main__':
main()
| gpl-3.0 |
kgao/MediaDrop | mediadrop/controllers/login.py | 1 | 5182 | # This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from formencode import Invalid
from pylons import request, tmpl_context
from mediadrop.forms.login import LoginForm
from mediadrop.lib.base import BaseController
from mediadrop.lib.helpers import redirect, url_for
from mediadrop.lib.i18n import _
from mediadrop.lib.decorators import expose, observable
from mediadrop.lib.routing_helpers import dispatch_info_for_url, is_url_for_mediadrop_domain
from mediadrop.plugin import events
import logging
log = logging.getLogger(__name__)
login_form = LoginForm()
class LoginController(BaseController):
    """Controller handling login/logout flows.

    Authentication itself is performed by repoze.who middleware (see
    mediadrop.lib.auth); the *_handler methods below exist only so Routes
    has a target, and the post_* methods run after the middleware acted.
    """
    @expose('login.html')
    @observable(events.LoginController.login)
    def login(self, came_from=None, **kwargs):
        """Render the login form; redirect away if already authenticated."""
        if request.environ.get('repoze.who.identity'):
            redirect(came_from or '/')
        # the friendlyform plugin requires that these values are set in the
        # query string
        form_url = url_for('/login/submit',
                           came_from=(came_from or '').encode('utf-8'),
                           __logins=str(self._is_failed_login()))
        # Build a dummy Invalid tree so the form shows a generic error
        # without revealing which of username/password was wrong.
        login_errors = None
        if self._is_failed_login():
            login_errors = Invalid('dummy', None, {}, error_dict={
                '_form': Invalid(_('Invalid username or password.'), None, {}),
                'login': Invalid('dummy', None, {}),
                'password': Invalid('dummy', None, {}),
            })
        return dict(
            login_form = login_form,
            form_action = form_url,
            form_values = kwargs,
            login_errors = login_errors,
        )
    @expose()
    def login_handler(self):
        """This is a dummy method.
        Without a dummy method, Routes will throw a NotImplemented exception.
        Calls that would route to this method are intercepted by
        repoze.who, as defined in mediadrop.lib.auth
        """
        pass
    @expose()
    def logout_handler(self):
        """This is a dummy method.
        Without a dummy method, Routes will throw a NotImplemented exception.
        Calls that would route to this method are intercepted by
        repoze.who, as defined in mediadrop.lib.auth
        """
        pass
    @expose()
    @observable(events.LoginController.post_login)
    def post_login(self, came_from=None, **kwargs):
        """Run after repoze.who processed a login attempt.

        Re-displays the form on failure; on success validates 'came_from'
        (same host, GET-safe target) before redirecting to it, otherwise
        falls back to /admin (for editors/admins) or the front page.
        """
        if not request.identity:
            # The FriendlyForm plugin will always issue a redirect to
            # /login/continue (post login url) even for failed logins.
            # If 'came_from' is a protected page (i.e. /admin) we could just
            # redirect there and the login form will be displayed again with
            # our login error message.
            # However if the user tried to login from the front page, this
            # mechanism doesn't work so go to the login method directly here.
            self._increase_number_of_failed_logins()
            return self.login(came_from=came_from)
        if came_from:
            url_mapper = request.environ['routes.url'].mapper
            target = dispatch_info_for_url(came_from, url_mapper)
            if not is_url_for_mediadrop_domain(came_from):
                log.debug('no redirect to %r because target url does match our hostname (prevents parameter base redirection attacks)' % came_from)
                came_from = None
            elif (target is not None) and getattr(target.action, '_request_method', None) not in ('GET', None):
                log.debug('no redirect to %r because target url does not allow GET requests' % came_from)
                came_from = None
        if came_from:
            redirect(came_from)
        # It is important to return absolute URLs (if app mounted in subdirectory)
        if request.perm.contains_permission(u'edit') or request.perm.contains_permission(u'admin'):
            redirect(url_for('/admin', qualified=True))
        redirect(url_for('/', qualified=True))
    @expose()
    @observable(events.LoginController.post_logout)
    def post_logout(self, came_from=None, **kwargs):
        """Run after repoze.who logged the user out; go to the front page."""
        redirect('/')
    def _is_failed_login(self):
        # repoze.who.logins will always be an integer even if the HTTP login
        # counter variable contained a non-digit string
        return (request.environ.get('repoze.who.logins', 0) > 0)
    def _increase_number_of_failed_logins(self):
        # Bump the failed-login counter kept in the WSGI environ.
        request.environ['repoze.who.logins'] += 1
    def __call__(self, environ, start_response):
        """Invoke the Controller"""
        # BaseController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        request.identity = request.environ.get('repoze.who.identity')
        tmpl_context.identity = request.identity
        return BaseController.__call__(self, environ, start_response)
| gpl-3.0 |
procangroup/edx-platform | pavelib/paver_tests/test_extract_and_generate.py | 15 | 4704 | """
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
import os
import random
import re
import string
import subprocess
import sys
from datetime import datetime, timedelta
from unittest import TestCase
from i18n import config, dummy, extract, generate
from polib import pofile
from pytz import UTC
class TestGenerate(TestCase):
    """
    Tests functionality of i18n/generate.py
    """
    # Files produced by the extraction step (informational; not read below).
    generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
    @classmethod
    def setUpClass(cls):
        # Extraction + dummy translation generation run once for the whole
        # class because they are slow (minutes, per the notice below).
        super(TestGenerate, cls).setUpClass()
        sys.stderr.write(
            "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
            "If you experience failures, please check that all instances of `gettext` and "
            "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
            "locally for more detail.\n"
        )
        sys.stderr.write(
            "\nExtracting i18n strings and generating dummy translations; "
            "this may take a few minutes\n"
        )
        sys.stderr.flush()
        extract.main(verbose=0)
        dummy.main(verbose=0)
    @classmethod
    def tearDownClass(cls):
        # Clear the Esperanto & RTL directories of any test artifacts
        cmd = "git checkout conf/locale/eo conf/locale/rtl"
        sys.stderr.write("Cleaning up dummy language directories: " + cmd)
        sys.stderr.flush()
        returncode = subprocess.call(cmd, shell=True)
        assert returncode == 0
        super(TestGenerate, cls).tearDownClass()
    def setUp(self):
        super(TestGenerate, self).setUp()
        self.configuration = config.Configuration()
        # Subtract 1 second to help comparisons with file-modify time succeed,
        # since os.path.getmtime() is not millisecond-accurate
        self.start_time = datetime.now(UTC) - timedelta(seconds=1)
    def test_merge(self):
        """
        Tests merge script on English source files.
        """
        # Merge into a randomly-named target, then clean it up.
        filename = os.path.join(self.configuration.source_messages_dir, random_name())
        generate.merge(self.configuration, self.configuration.source_locale, target=filename)
        self.assertTrue(os.path.exists(filename))
        os.remove(filename)
    def test_main(self):
        """
        Runs generate.main() which should merge source files,
        then compile all sources in all configured languages.
        Validates output by checking all .mo files in all configured languages.
        .mo files should exist, and be recently created (modified
        after start of test suite)
        """
        # Change dummy_locales to not have Esperanto present.
        self.configuration.dummy_locales = ['fake2']
        generate.main(verbosity=0, strict=False)
        for locale in self.configuration.translated_locales:
            for filename in ('django', 'djangojs'):
                mofile = filename + '.mo'
                path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
                exists = os.path.exists(path)
                self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
                # Compiled file must be newer than the suite start time
                # recorded in setUp (modulo the 1-second slack).
                self.assertGreaterEqual(
                    datetime.fromtimestamp(os.path.getmtime(path), UTC),
                    self.start_time,
                    msg='File not recently modified: %s' % path
                )
            # Segmenting means that the merge headers don't work they way they
            # used to, so don't make this check for now. I'm not sure if we'll
            # get the merge header back eventually, or delete this code eventually.
            # self.assert_merge_headers(locale)
    def assert_merge_headers(self, locale):
        """
        This is invoked by test_main to ensure that it runs after
        calling generate.main().
        There should be exactly three merge comment headers
        in our merged .po file. This counts them to be sure.
        A merge comment looks like this:
            # #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
        """
        path = os.path.join(self.configuration.get_messages_dir(locale), 'django.po')
        pof = pofile(path)
        pattern = re.compile('^#-#-#-#-#', re.M)
        match = pattern.findall(pof.header)
        self.assertEqual(
            len(match),
            3,
            msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
        )
def random_name(size=6):
    """Returns random filename as string, like test-4BZ81W"""
    alphabet = string.ascii_uppercase + string.digits
    suffix = [random.choice(alphabet) for _ in range(size)]
    return 'test-' + ''.join(suffix)
| agpl-3.0 |
KyleJamesWalker/ansible | lib/ansible/modules/cloud/openstack/os_router.py | 27 | 14113 | #!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
- required I(interfaces) or I(enable_snat) are provided.
required: false
default: None
project:
description:
- Unique name or ID of the project.
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(cloud, module, router, network, internal_subnet_ids):
    """Decide if the given router needs an update.

    Compares the existing router against the requested module parameters:
    admin state, SNAT setting, external gateway network, external fixed
    IPs, and the set of internal interface subnets. Returns True on the
    first difference found, False when everything matches.
    """
    if router['admin_state_up'] != module.params['admin_state_up']:
        return True
    if router['external_gateway_info']:
        # Missing 'enable_snat' on the existing gateway is treated as True.
        if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
            return True
    if network:
        if not router['external_gateway_info']:
            return True
        elif router['external_gateway_info']['network_id'] != network['id']:
            return True
    # check external interfaces
    if module.params['external_fixed_ips']:
        for new_iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(new_iface['subnet'])
            exists = False
            # compare the requested interface with existing, looking for an existing match
            for existing_iface in router['external_gateway_info']['external_fixed_ips']:
                if existing_iface['subnet_id'] == subnet['id']:
                    if 'ip' in new_iface:
                        if existing_iface['ip_address'] == new_iface['ip']:
                            # both subnet id and ip address match
                            exists = True
                            break
                    else:
                        # only the subnet was given, so ip doesn't matter
                        exists = True
                        break
            # this interface isn't present on the existing router
            if not exists:
                return True
    # check internal interfaces
    if module.params['interfaces']:
        existing_subnet_ids = []
        for port in cloud.list_router_interfaces(router, 'internal'):
            if 'fixed_ips' in port:
                for fixed_ip in port['fixed_ips']:
                    existing_subnet_ids.append(fixed_ip['subnet_id'])
        # Order-insensitive comparison of requested vs attached subnets.
        if set(internal_subnet_ids) != set(existing_subnet_ids):
            return True
    return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return external_subnet_ids, internal_subnet_ids
def main():
    """Module entry point: create, update, or delete an OpenStack router.

    Resolves the optional project/network/subnets via shade, supports
    check mode via _system_state_change(), and reports results through
    exit_json/fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool', default=True),
        network=dict(default=None),
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
        project=dict(default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # The 'project' parameter relies on shade features newer than 1.9.0.
    if (module.params['project'] and
            StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
        module.fail_json(msg="To utilize project, the installed version of"
                             "the shade library MUST be > 1.9.0")
    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    project = module.params['project']
    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')
    try:
        cloud = shade.openstack_cloud(**module.params)
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        router = cloud.get_router(name, filters=filters)
        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)
        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, internal_ids = _validate_subnets(module, cloud)
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net, internal_ids)
            )
        if state == 'present':
            changed = False
            if not router:
                # Create the router, then attach each requested internal
                # interface subnet.
                kwargs = _build_kwargs(cloud, module, router, net)
                if project_id:
                    kwargs['project_id'] = project_id
                router = cloud.create_router(**kwargs)
                for internal_subnet_id in internal_ids:
                    cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net, internal_ids):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    updated_router = cloud.update_router(**kwargs)
                    # Protect against update_router() not actually
                    # updating the router.
                    if not updated_router:
                        changed = False
                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    elif internal_ids:
                        router = updated_router
                        ports = cloud.list_router_interfaces(router, 'internal')
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                        for internal_subnet_id in internal_ids:
                            cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                    changed = True
            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])
        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = cloud.list_router_interfaces(router, 'internal')
                router_id = router['id']
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(router_id)
                module.exit_json(changed=True)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
fireplume/tennis-club | club/tests.py | 1 | 2690 | from django.test import TestCase
from club.models import *
class ClubTestCase(TestCase):
    """Smoke test: every model in the club app can be created and wired together."""

    def test_model_creation(self):
        # Addresses first: Club and Member rows reference them by foreign key.
        member_addr = Address.objects.create(address="123 MyStreet apt 666",
                                             city='MyCity',
                                             state_province='MyProvince',
                                             country='MyCountry',
                                             postal_zip_code='123456')
        club_addr = Address.objects.create(address="456 OtherStreet",
                                           city='ClubsCity',
                                           state_province='ClubsProvince',
                                           country='ClubsCountry',
                                           postal_zip_code='7890123')
        club = Club.objects.create(name='Tennis Club',
                                   creation_date='1968-05-21',
                                   address=club_addr,
                                   details="Fun Club.")
        # Staff row is created only for its side effect (no assertions needed;
        # any model error raises and fails the test).
        Staff.objects.create(club=club,
                             first_name='Staff',
                             last_name='Staffee',
                             title='President',
                             details="Doing business",
                             email="staff@staffee.com",
                             phone="000-000-0000")
        skill = Level.objects.create(level=Level.LEVEL_INTERMEDIATE,
                                     rating=Level.RATING_4_0)
        member = Member.objects.create(club=club,
                                       first_name='John',
                                       last_name='Doe',
                                       age=25,
                                       address=member_addr,
                                       email='jdoe@doe.com',
                                       level=skill,
                                       gender=Member.GENDER_MALE)
        membership = Membership.objects.create(member=member,
                                               start="2010-01-01",
                                               expiry="2010-01-31")
        league = Program.objects.create(program="Men's A League",
                                        start="2010-01-01",
                                        expiry="2010-01-31",
                                        fee="10",
                                        details="Advanced men's league")
        # Finally link the membership to the program.
        ProgramSubscriber.objects.create(membership=membership,
                                         program=league,
                                         paid=True)
| gpl-3.0 |
GunoH/intellij-community | python/testData/inspections/PyUnresolvedReferencesInspection/operators.py | 83 | 8516 | class C(object):
def __add__(self, other):
return int(other)
def __or__(self, other):
return other
def __rsub__(self, other):
return other
def __neg__(self):
return self
def test_object():
o1 = object()
o2 = object()
o1 <warning descr="Class 'object' does not define '__add__', so the '+' operator cannot be used on its instances">+</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__sub__', so the '-' operator cannot be used on its instances">-</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__mul__', so the '*' operator cannot be used on its instances">*</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__div__', so the '/' operator cannot be used on its instances">/</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__mod__', so the '%' operator cannot be used on its instances">%</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__pow__', so the '**' operator cannot be used on its instances">**</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__lshift__', so the '<<' operator cannot be used on its instances"><<</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__rshift__', so the '>>' operator cannot be used on its instances">>></warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__and__', so the '&' operator cannot be used on its instances">&</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__or__', so the '|' operator cannot be used on its instances">|</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__xor__', so the '^' operator cannot be used on its instances">^</warning> o2 #fail
o1 <warning descr="Class 'object' does not define '__floordiv__', so the '//' operator cannot be used on its instances">//</warning> o2 #fail
o2 < o1 < o2, o2 <= o1 <= o2, o1 == o2, o1 != o2, o2 in o1 #pass
def test_custom_class():
c = C()
o = object()
c + o, c | o, o - c #pass
c <warning descr="Class 'C' does not define '__sub__', so the '-' operator cannot be used on its instances">-</warning> o #fail
c <warning descr="Class 'C' does not define '__mul__', so the '*' operator cannot be used on its instances">*</warning> o #fail
c <warning descr="Class 'C' does not define '__div__', so the '/' operator cannot be used on its instances">/</warning> o #fail
c <warning descr="Class 'C' does not define '__mod__', so the '%' operator cannot be used on its instances">%</warning> o #fail
c <warning descr="Class 'C' does not define '__pow__', so the '**' operator cannot be used on its instances">**</warning> o #fail
c <warning descr="Class 'C' does not define '__lshift__', so the '<<' operator cannot be used on its instances"><<</warning> o #fail
c <warning descr="Class 'C' does not define '__rshift__', so the '>>' operator cannot be used on its instances">>></warning> o #fail
c <warning descr="Class 'C' does not define '__and__', so the '&' operator cannot be used on its instances">&</warning> o #fail
c <warning descr="Class 'C' does not define '__xor__', so the '^' operator cannot be used on its instances">^</warning> o #fail
c <warning descr="Class 'C' does not define '__floordiv__', so the '//' operator cannot be used on its instances">//</warning> o #fail
o < c < o, o <= c <= o, c == o, c != o, o in c #pass
def test_builtins():
i = 0
o = object()
i + o, i - o, i * o, i / o, i % o, i ** o, i << o, i >> o, i & o, i | o, i ^ o, i // o #pass
o < i < o, o <= i <= o, i == o, i != o, o in i #pass
o + i, o - i, o * i, o / i, o % i, o ** i, o << i, o >> i, o & i, o | i, o ^ i, o // i #pass
s = 'foo'
s + o, s * o, s % o #pass
s <warning descr="Class 'str' does not define '__sub__', so the '-' operator cannot be used on its instances">-</warning> o #fail
s <warning descr="Class 'str' does not define '__div__', so the '/' operator cannot be used on its instances">/</warning> o #fail
s <warning descr="Class 'str' does not define '__pow__', so the '**' operator cannot be used on its instances">**</warning> o #fail
s <warning descr="Class 'str' does not define '__lshift__', so the '<<' operator cannot be used on its instances"><<</warning> o #fail
s <warning descr="Class 'str' does not define '__rshift__', so the '>>' operator cannot be used on its instances">>></warning> o #fail
s <warning descr="Class 'str' does not define '__and__', so the '&' operator cannot be used on its instances">&</warning> o #fail
s <warning descr="Class 'str' does not define '__or__', so the '|' operator cannot be used on its instances">|</warning> o #fail
s <warning descr="Class 'str' does not define '__xor__', so the '^' operator cannot be used on its instances">^</warning> o #fail
s <warning descr="Class 'str' does not define '__floordiv__', so the '//' operator cannot be used on its instances">//</warning> o #fail
o < s < o, o <= s <= o, s == o, s != o, o in s #pass
xs = []
xs + o, xs * o #pass
xs <warning descr="Class 'list' does not define '__sub__', so the '-' operator cannot be used on its instances">-</warning> o #fail
xs <warning descr="Class 'list' does not define '__div__', so the '/' operator cannot be used on its instances">/</warning> o #fail
xs <warning descr="Class 'list' does not define '__mod__', so the '%' operator cannot be used on its instances">%</warning> o #fail
xs <warning descr="Class 'list' does not define '__pow__', so the '**' operator cannot be used on its instances">**</warning> o #fail
xs <warning descr="Class 'list' does not define '__lshift__', so the '<<' operator cannot be used on its instances"><<</warning> o #fail
xs <warning descr="Class 'list' does not define '__rshift__', so the '>>' operator cannot be used on its instances">>></warning> o #fail
xs <warning descr="Class 'list' does not define '__and__', so the '&' operator cannot be used on its instances">&</warning> o #fail
xs <warning descr="Class 'list' does not define '__or__', so the '|' operator cannot be used on its instances">|</warning> o #fail
xs <warning descr="Class 'list' does not define '__xor__', so the '^' operator cannot be used on its instances">^</warning> o #fail
xs <warning descr="Class 'list' does not define '__floordiv__', so the '//' operator cannot be used on its instances">//</warning> o #fail
o < xs < o, o <= xs <= o, xs == o, xs != o, o in xs #pass
def test_subscription():
class C(object):
def __getitem__(self, key, value):
pass
def __setitem__(self, item):
pass
def __delitem__(self, item):
pass
class D(object):
pass
class E(object):
def __getitem__(self, item):
pass
c = C()
c[0] = 0
print(c[0])
del c[0]
d = D()
d<warning descr="Class 'D' does not define '__setitem__', so the '[]' operator cannot be used on its instances">[</warning>0] = 0
print(d<warning descr="Class 'D' does not define '__getitem__', so the '[]' operator cannot be used on its instances">[</warning>0])
del d<warning descr="Class 'D' does not define '__delitem__', so the '[]' operator cannot be used on its instances">[</warning>0]
e = E()
e<warning descr="Class 'E' does not define '__setitem__', so the '[]' operator cannot be used on its instances">[</warning>0] = 0
print(e[0])
del e<warning descr="Class 'E' does not define '__delitem__', so the '[]' operator cannot be used on its instances">[</warning>0]
def test_unary_operators():
o = object()
print(<warning descr="Class 'object' does not define '__pos__', so the '+' operator cannot be used on its instances">+</warning>o)
print(<warning descr="Class 'object' does not define '__neg__', so the '-' operator cannot be used on its instances">-</warning>o)
print(<warning descr="Class 'object' does not define '__invert__', so the '~' operator cannot be used on its instances">~</warning>o)
i = 1
print(+i)
print(-i)
print(~i)
c = C()
print(<warning descr="Class 'C' does not define '__pos__', so the '+' operator cannot be used on its instances">+</warning>c)
print(-c)
print(<warning descr="Class 'C' does not define '__invert__', so the '~' operator cannot be used on its instances">~</warning>c) | apache-2.0 |
elaske/mufund | html5lib/tests/test_treewalkers.py | 429 | 13692 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
    """Yield pulldom-style ``(event, node)`` pairs for a DOM (sub)tree.

    Documents and document fragments contribute only their children.
    DOCTYPE nodes are not representable in PullDOM and raise
    NotImplementedError, as does any unrecognized node type.
    """
    from xml.dom import Node
    from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS

    kind = node.nodeType
    if kind in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
        # The container itself emits nothing; recurse into its children.
        for child in node.childNodes:
            for item in PullDOMAdapter(child):
                yield item
    elif kind == Node.DOCUMENT_TYPE_NODE:
        raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
    elif kind == Node.COMMENT_NODE:
        yield COMMENT, node
    elif kind in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
        yield CHARACTERS, node
    elif kind == Node.ELEMENT_NODE:
        # Elements bracket their children with start/end events.
        yield START_ELEMENT, node
        for child in node.childNodes:
            for item in PullDOMAdapter(child):
                yield item
        yield END_ELEMENT, node
    else:
        raise NotImplementedError("Node type not supported: " + str(kind))
# Base set of tree implementations exercised by the tests; optional
# implementations (ElementTree, cElementTree, lxml, genshi) are appended
# below only if their imports succeed.  Each entry supplies a tree
# "builder", a tree "walker", and optionally an "adapter" that converts
# the built document before it is walked.
treeTypes = {
    "DOM": {"builder": treebuilders.getTreeBuilder("dom"),
            "walker": treewalkers.getTreeWalker("dom")},
    "PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
                "adapter": PullDOMAdapter,
                "walker": treewalkers.getTreeWalker("pulldom")},
}
# Try whatever etree implementations are available from a list that are
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import lxml.etree as ElementTree # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
pass
else:
    def GenshiAdapter(tree):
        """Walk *tree* with the DOM walker and re-emit genshi stream events.

        Consecutive character tokens are buffered in ``text`` and flushed
        as a single TEXT event.  Positions are unknown here, hence the
        ``(None, -1, -1)`` placeholder on every event.
        """
        text = None
        for token in treewalkers.getTreeWalker("dom")(tree):
            type = token["type"]
            if type in ("Characters", "SpaceCharacters"):
                # Accumulate adjacent character data into one TEXT event.
                if text is None:
                    text = token["data"]
                else:
                    text += token["data"]
            elif text is not None:
                yield TEXT, text, (None, -1, -1)
                text = None
            if type in ("StartTag", "EmptyTag"):
                # Qualify the tag name with its namespace, Clark-notation style.
                if token["namespace"]:
                    name = "{%s}%s" % (token["namespace"], token["name"])
                else:
                    name = token["name"]
                attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                               for attr, value in token["data"].items()])
                yield (START, (QName(name), attrs), (None, -1, -1))
                if type == "EmptyTag":
                    # An empty tag emits START above and falls through to END below.
                    type = "EndTag"
            if type == "EndTag":
                if token["namespace"]:
                    name = "{%s}%s" % (token["namespace"], token["name"])
                else:
                    name = token["name"]
                yield END, QName(name), (None, -1, -1)
            elif type == "Comment":
                yield COMMENT, token["data"], (None, -1, -1)
            elif type == "Doctype":
                yield DOCTYPE, (token["name"], token["publicId"],
                                token["systemId"]), (None, -1, -1)
            else:
                pass  # FIXME: What to do?
        # Flush any character data left at the end of the stream.
        if text is not None:
            yield TEXT, text, (None, -1, -1)
    treeTypes["genshi"] = \
        {"builder": treebuilders.getTreeBuilder("dom"),
         "adapter": GenshiAdapter,
         "walker": treewalkers.getTreeWalker("genshi")}
def concatenateCharacterTokens(tokens):
    """Merge each run of Characters/SpaceCharacters tokens into a single
    Characters token, passing every other token through unchanged."""
    pending = None
    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            # Extend (or start) the buffered character run.
            if pending is None:
                pending = {"type": "Characters", "data": token["data"]}
            else:
                pending["data"] += token["data"]
            continue
        # Non-character token: flush any buffered run first.
        if pending is not None:
            yield pending
            pending = None
        yield token
    if pending is not None:
        yield pending
def convertTokens(tokens):
    """Serialize a walker token stream into the indented text format used by
    the tree-construction expectation files.

    Character runs are first merged via concatenateCharacterTokens; each tag
    nests its attributes and children two spaces deeper than itself.  The
    exact output strings are compared against fixture files, so they must
    not change.
    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(tokens):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # Non-HTML namespaces are rendered as "<prefix name>"; known
            # namespaces use their short prefix, unknown ones the full URI.
            if (token["namespace"] and
                    token["namespace"] != constants.namespaces["html"]):
                if token["namespace"] in constants.prefixes:
                    name = constants.prefixes[token["namespace"]]
                else:
                    name = token["namespace"]
                name += " " + token["name"]
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            attrs = token["data"]
            if attrs:
                # TODO: Remove this if statement, attrs should always exist
                # Attributes are emitted sorted, one per line, one level deeper.
                for (namespace, name), value in sorted(attrs.items()):
                    if namespace:
                        if namespace in constants.prefixes:
                            outputname = constants.prefixes[namespace]
                        else:
                            outputname = namespace
                        outputname += " " + name
                    else:
                        outputname = name
                    output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
            if type == "EmptyTag":
                # Empty tags have no children, so close their indent immediately.
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            # Three doctype shapes: name+publicId, name+systemId only, bare name.
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent, token["name"],
                                   token["publicId"],
                                   token["systemId"] and token["systemId"] or ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent, token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type in ("Characters", "SpaceCharacters"):
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        else:
            pass  # TODO: what to do with errors?
    return "\n".join(output)
import re

# Matches a run of two or more equally indented "name=value" attribute lines
# so they can be normalized into sorted order before comparison.
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)


def sortattrs(x):
    """re.sub callback: return the matched attribute lines sorted lexicographically."""
    return "\n".join(sorted(x.group(0).split("\n")))
class TokenTestCase(unittest.TestCase):
    def test_all_tokens(self):
        """Parse one tiny document with every registered tree type and check
        that each walker yields exactly the expected token sequence."""
        expected = [
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': 'a', 'type': 'Characters'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'b', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'c', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
        ]
        for treeName, treeCls in treeTypes.items():
            p = html5parser.HTMLParser(tree=treeCls["builder"])
            document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
            # Apply the optional adapter (identity when the tree has none).
            document = treeCls.get("adapter", lambda x: x)(document)
            output = treeCls["walker"](document)
            # NOTE(review): zip() stops at the shorter sequence, so a walker
            # that truncates its output would still pass — confirm intended.
            for expectedToken, outputToken in zip(expected, output):
                self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
    """Parse *input* (as a fragment inside *innerHTML* when given), walk the
    tree with *treeClass*'s walker, and assert the serialized token stream
    matches *expected* after attribute-order normalization.

    DataLossWarning is promoted to an error so known-lossy testcases can be
    skipped; NotImplementedError from a walker is forgiven.
    """
    warnings.resetwarnings()
    warnings.simplefilter("error")
    try:
        p = html5parser.HTMLParser(tree=treeClass["builder"])
        if innerHTML:
            document = p.parseFragment(input, innerHTML)
        else:
            document = p.parse(input)
    except constants.DataLossWarning:
        # Ignore testcases we know we don't pass
        return
    document = treeClass.get("adapter", lambda x: x)(document)
    try:
        output = convertTokens(treeClass["walker"](document))
        # Attribute order is implementation-defined, so sort before comparing.
        output = attrlist.sub(sortattrs, output)
        expected = attrlist.sub(sortattrs, convertExpected(expected))
        diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
                                    [line + "\n" for line in output.splitlines()],
                                    "Expected", "Received"))
        assert expected == output, "\n".join([
            "", "Input:", input,
            "", "Expected:", expected,
            "", "Received:", output,
            "", "Diff:", diff,
        ])
    except NotImplementedError:
        pass  # Amnesty for those that confess...
def test_treewalker():
    """Nose-style test generator: yield one runTreewalkerTest per
    (tree implementation, tree-construction testcase) pair."""
    sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
    for treeName, treeCls in treeTypes.items():
        files = get_data_files('tree-construction')
        for filename in files:
            testName = os.path.basename(filename).replace(".dat", "")
            # The "template" suite exercises features not supported here.
            if testName in ("template",):
                continue
            tests = TestData(filename, "data")
            for index, test in enumerate(tests):
                (input, errors,
                 innerHTML, expected) = [test[key] for key in ("data", "errors",
                                                               "document-fragment",
                                                               "document")]
                errors = errors.split("\n")
                yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
def set_attribute_on_first_child(docfrag, name, value, treeName):
    """Naively set attribute *name* = *value* on the first child of *docfrag*.

    Dispatches on the tree implementation name: ElementTree-style trees use
    ``d[0].set``, DOM-style trees ``d.firstChild.setAttribute``.  Unknown
    names try the DOM API first and fall back to ElementTree on
    AttributeError.
    """
    def et_setter(d):
        return d[0].set

    def dom_setter(d):
        return d.firstChild.setAttribute

    dispatch = {'ElementTree': et_setter,
                'cElementTree': et_setter,
                'DOM': dom_setter}
    try:
        dispatch.get(treeName, dom_setter)(docfrag)(name, value)
    except AttributeError:
        et_setter(docfrag)(name, value)
def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
    """tests what happens when we add attributes to the intext

    Parses *intext* as a fragment, sets each (name, value) pair from
    *attrs_to_add* on its first child, then walks the tree and checks the
    normalized serialization appears in *expected*.
    """
    treeName, treeClass = tree
    parser = html5parser.HTMLParser(tree=treeClass["builder"])
    document = parser.parseFragment(intext)
    for nom, val in attrs_to_add:
        set_attribute_on_first_child(document, nom, val, treeName)
    document = treeClass.get("adapter", lambda x: x)(document)
    output = convertTokens(treeClass["walker"](document))
    # Normalize attribute ordering so the comparison is order-independent.
    output = attrlist.sub(sortattrs, output)
    # NOTE(review): substring containment (`in`), not equality — presumably
    # deliberate so one expected string can cover several trees; confirm.
    if not output in expected:
        raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
def test_treewalker_six_mix():
    """Str/Unicode mix. If str attrs added to tree"""
    # On Python 2.x string literals are of type str. Unless, like this
    # file, the programmer imports unicode_literals from __future__.
    # In that case, string literals become objects of type unicode.
    # This test simulates a Py2 user, modifying attributes on a document
    # fragment but not using the u'' syntax nor importing unicode_literals
    #
    # Each entry: (input fragment, [(attr name, attr value)], expected output).
    sm_tests = [
        ('<a href="http://example.com">Example</a>',
         [(str('class'), str('test123'))],
         '<a>\n class="test123"\n href="http://example.com"\n "Example"'),
        ('<link href="http://example.com/cow">',
         [(str('rel'), str('alternate'))],
         '<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
    ]
    # Yield one edit-test per (tree implementation, testcase) pair.
    for tree in treeTypes.items():
        for intext, attrs, expected in sm_tests:
            yield runTreewalkerEditTest, intext, expected, attrs, tree
| gpl-3.0 |
twitchyliquid64/misc-scripts | s3tool/boto/ec2/autoscale/policy.py | 152 | 6230 | # Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
    """SAX parse target for a CloudWatch alarm attached to a scaling policy."""

    def __init__(self, connection=None):
        self.connection = connection
        self.name = None
        self.alarm_arn = None

    def __repr__(self):
        return 'Alarm:%s' % self.name

    def startElement(self, name, attrs, connection):
        # Alarms carry no nested elements we need to handle.
        return None

    def endElement(self, name, value, connection):
        # Map known XML element names onto attribute names; anything
        # unrecognized is stored verbatim under the element name.
        attr = {'AlarmName': 'name', 'AlarmARN': 'alarm_arn'}.get(name, name)
        setattr(self, attr, value)
class AdjustmentType(object):
    """SAX parse target holding a single AdjustmentType value."""

    def __init__(self, connection=None):
        self.connection = connection
        self.adjustment_type = None

    def __repr__(self):
        return 'AdjustmentType:%s' % self.adjustment_type

    def startElement(self, name, attrs, connection):
        return

    def endElement(self, name, value, connection):
        # Only the AdjustmentType element is captured; everything else
        # is ignored.
        if name != 'AdjustmentType':
            return
        self.adjustment_type = value
class MetricCollectionTypes(object):
    """SAX parse target listing which metrics and granularities are available."""

    class BaseType(object):
        # Subclasses set ``arg`` to the XML element name whose text they capture.
        arg = ''

        def __init__(self, connection):
            self.connection = connection
            self.val = None

        def __repr__(self):
            return '%s:%s' % (self.arg, self.val)

        def startElement(self, name, attrs, connection):
            return

        def endElement(self, name, value, connection):
            if name != self.arg:
                return
            self.val = value

    class Metric(BaseType):
        arg = 'Metric'

    class Granularity(BaseType):
        arg = 'Granularity'

    def __init__(self, connection=None):
        self.connection = connection
        self.metrics = []
        self.granularities = []

    def __repr__(self):
        return 'MetricCollectionTypes:<%s, %s>' % (self.metrics,
                                                   self.granularities)

    def startElement(self, name, attrs, connection):
        # Replace the plain lists with ResultSets that know how to parse
        # their member elements; return them so parsing descends into them.
        if name == 'Metrics':
            self.metrics = ResultSet([('member', self.Metric)])
            return self.metrics
        if name == 'Granularities':
            self.granularities = ResultSet([('member', self.Granularity)])
            return self.granularities

    def endElement(self, name, value, connection):
        return
class ScalingPolicy(object):
    def __init__(self, connection=None, **kwargs):
        """
        Scaling Policy

        :type name: str
        :param name: Name of scaling policy.

        :type adjustment_type: str
        :param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.

        :type as_name: str or int
        :param as_name: Name or ARN of the Auto Scaling Group.

        :type scaling_adjustment: int
        :param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).

        :type min_adjustment_step: int
        :param min_adjustment_step: Value of min adjustment step required to
            apply the scaling policy (only make sense when use `PercentChangeInCapacity` as adjustment_type.).

        :type cooldown: int
        :param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
        """
        self.name = kwargs.get('name', None)
        self.adjustment_type = kwargs.get('adjustment_type', None)
        self.as_name = kwargs.get('as_name', None)
        self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
        self.cooldown = kwargs.get('cooldown', None)
        self.connection = connection
        self.min_adjustment_step = kwargs.get('min_adjustment_step', None)
        # Fix: these attributes were previously only assigned while parsing a
        # service response (endElement/startElement), so reading them on a
        # freshly constructed policy raised AttributeError.
        self.policy_arn = None
        self.alarms = None

    def __repr__(self):
        return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
                                                             self.as_name,
                                                             self.adjustment_type)

    def startElement(self, name, attrs, connection):
        # Alarms arrive as a nested list; hand back a ResultSet so the
        # parser descends into it.
        if name == 'Alarms':
            self.alarms = ResultSet([('member', Alarm)])
            return self.alarms

    def endElement(self, name, value, connection):
        """Map response XML elements onto attributes, coercing ints where needed."""
        if name == 'PolicyName':
            self.name = value
        elif name == 'AutoScalingGroupName':
            self.as_name = value
        elif name == 'PolicyARN':
            self.policy_arn = value
        elif name == 'ScalingAdjustment':
            self.scaling_adjustment = int(value)
        elif name == 'Cooldown':
            self.cooldown = int(value)
        elif name == 'AdjustmentType':
            self.adjustment_type = value
        elif name == 'MinAdjustmentStep':
            self.min_adjustment_step = int(value)

    def delete(self):
        """Delete this policy from its Auto Scaling group via the connection."""
        return self.connection.delete_policy(self.name, self.as_name)
class TerminationPolicies(list):
    """SAX parse target that accumulates termination policy names as list items."""

    def __init__(self, connection=None, **kwargs):
        # Nothing to initialize beyond the empty list; arguments are
        # accepted only for interface parity with other parse targets.
        pass

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        # Only <member> elements contribute entries.
        if name != 'member':
            return
        self.append(value)
| mit |
h4ck3rm1k3/ansible | lib/ansible/runner/lookup_plugins/sequence.py | 85 | 6836 | # (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.errors import AnsibleError
import ansible.utils as utils
from re import compile as re_compile, IGNORECASE
# shortcut format
# NUM matches a decimal, octal (0...) or hex (0x...) integer; the captured
# text is parsed later with int(value, 0).
NUM = "(0?x?[0-9a-f]+)"
# Full shortcut form: [start-]end[/stride][:format]
SHORTCUT = re_compile(
    "^(" +  # Group 0
    NUM +  # Group 1: Start
    "-)?" +
    NUM +  # Group 2: End
    "(/" +  # Group 3
    NUM +  # Group 4: Stride
    ")?" +
    "(:(.+))?$",  # Group 5, Group 6: Format String
    IGNORECASE
)
class LookupModule(object):
    """
    sequence lookup module

    Used to generate some sequence of items. Takes arguments in two forms.

    The simple / shortcut form is:

      [start-]end[/stride][:format]

    As indicated by the brackets: start, stride, and format string are all
    optional.  The format string is in the style of printf.  This can be used
    to pad with zeros, format in hexadecimal, etc.  All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported.

    Some examples:

      5 -> ["1","2","3","4","5"]
      5-8 -> ["5", "6", "7", "8"]
      2-10/2 -> ["2", "4", "6", "8", "10"]
      4:host%02d -> ["host01","host02","host03","host04"]

    The standard Ansible key-value form is accepted as well.  For example:

      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]

    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value.  For example:

      count=5 -> ["1", "2", "3", "4", "5"]
      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]

    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """

    def __init__(self, basedir, **kwargs):
        """absorb any keyword args"""
        self.basedir = basedir

    def reset(self):
        """set sensible defaults"""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"

    def parse_kv_args(self, args):
        """parse key-value style arguments"""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # Base 0 lets int() accept decimal, octal (0...) and hex (0x...).
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse arg %s=%r as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        if args:
            # Anything left over was not a recognized keyword.
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r"
                % args.keys()
            )

    def parse_simple_args(self, term):
        """parse the shortcut forms, return True/False"""
        match = SHORTCUT.match(term)
        if not match:
            return False

        _, start, end, _, stride, _, format = match.groups()

        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)
        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)
        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)

        # Only override the defaults for the pieces that were present.
        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format
        return True

    def sanity_check(self):
        """Validate the combination of options and normalize count into end."""
        if self.count is None and self.end is None:
            raise AnsibleError(
                "must specify count or end in with_sequence"
            )
        elif self.count is not None and self.end is not None:
            raise AnsibleError(
                "can't specify both count and end in with_sequence"
            )
        elif self.count is not None:
            # convert count to end
            self.end = self.start + self.count * self.stride - 1
            del self.count
        if self.end < self.start:
            raise AnsibleError("can't count backwards")
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)

    def generate_sequence(self):
        """Yield each number in [start, end] (stepping by stride) rendered
        through the printf-style format string."""
        numbers = xrange(self.start, self.end + 1, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                # Fix: the original passed a single value to a two-placeholder
                # format string, which made this handler itself raise
                # TypeError instead of the intended AnsibleError.
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )

    def run(self, terms, inject=None, **kwargs):
        """Lookup entry point: expand each term into its generated sequence."""
        results = []

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if isinstance(terms, basestring):
            terms = [ terms ]

        for term in terms:
            try:
                self.reset()  # clear out things for this iteration

                try:
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(utils.parse_kv(term))
                except AnsibleError:
                    # Preserve the specific parse error (e.g. "can't parse
                    # start=... as integer") instead of masking it with the
                    # generic message below.
                    raise
                except Exception:
                    raise AnsibleError(
                        "unknown error parsing with_sequence arguments: %r"
                        % term
                    )

                self.sanity_check()

                results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            except Exception:
                raise AnsibleError(
                    "unknown error generating sequence"
                )

        return results
| gpl-3.0 |
puzan/ansible | lib/ansible/playbook/role/definition.py | 26 | 9039 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import iteritems, string_types
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.path import unfrackpath
# Prefer the shared `display` object created by the ansible CLI entry
# point; when imported outside the CLI (e.g. in tests), fall back to a
# freshly constructed Display instance.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
    """
    A single parsed role reference from a play's `roles:` list.

    Resolves the role name to an on-disk path and separates free-form
    role parameters from the playbook field attributes declared on the
    Base/Become/Conditional/Taggable mixins.
    """

    # the only attribute specific to this class; everything else comes
    # from the mixin base classes
    _role = FieldAttribute(isa='string')
    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
        super(RoleDefinition, self).__init__()
        self._play = play
        self._variable_manager = variable_manager
        self._loader = loader
        self._role_path = None          # filled in by preprocess_data()
        self._role_basedir = role_basedir
        self._role_params = dict()      # filled in by _split_role_params()
    #def __repr__(self):
    #    return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')
    @staticmethod
    def load(data, variable_manager=None, loader=None):
        # RoleDefinition objects are built via preprocess_data(), not the
        # generic Base.load() path
        raise AnsibleError("not implemented")
    def preprocess_data(self, ds):
        """
        Normalize the raw datastructure for a role entry: resolve the
        role name/path and split params from field attributes. Returns a
        new AnsibleMapping containing only valid field attributes.
        """
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds
        # NOTE(review): assert is stripped under `python -O`; callers rely
        # on upstream validation for malformed types
        assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)
        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)
        # save the original ds for use later
        self._ds = ds
        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos
        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)
        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params
        # set the role name in the new ds
        new_ds['role'] = role_name
        # we store the role path internally
        self._role_path = role_path
        # and return the cleaned-up data structure
        return new_ds
    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''
        if isinstance(ds, string_types):
            return ds
        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, string_types):
            raise AnsibleError('role definitions must contain a role name', obj=ds)
        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
            templar = Templar(loader=self._loader, variables=all_vars)
            if templar._contains_vars(role_name):
                role_name = templar.template(role_name)
        return role_name
    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''
        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
        ]
        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)
        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)
        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())
        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
        else:
            all_vars = dict()
        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)
        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)
        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)
        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''
        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._valid_attrs.keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            # connection fields in the Base class. There may need to be some
            # other mechanism where we exclude certain kinds of field attributes,
            # or make this list more automatic in some way so we don't have to
            # remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key + \
                        "In the future, these values should be entered in the `vars:` " + \
                        "section for roles, but for now we'll store it as both a param and an attribute.")
                    # deprecated keys are stored as BOTH a param and an attribute
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value
        return (role_def, role_params)
    def get_role_params(self):
        # copy so callers cannot mutate our internal state
        return self._role_params.copy()
    def get_role_path(self):
        return self._role_path
| gpl-3.0 |
gabrielfalcao/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/localflavor/uk/forms.py | 313 | 1943 | """
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
    """
    Form field accepting only valid UK postcodes.

    The validation pattern comes from the British Standard BS7666
    address-type schema (http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd).
    On success the cleaned value is uppercased with exactly one space
    inserted before the incode (the final three characters).
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid postcode.'),
    }
    outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
    incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
    postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
    space_regex = re.compile(r' *(%s)$' % incode_pattern)

    def clean(self, value):
        """Validate *value* as a UK postcode and normalise its spacing."""
        cleaned = super(UKPostcodeField, self).clean(value)
        if cleaned == u'':
            return cleaned
        # Uppercase/trim, then force a single space before the incode.
        candidate = self.space_regex.sub(r' \1', cleaned.upper().strip())
        if self.postcode_regex.search(candidate):
            return candidate
        raise ValidationError(self.error_messages['invalid'])
class UKCountySelect(Select):
    """Select widget whose choices are the UK counties/regions."""
    def __init__(self, attrs=None):
        # Imported lazily so the choices module is only loaded when the
        # widget is actually instantiated.
        from uk_regions import UK_REGION_CHOICES
        super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
    """Select widget whose choices are the UK nations."""
    def __init__(self, attrs=None):
        # Imported lazily so the choices module is only loaded when the
        # widget is actually instantiated.
        from uk_regions import UK_NATIONS_CHOICES
        super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
| gpl-3.0 |
bdero/edx-platform | common/test/acceptance/pages/studio/settings_group_configurations.py | 8 | 6420 | """
Course Group Configurations page.
"""
from .course_page import CoursePage
from .utils import confirm_prompt
class GroupConfigurationsPage(CoursePage):
    """Studio page object for a course's group configurations."""
    url_path = "group_configurations"

    def is_browser_on_page(self):
        """True when the browser is showing the group configurations view."""
        return self.q(css='body.view-group-configurations').present

    @property
    def group_configurations(self):
        """All group configurations on the page, as wrapper objects."""
        count = len(self.q(css='.group-configurations-list-item'))
        return [GroupConfiguration(self, index) for index in xrange(count)]

    def create(self):
        """Click the button that creates a new group configuration."""
        self.q(css=".new-button").first.click()
class GroupConfiguration(object):
    """
    Group Configuration wrapper.

    Wraps the list item at position *index* on the group configurations
    page; all queries are scoped under that item's CSS selector.
    """
    def __init__(self, page, index):
        self.page = page
        # all find_css() lookups are prefixed with this item selector
        self.SELECTOR = '.group-configurations-list-item-{}'.format(index)
        self.index = index
    def get_selector(self, css=''):
        # descendant-combinator join: "<item selector> <css>"
        return ' '.join([self.SELECTOR, css])
    def find_css(self, selector):
        """
        Find elements as defined by css locator.
        """
        return self.page.q(css=self.get_selector(css=selector))
    def toggle(self):
        """
        Expand/collapse group configuration.
        """
        self.find_css('a.group-toggle').first.click()
    @property
    def is_expanded(self):
        """
        Group configuration usage information is expanded.
        """
        return self.find_css('a.group-toggle.hide-groups').present
    def add_group(self):
        """
        Add new group.
        """
        self.find_css('button.action-add-group').first.click()
    def get_text(self, css):
        """
        Return text of the first element matching the css locator.
        """
        return self.find_css(css).first.text[0]
    def click_outline_anchor(self):
        """
        Click on the `Course Outline` link.
        """
        self.find_css('p.group-configuration-usage-text a').first.click()
    def click_unit_anchor(self, index=0):
        """
        Click on the link to the unit.
        """
        self.find_css('li.group-configuration-usage-unit a').nth(index).click()
    def edit(self):
        """
        Open editing view for the group configuration.
        """
        self.find_css('.action-edit .edit').first.click()
    @property
    def delete_button_is_disabled(self):
        # True when the delete control is rendered but disabled
        return self.find_css('.actions .delete.is-disabled').present
    @property
    def delete_button_is_absent(self):
        # True when no delete control is rendered at all
        return not self.find_css('.actions .delete').present
    def delete(self):
        """
        Delete the group configuration (confirms the prompt).
        """
        self.find_css('.actions .delete').first.click()
        confirm_prompt(self.page)
    def save(self):
        """
        Save group configuration and wait for the save request.
        """
        self.find_css('.action-primary').first.click()
        self.page.wait_for_ajax()
    def cancel(self):
        """
        Cancel group configuration.
        """
        self.find_css('.action-secondary').first.click()
    @property
    def mode(self):
        """
        Return group configuration mode: 'edit', 'details', or None
        when neither view is present.
        """
        if self.find_css('.group-configuration-edit').present:
            return 'edit'
        elif self.find_css('.group-configuration-details').present:
            return 'details'
    @property
    def id(self):
        """
        Return group configuration id.
        """
        return self.get_text('.group-configuration-id .group-configuration-value')
    @property
    def validation_message(self):
        """
        Return validation message.
        """
        return self.get_text('.message-status.error')
    @property
    def usages(self):
        """
        Return list of usages.
        """
        css = '.group-configuration-usage-unit'
        return self.find_css(css).text
    @property
    def name(self):
        """
        Return group configuration name.
        """
        return self.get_text('.group-configuration-title')
    @name.setter
    def name(self, value):
        """
        Set group configuration name.
        """
        self.find_css('.group-configuration-name-input').first.fill(value)
    @property
    def description(self):
        """
        Return group configuration description.
        """
        return self.get_text('.group-configuration-description')
    @description.setter
    def description(self, value):
        """
        Set group configuration description.
        """
        self.find_css('.group-configuration-description-input').first.fill(value)
    @property
    def groups(self):
        """
        Return list of Group wrappers, one per group row.
        """
        def group_selector(group_index):
            # trailing space is intentional: Group appends its own selector
            return self.get_selector('.group-{} '.format(group_index))
        return [Group(self.page, group_selector(index)) for index, element in enumerate(self.find_css('.group'))]
    @property
    def delete_note(self):
        """
        Return delete note (tooltip) for the group configuration.
        """
        return self.find_css('.wrapper-delete-button').first.attrs('data-tooltip')[0]
    def __repr__(self):
        return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
    """Wrapper around a single group row inside a group configuration."""

    def __init__(self, page, prefix_selector):
        self.page = page
        self.prefix = prefix_selector

    def find_css(self, selector):
        """Query elements scoped under this group's prefix selector."""
        return self.page.q(css=self.prefix + selector)

    @property
    def name(self):
        """The group's displayed name."""
        return self.find_css('.group-name').first.text[0]

    @name.setter
    def name(self, value):
        """Type *value* into the group's name input."""
        self.find_css('.group-name').first.fill(value)

    @property
    def allocation(self):
        """The group's displayed allocation text."""
        return self.find_css('.group-allocation').first.text[0]

    def remove(self):
        """Click the control that removes this group."""
        return self.find_css('.action-close').first.click()

    def __repr__(self):
        return "<{}:{}>".format(self.__class__.__name__, self.name)
| agpl-3.0 |
zenweasel/loggerglue | loggerglue/tests/rfc5424.py | 1 | 4512 | import unittest
from loggerglue.rfc5424 import *
# Known-good RFC 5424 syslog messages (mostly the RFC's own examples,
# plus extra structured-data / escaping edge cases). \xef\xbb\xbf is the
# UTF-8 BOM that RFC 5424 requires before UTF-8 MSG content.
valids = (
    """<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - \xef\xbb\xbf'su root' failed for lonvick on /dev/pts/8""",
    """<165>1 2003-08-24T05:14:15.000003-07:00 192.0.2.1 myproc 8710 - - %% It's time to make the do-nuts.""",
    """<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] \xef\xbb\xbfAn application event log entry...""",
    """<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"][examplePriority@32473 class="high"]""",
    """<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [traceback@32473 file="main.py" line="123" method="runStuff" file="pinger.py" line="456" method="pingpong"]""",
    """<34>1 2003-10-11T22:14:15.003000Z mymachine.example.com su - ID47 [test@32473 escaped="\\"nS\\]t\\\u\n"] \xef\xbb\xbf'su root' failed\n for lonvick on /dev/pts/8""",
    """<165>1 2003-10-11T22:14:15.003000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] \xef\xbb\xbfAn application event log entry...""",
    """<78>1 2011-03-20T12:00:01+01:00 mymachine.example.com - 9778 - - (orion) CMD (/home/www/stats/pinger.py /home/www/stats/data/pinger.pickle)""",
    )
# Messages that the parser must reject.
invalids = (
    """This is obviously invalid.""",
    )
class TestABNF(unittest.TestCase):
    """Grammar-level tests for the RFC 5424 ABNF parser."""

    def test_valids(self):
        # every known-good message must parse without raising
        for sample in valids:
            syslog_msg.parseString(sample)

    def test_invalids(self):
        for sample in invalids:
            self.assertRaises(ParseException, syslog_msg.parseString, sample)

    def test_details(self):
        # header fields of the first RFC example
        parsed = syslog_msg.parseString(valids[0])
        self.assertEqual(parsed.PRIVAL, '34')
        self.assertEqual(parsed.VERSION, '1')
        self.assertEqual(parsed.TIMESTAMP, '2003-10-11T22:14:15.003Z')
        self.assertEqual(parsed.HOSTNAME, 'mymachine.example.com')
        self.assertEqual(parsed.APP_NAME, 'su')
        self.assertEqual(parsed.PROCID, '-')
        self.assertEqual(parsed.STRUCTURED_DATA, '-')
        self.assertEqual(parsed.MSG,
            "\xef\xbb\xbf'su root' failed for lonvick on /dev/pts/8")
        # structured-data parsing
        parsed = syslog_msg.parseString(valids[2])
        self.assertTrue(hasattr(parsed.STRUCTURED_DATA, 'SD_ID'))
        parsed = syslog_msg.parseString(valids[3])
        self.assertEqual(len(parsed.SD_ELEMENTS), 2)
        self.assertEqual(len(parsed.SD_ELEMENTS[0].SD_PARAMS), 3)
        self.assertEqual(len(parsed.SD_ELEMENTS[1].SD_PARAMS), 1)
class TestSyslogEntry(unittest.TestCase):
    """Tests for the higher-level SyslogEntry wrapper around the parser."""
    def test_class(self):
        # every valid message must yield an entry object
        for v in valids:
            se = SyslogEntry.from_line(v)
            self.assertTrue(se is not None)
    def test_details(self):
        # field extraction from the first RFC example
        se = SyslogEntry.from_line(valids[0])
        self.assertEqual(se.msg,
            """'su root' failed for lonvick on /dev/pts/8""")
        self.assertEqual(se.timestamp.year, 2003)
        self.assertEqual(se.hostname, 'mymachine.example.com')
        self.assertEqual(se.msgid, 'ID47')
        # structured data: element and param counts
        se = SyslogEntry.from_line(valids[3])
        self.assertEqual(len(se.structured_data.elements), 2)
        self.assertEqual(len(se.structured_data.elements[0].sd_params), 3)
        self.assertEqual(len(se.structured_data.elements[1].sd_params), 1)
        # duplicate param names ("file" appears twice) must be preserved
        se = SyslogEntry.from_line(valids[4])
        self.assertEqual(len(se.structured_data.elements), 1)
        self.assertEqual(len(list(se.structured_data.elements[0].sd_params.allitems())), 6)
        self.assertEqual(len(list(se.structured_data.elements[0].sd_params.getall("file"))), 2)
        # round-trip: str() of a parsed entry reproduces the input line
        se = SyslogEntry.from_line(valids[5])
        self.assertEqual(str(se), valids[5])
        # building an entry programmatically serializes to the expected line
        se = SyslogEntry(
            prival=165, version=1, timestamp=datetime(2003,10,11,22,14,15,3000),
            hostname='mymachine.example.com', app_name='evntslog', procid=None, msgid='ID47',
            structured_data=StructuredData([SDElement('exampleSDID@32473',
                [('iut','3'),
                 ('eventSource','Application'),
                 ('eventID','1011')]
                )]),
            msg=u'An application event log entry...'
            )
        self.assertEqual(str(se), valids[6])
        # numeric-offset timezone parsing
        se = SyslogEntry.from_line(valids[7])
        self.assertEqual(se.timestamp.year, 2011)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| mit |
foss-transportationmodeling/rettina-server | flask/local/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/pymysql.py | 21 | 1232 | # mysql/pymysql.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: http://code.google.com/p/pymysql/
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
    """
    SQLAlchemy dialect for the pure-Python pymysql driver.

    pymysql targets 100% MySQLdb compatibility, so this subclass only
    overrides the driver identity and a couple of Python 3 behaviours.
    """
    driver = 'pymysql'
    description_encoding = None
    if py3k:
        # under Python 3, pymysql accepts unicode SQL statements natively
        supports_unicode_statements = True
    @classmethod
    def dbapi(cls):
        # DBAPI module import is deferred until the dialect is used
        return __import__('pymysql')
    if py3k:
        def _extract_error_code(self, exception):
            # pymysql on py3k may wrap the original DBAPI error; unwrap
            # one level before reading the numeric code from args[0]
            if isinstance(exception.args[0], Exception):
                exception = exception.args[0]
            return exception.args[0]
# module-level hook SQLAlchemy uses to locate the dialect class
dialect = MySQLDialect_pymysql
| apache-2.0 |
elkingtonmcb/django | tests/urlpatterns_reverse/included_namespace_urls.py | 199 | 1357 | import warnings
from django.conf.urls import include, patterns, url
from django.utils.deprecation import RemovedInDjango110Warning
from .namespace_urls import URLObject
from .views import view_class_instance
# Namespace fixtures used by the included-URLconf reverse() tests.
testobj3 = URLObject('testapp', 'test-ns3')
testobj4 = URLObject('testapp', 'test-ns4')
# test deprecated patterns() function. convert to list of urls() in Django 1.10
# (the warning filter keeps the deprecation from polluting test output)
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=RemovedInDjango110Warning)
    urlpatterns = patterns('urlpatterns_reverse.views',
        url(r'^normal/$', 'empty_view', name='inc-normal-view'),
        url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', 'empty_view', name='inc-normal-view'),
        url(r'^\+\\\$\*/$', 'empty_view', name='inc-special-view'),
        url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', 'empty_view', name='inc-mixed-args'),
        url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', 'empty_view', name='inc-no-kwargs'),
        url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', view_class_instance, name='inc-view-class'),
        (r'^test3/', include(testobj3.urls)),
        (r'^test4/', include(testobj4.urls)),
        (r'^ns-included3/', include('urlpatterns_reverse.included_urls', namespace='inc-ns3')),
        (r'^ns-included4/', include('urlpatterns_reverse.namespace_urls', namespace='inc-ns4')),
    )
| bsd-3-clause |
bkendzior/scipy | scipy/fftpack/setup.py | 102 | 1514 | #!/usr/bin/env python
# Created by Pearu Peterson, August 2002
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.fftpack."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('fftpack', parent_package, top_path)
    config.add_data_dir('tests')

    # Fortran libraries backing the FFT extension modules.
    dfftpack_src = [join('src/dfftpack', '*.f')]
    config.add_library('dfftpack', sources=dfftpack_src)
    fftpack_src = [join('src/fftpack', '*.f')]
    config.add_library('fftpack', sources=fftpack_src)

    ext_sources = ['fftpack.pyf', 'src/zfft.c', 'src/drfft.c', 'src/zrfft.c',
                   'src/zfftnd.c', 'src/dct.c.src', 'src/dst.c.src']
    config.add_extension('_fftpack',
                         sources=ext_sources,
                         libraries=['dfftpack', 'fftpack'],
                         include_dirs=['src'],
                         depends=(dfftpack_src + fftpack_src))
    config.add_extension('convolve',
                         sources=['convolve.pyf', 'src/convolve.c'],
                         libraries=['dfftpack'],
                         depends=dfftpack_src,
                         )
    return config
# Stand-alone build entry point (normally driven by scipy's top-level setup).
if __name__ == '__main__':
    from numpy.distutils.core import setup
    from fftpack_version import fftpack_version
    setup(version=fftpack_version,
          description='fftpack - Discrete Fourier Transform package',
          author='Pearu Peterson',
          author_email='pearu@cens.ioc.ee',
          maintainer_email='scipy-dev@scipy.org',
          license='SciPy License (BSD Style)',
          **configuration(top_path='').todict())
| bsd-3-clause |
mkieszek/odoo | addons/pad/pad.py | 30 | 4250 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import random
import re
import string
import urllib2
import logging
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from py_etherpad import EtherpadLiteClient
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
class pad_common(osv.osv_memory):
    """
    Mixin giving models Etherpad-backed fields: generates pad URLs on
    create/copy and mirrors pad contents back into the companion
    `pad_content_field` on write/create.
    """
    _name = 'pad.common'
    def pad_is_configured(self, cr, uid, context=None):
        # a pad server configured on the user's company enables the feature
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return bool(user.company_id.pad_server)
    def pad_generate_url(self, cr, uid, context=None):
        """
        Build a fresh pad URL (and create the pad with initial content
        when context carries model/field_name/object_id). Returns a dict
        with server/path/url keys; when no server is configured the
        partial config dict is returned instead.
        """
        company = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).company_id
        pad = {
            "server" : company.pad_server,
            "key" : company.pad_key,
        }
        # make sure pad server in the form of http://hostname
        if not pad["server"]:
            return pad
        if not pad["server"].startswith('http'):
            pad["server"] = 'http://' + pad["server"]
        pad["server"] = pad["server"].rstrip('/')
        # generate a salt (cryptographically random, 10 chars)
        s = string.ascii_uppercase + string.digits
        salt = ''.join([s[random.SystemRandom().randint(0, len(s) - 1)] for i in range(10)])
        #path
        # etherpad hardcodes pad id length limit to 50
        path = '-%s-%s' % (self._name, salt)
        path = '%s%s' % (cr.dbname.replace('_','-')[0:50 - len(path)], path)
        # contruct the url
        url = '%s/p/%s' % (pad["server"], path)
        #if create with content
        if "field_name" in context and "model" in context and "object_id" in context:
            myPad = EtherpadLiteClient( pad["key"], pad["server"]+'/api')
            try:
                myPad.createPad(path)
            except urllib2.URLError:
                raise UserError(_("Pad creation failed, either there is a problem with your pad server URL or with your connection."))
            #get attr on the field model
            model = self.pool[context["model"]]
            field = model._fields[context['field_name']]
            real_field = field.pad_content_field
            #get content of the real field
            for record in model.browse(cr, uid, [context["object_id"]]):
                if record[real_field]:
                    myPad.setText(path, (html2plaintext(record[real_field]).encode('utf-8')))
                #Etherpad for html not functional
                #myPad.setHTML(path, record[real_field])
        return {
            "server": pad["server"],
            "path": path,
            "url": url,
        }
    def pad_get_content(self, cr, uid, url, context=None):
        """Fetch a pad's HTML body; '' when the pad is unreachable."""
        content = ''
        if url:
            try:
                page = urllib2.urlopen('%s/export/html'%url).read()
                mo = re.search('<body>(.*)</body>',page)
                if mo:
                    content = mo.group(1)
            # NOTE(review): bare except deliberately makes this best-effort;
            # any network/parse failure degrades to empty content
            except:
                _logger.warning("No url found '%s'.", url)
        return content
    # TODO
    # reverse engineer protocol to be setHtml without using the api key
    def write(self, cr, uid, ids, vals, context=None):
        # sync pad content into the companion field before saving
        self._set_pad_value(cr, uid, vals, context)
        return super(pad_common, self).write(cr, uid, ids, vals, context=context)
    def create(self, cr, uid, vals, context=None):
        self._set_pad_value(cr, uid, vals, context)
        return super(pad_common, self).create(cr, uid, vals, context=context)
    # Set the pad content in vals
    def _set_pad_value(self, cr, uid, vals, context=None):
        # for each pad URL being written, mirror the pad's current content
        # into the configured pad_content_field (mutates vals in place)
        for k,v in vals.items():
            field = self._fields[k]
            if hasattr(field,'pad_content_field'):
                vals[field.pad_content_field] = self.pad_get_content(cr, uid, v, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        if not default:
            default = {}
        # a duplicated record must not share pads with the original
        for k, field in self._fields.iteritems():
            if hasattr(field,'pad_content_field'):
                pad = self.pad_generate_url(cr, uid, context)
                default[k] = pad.get('url')
        return super(pad_common, self).copy(cr, uid, id, default, context)
| agpl-3.0 |
funson/rt-xen | tools/python/logging/logging-0.4.9.2/test/logconf.py | 42 | 62356 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A simple-minded GUI configurator for the logging module, using Tkinter.
Should work under Python versions >= 1.5.2.
Copyright (C) 2002 Vinay Sajip. All Rights Reserved.
Configuration files are read/written using ConfigParser.
"""
"""
(C) 2002 Vinay Sajip. All rights reserved.
"""
from Tkinter import *
from tkFileDialog import *
from tkMessageBox import *
import os, sys, string, types
import ConfigParser
# Currently-active configurator instance (module-level singleton handle).
active = None
__version__ = "0.4.1"
DEFAULT_FILENAME = "logconf.ini"
# The choice tuples below are (stored value, displayed text) pairs used
# to populate the GUI's selection lists.
LOGGING_LEVELS = (
    ("NOTSET", "NOTSET"),
    ("DEBUG", "DEBUG"),
    ("INFO", "INFO"),
    ("WARNING", "WARNING"),
    ("ERROR", "ERROR"),
    ("CRITICAL", "CRITICAL")
)
# Handler proxy class name -> displayed handler type.
HANDLER_TYPES = (
    ("StreamHandlerProxy", "StreamHandler"),
    ("FileHandlerProxy", "FileHandler"),
    ("RotatingFileHandlerProxy", "RotatingFileHandler"),
    ("SocketHandlerProxy", "SocketHandler"),
    ("DatagramHandlerProxy", "DatagramHandler"),
    ("SysLogHandlerProxy", "SysLogHandler"),
    ("NTEventLogHandlerProxy", "NTEventLogHandler"),
    ("SMTPHandlerProxy", "SMTPHandler"),
    ("MemoryHandlerProxy", "MemoryHandler"),
    ("HTTPHandlerProxy", "HTTPHandler"),
#    ("SOAPHandlerProxy", "SOAPHandler"),
)
OUTPUT_STREAMS = (
    ("sys.stdout", "sys.stdout"),
    ("sys.stderr", "sys.stderr")
)
FILE_MODES = (
    ("a", "a"),
    ("w", "w")
)
HTTP_METHODS = (
    ("GET", "GET"),
    ("POST", "POST")
)
# SysLogHandler facility constant name -> config-file facility keyword.
SYSLOG_FACILITIES = (
    ("LOG_AUTH", "auth"),
    ("LOG_AUTHPRIV", "authpriv"),
    ("LOG_CRON", "cron"),
    ("LOG_DAEMON", "daemon"),
    ("LOG_KERN", "kern"),
    ("LOG_LPR", "lpr"),
    ("LOG_MAIL", "mail"),
    ("LOG_NEWS", "news"),
    ("LOG_AUTH", "security"),
    ("LOG_SYSLOG", "syslog"),
    ("LOG_USER", "user"),
    ("LOG_UUCP", "uucp"),
    ("LOG_LOCAL0", "local0"),
    ("LOG_LOCAL1", "local1"),
    ("LOG_LOCAL2", "local2"),
    ("LOG_LOCAL3", "local3"),
    ("LOG_LOCAL4", "local4"),
    ("LOG_LOCAL5", "local5"),
    ("LOG_LOCAL6", "local6"),
    ("LOG_LOCAL7", "local7"),
)
# NT event log targets for NTEventLogHandler.
LOG_TYPES = (
    ("Application", "Application"),
    ("System", "System"),
    ("Security", "Security")
)
BOOLEAN_VALUES = (
    ("0", "False"),
    ("1", "True")
)
class Property:
    """A named, captioned value, optionally restricted to a choice list."""

    def __init__(self, name, caption, value=None, choices=None):
        self.name = name
        self.caption = caption
        self.value = value
        self.choices = choices

    def getChoices(self):
        """Return the (value, text) choice pairs, or None."""
        return self.choices

    def isvalid(self, s):
        """Validation hook; the base property rejects everything."""
        return 0

    def getCaption(self):
        return self.caption

    def getValue(self):
        return self.value

    def getChoiceText(self, val):
        """Display text for choice value *val*, or '' when unknown."""
        for choice in (self.getChoices() or ()):
            if choice[0] == val:
                return choice[1]
        return ""

    def setValue(self, val):
        self.value = val

    def getValueText(self):
        """Render the value (or list of values) as a comma-joined string."""
        if type(self.value) in [types.ListType, types.TupleType]:
            vals = list(self.value)
        else:
            vals = [self.value]
        if self.getChoices():
            # map stored values to their display texts (Python 2 map -> list)
            vals = map(self.getChoiceText, vals)
        return string.join(vals, ',')
class PropertyHolder:
    """Holds a mapping of property name -> Property plus a display order."""

    def __init__(self, dict):
        self.dict = dict
        self.propnames = []            # listing order for the UI
        self.onPropListChanged = None  # optional change-notification callback

    def getPropNames(self):
        """Return the property names in the order they should be listed."""
        return self.propnames

    def getProp(self, name):
        return self.dict[name]

    def isReadonly(self, name):
        # base holders treat every property as editable
        return 0

    # convenience accessors
    def getPropValue(self, name):
        return self.dict[name].value

    def setPropValue(self, name, value):
        self.dict[name].setValue(value)
# Grey used for the grid lines drawn by the property widgets below.
LINE_COLOUR = '#999999'
class ScrollingList(Frame):
    """
    A Listbox with an attached vertical scrollbar.

    Selection changes are forwarded to ``self.listener.onListChange``;
    the listener defaults to the parent widget.
    """
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        self.listener = self.parent
        self.sb = Scrollbar(self, orient=VERTICAL)
        # wire the scrollbar and listbox to each other
        kwargs["yscrollcommand"] = self.sb.set
        self.list = apply(Listbox, (self,) + args, kwargs)
        self.sb.config(command=self.list.yview)
        self.sb.pack(side=RIGHT, fill=Y)
        self.list.pack(side=LEFT, fill=BOTH,expand=1)
        self.list.bind('<ButtonRelease-1>', self.onListChange)
        self.choices = None
    def setContents(self, choices, value):
        """
        Populate the listbox with (value, text) *choices* and select
        *value* (a list enables multi-select mode, a scalar single-select).
        """
        self.choices = choices
        self.value = value
        self.list.delete(0, END)
        # list-valued selection means multiple choices may be selected
        if type(value) == types.ListType:
            sm = EXTENDED
        else:
            sm = BROWSE
        self.list.configure(selectmode=sm)
        i = 0
        for choice in choices:
            self.list.insert(END, choice[1])
            if sm == EXTENDED:
                if choice[0] in value:
                    self.list.select_set(i)
            else:
                if choice[0] == value:
                    self.list.select_set(i)
            i = i + 1
    def getValue(self):
        """
        Return the selected choice value (single mode) or the list of
        selected values (multi mode).
        """
        if type(self.value) == types.ListType:
            multi = 1
            rv = []
        else:
            multi = 0
        # NOTE(review): in single mode with nothing selected, rv is never
        # bound and this raises NameError — confirm callers guarantee a
        # selection exists
        for i in xrange(len(self.choices)):
            if self.list.select_includes(i):
                if not multi:
                    rv = self.choices[i][0]
                    break
                else:
                    rv.append(self.choices[i][0])
        return rv
    def onListChange(self, event):
        # cache the new selection and notify the listener
        self.value = self.getValue()
        self.listener.onListChange(self.value)
class PropertyHeader(Canvas):
    """
    Canvas drawing the two-column ('Property' / 'Value') header row
    above a PropertyCanvas, redrawing its top rule on resize.
    """
    def __init__(self, parent, *args, **kwargs):
        # pop our custom options before handing kwargs to Canvas
        self.namewidth = 120
        if kwargs.has_key("namewidth"):
            self.namewidth = kwargs["namewidth"]
            del kwargs["namewidth"]
        self.rowheight = 16
        if kwargs.has_key("rowheight"):
            self.rowheight = kwargs["rowheight"]
            del kwargs["rowheight"]
        apply(Canvas.__init__, (self, parent)+args, kwargs)
        self.bind('<Configure>', self.onConfigure)
        x = 5
        y = 0
        wid = int(self.cget('width'))
        self.create_text(x, y, text='Property', anchor='nw')
        self.create_text(x + self.namewidth, y, text='Value', anchor='nw')
        # vertical divider between the name and value columns
        self.create_line(self.namewidth, 0, self.namewidth, self.rowheight, fill=LINE_COLOUR)
        # top rule; kept so onConfigure can replace it at the new width
        self.tline = self.create_line(0, 0, wid, 0, fill=LINE_COLOUR)
        #self.create_line(0, 0, 0, self.rowheight, fill=LINE_COLOUR)
        #self.create_line(wid - 1, 0, wid - 1, self.rowheight, fill=LINE_COLOUR)
    def onConfigure(self, event):
        # stretch the top rule to the widget's new width
        self.delete(self.tline)
        self.tline = self.create_line(0, 0, event.width, 0, fill=LINE_COLOUR)
# Module-level handle to the currently-open popup list (None when closed).
_popup = None
class PropertyCanvas(Canvas):
    """Grid-like property sheet: one row per property of the current
    PropertyHolder.  Free-text properties are edited via an in-place Entry;
    properties that supply choices get a "..." button opening a borderless
    popup with a scrolling choice list."""
    def __init__(self, parent, *args, **kwargs):
        # Column-geometry options are consumed here; the rest go to Canvas.
        self.namewidth = 120
        if "namewidth" in kwargs:
            self.namewidth = kwargs["namewidth"]
            del kwargs["namewidth"]
        self.rowheight = 16
        if "rowheight" in kwargs:
            self.rowheight = kwargs["rowheight"]
            del kwargs["rowheight"]
        Canvas.__init__(self, parent, *args, **kwargs)
        self.namitems = []   # canvas text items: property captions
        self.valitems = []   # canvas text items: property value texts
        self.lines = []      # canvas line items managed by drawLines()
        self.pnames = []
        #Event bindings...
        self.bind('<Enter>', self.onEnter)
        self.bind('<Button-1>', self.onClick)
        self.bind('<Configure>', self.onConfigure)
        # "..." button shown for choice-properties; opens the popup list.
        self.button = Button(height=self.rowheight, width=self.rowheight, text='...', command=self.onEdit)
        self.btnitem = None
        # In-place editor used for free-text properties.
        self.editor = Entry()
        self.edititem = None
        # Borderless, initially hidden popup hosting the choice list.
        self.popup = Toplevel()
        self.popup.withdraw()
        self.popup.overrideredirect(1)
        self.list = ScrollingList(self.popup, background='white', relief=FLAT, borderwidth=0)
        self.list.pack(fill=BOTH, expand=1)
        self.list.listener = self
        self.listvisible = 0
    def clear(self):
        """Remove all rows (captions, values and rules) from the canvas."""
        for itm in self.namitems:
            self.delete(itm)
        self.namitems = []
        for itm in self.valitems:
            self.delete(itm)
        self.valitems = []
        for lin in self.lines:
            self.delete(lin)
        self.lines = []
    def setPropertyHolder(self, ph):
        """Display the properties of *ph*, one row per property name."""
        self.ph = ph
        self.pnames = ph.getPropNames()
        wid = int(self.cget('width'))
        hei = int(self.cget('height'))
        self.clear()
        x = 5
        y = 0
        i = 0
        self.props = []  # props[i] corresponds to row i / pnames[i]
        for n in self.pnames:
            prop = self.ph.getProp(n)
            self.props.append(prop)
            tn = "n%d" % i
            tv = "v%d" % i
            self.namitems.append(self.create_text(x, y + 2, text=prop.getCaption(), anchor='nw', tags=tn))
            self.valitems.append(self.create_text(x + self.namewidth, y + 2, text=prop.getValueText(), anchor='nw', tags=tv))
            y = y + self.rowheight
            i = i + 1
        self.drawLines(wid, hei)
    def drawLines(self, wid, hei):
        """Redraw the horizontal row separators and the vertical divider."""
        for lin in self.lines:
            self.delete(lin)
        self.lines = []
        y = 0
        for i in xrange(len(self.pnames)):
            self.lines.append(self.create_line(0, y, wid, y, fill=LINE_COLOUR))
            y = y + self.rowheight
        self.lines.append(self.create_line(0, y, wid, y, fill=LINE_COLOUR))
        # Fix: track the vertical divider in self.lines as well.  Previously
        # it was created but never recorded, so every <Configure> event
        # stacked another never-deleted line item onto the canvas.
        self.lines.append(self.create_line(self.namewidth, 0, self.namewidth, hei, fill=LINE_COLOUR))
    def onEnter(self, event):
        """Grab focus on mouse-over unless an edit control is active."""
        if not self.edititem and not self.listvisible:
            self.focus_set()
    def hideControls(self):
        """Dismiss the popup and/or commit and remove the in-place editor."""
        if self.listvisible:
            self.popup.withdraw()
            global _popup
            _popup = None
            self.listvisible = 0
        if self.edititem:
            # Commit the edited text back into the property holder.
            self.ph.setPropValue(self.editprop.name, self.editor.get())
            self.itemconfig(self.valitems[self.editrow], text=self.editprop.getValueText())
            self.delete(self.edititem)
            self.edititem = None
        if self.btnitem:
            self.delete(self.btnitem)
            self.btnitem = None
    def onClick(self, event):
        """Begin editing the clicked row (button for choices, Entry otherwise)."""
        # Integer division (Python 2) maps the y coordinate to a row index.
        row = event.y / self.rowheight
        self.hideControls()
        if row < len(self.pnames):
            wid = int(self.cget('width'))
            hei = self.rowheight
            prop = self.props[row]
            if not self.ph.isReadonly(self.pnames[row]):
                self.editrow = row
                self.editprop = prop
                choices = prop.getChoices()
                if choices != None:
                    val = prop.getValue()
                    self.list.setContents(choices, val)
                    self.listy = row * hei + self.rowheight
                    self.btnitem = self.create_window(wid - hei, row * hei, width=hei, height=hei, window=self.button, anchor='nw', tags='button')
                else:
                    self.editor.delete(0, END)
                    self.editor.insert(0, prop.getValueText())
                    self.editor.select_range(0, END)
                    self.edititem = self.create_window(self.namewidth + 1, row * hei, width=wid - self.namewidth, height = hei + 1, window=self.editor, anchor='nw', tags='editor')
                    self.editor.focus_set()
    def onConfigure(self, event):
        """Dismiss any active editor and redraw rules at the new size."""
        self.hideControls()
        self.drawLines(event.width, event.height)
        self.configure(width=event.width, height=event.height)
    def onEdit(self):
        """Show the choices popup just below the row being edited."""
        wid = int(self.cget('width'))
        w = wid - self.namewidth - 1
        h = self.rowheight * 5
        x = self.winfo_rootx() + self.namewidth + 1
        y = self.winfo_rooty() + self.listy
        s = "%dx%d+%d+%d" % (w, h, x, y)
        self.popup.deiconify()
        self.popup.lift()
        self.popup.focus_set()
        self.listvisible = 1
        self.list.focus_set()
        #For some reason with 1.5.2 (Windows), making the geometry call
        #immediately following the assignment to s doesn't work. So we
        #do it here
        self.popup.geometry(s)
        global _popup
        _popup = self.popup
    def onListChange(self, val):
        """Commit *val* chosen in the popup; keep it open for multi-select."""
        self.ph.setPropValue(self.editprop.name, val)
        self.itemconfig(self.valitems[self.editrow], text=self.editprop.getValueText())
        if type(val) != types.ListType:
            self.hideControls()
class PropertyEditor(Frame):
    """Composite widget: a PropertyHeader row above a PropertyCanvas body."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        namewidth = kwargs.get("namewidth", 120)
        rowheight = kwargs.get("rowheight", 16)
        width = kwargs.get("width", 300)
        height = kwargs.get("height", 60)
        self.header = PropertyHeader(self, namewidth=namewidth, rowheight=rowheight, height=14, highlightthickness=0)
        self.body = PropertyCanvas(self, namewidth=namewidth, rowheight=rowheight, width=width, height=height, background='white', highlightthickness=0)
        self.header.pack(side=TOP, fill=X)
        self.body.pack(side=BOTTOM, fill=BOTH, expand=1)
    def setPropertyHolder(self, ph):
        """Show the properties of *ph* in the body canvas."""
        self.body.setPropertyHolder(ph)
class ADUPanel(Frame):
    """Button strip offering "New" and "Delete", delegating to the parent
    panel's onAdd/onDelete handlers."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.add = Button(self, text="New", command=parent.onAdd)
        self.rmv = Button(self, text="Delete", command=parent.onDelete)
        # Pack order fixes left-to-right placement: New, then Delete.
        self.add.pack(side=LEFT)
        self.rmv.pack(side=LEFT)
class ScrollList(Frame):
    """Listbox paired with a vertical scrollbar, exposed as ``self.list``."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        self.sb = Scrollbar(self, orient=VERTICAL)
        # Cross-wire scrollbar and listbox before constructing the latter.
        kwargs["yscrollcommand"] = self.sb.set
        self.list = Listbox(self, *args, **kwargs)
        self.sb.config(command=self.list.yview)
        self.sb.pack(side=RIGHT, fill=Y)
        self.list.pack(side=LEFT, fill=BOTH, expand=1)
def sortqn(log1, log2):
    """cmp-style comparator ordering loggers by qualified name, with the
    "(root)" logger always sorting first."""
    qn1 = log1.getQualifiedName()
    qn2 = log2.getQualifiedName()
    if qn1 == "(root)":
        return -1
    if qn2 == "(root)":
        return 1
    return cmp(qn1, qn2)
def sortn(obj1, obj2):
    """cmp-style comparator ordering property holders by their "name" value."""
    n1 = obj1.getPropValue("name")
    n2 = obj2.getPropValue("name")
    return cmp(n1, n2)
class LoggerPanel(Frame):
    """Panel showing the list of configured loggers plus a property editor
    for the currently selected one.  self.names[i] holds the internal name
    of the logger displayed at Listbox row i; the two must stay in step."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Loggers:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=15, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="we")
        label = Label(self, text="Properties of selected logger:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=120, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=3)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Repopulate the logger list from *config*; sortqn puts (root) first."""
        self.config = config
        #populate list of loggers
        llist = config.getLoggers()
        llist.sort(sortqn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        self.names = []
        for logger in llist:
            self.names.append(logger.getPropValue("name"))
            self.slist.list.insert(END, logger.getQualifiedName())
    def onAdd(self):
        """Create a new logger under the selected parent and select it."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Parent Selected", "You haven't selected a parent logger.")
        else:
            idx = int(items[0])
            parent = self.config.getLogger(self.names[idx])
            # getLogger(None) presumably allocates a fresh logger with a
            # generated name -- confirm against LoggingConfig.
            log = self.config.getLogger(None)
            log.onChannelChanged = self.onChannelChanged
            log.setPropValue("parent", parent.getPropValue("name"))
            # Insert the new row directly below the parent row.
            self.names.insert(1 + idx, log.getPropValue("name"))
            self.slist.list.insert(1 + idx, log.getQualifiedName())
            self.slist.list.select_clear(0, END)
            self.slist.list.select_set(1 + idx)
            self.pe.setPropertyHolder(log)
    def onDelete(self):
        """Soft-delete the selected logger; the root logger is protected."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete.")
        else:
            idx = int(items[0])
            name = self.slist.list.get(idx)
            if name == "(root)":
                showerror("Root Item Selected", "You cannot delete the root logger.")
            else:
                resp = askyesno("Logger Deletion", "Are you sure you want to delete logger '%s'?" % name)
                if resp:
                    #self.config.removeLogger(self.names[idx])
                    # Soft delete: flag the logger rather than removing it.
                    log = self.config.getLogger(self.names[idx])
                    log.deleted = 1
                    self.slist.list.delete(idx)
                    del self.names[idx]
                    self.pe.body.clear()
    def onChannelChanged(self, nm, chname):
        """Refresh the displayed qualified names from logger *nm* to the end
        of the list after its channel name changed; restore the selection."""
        i = self.names.index(nm)
        sel = i
        while i < len(self.names):
            log = self.config.getLogger(self.names[i])
            self.slist.list.delete(i)
            self.slist.list.insert(i, log.getQualifiedName())
            i = i + 1
        self.slist.list.select_clear(0, END)
        self.slist.list.select_set(sel)
    def onListChange(self, event):
        """Show the properties of the newly selected logger."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        idx = int(items[0])
        name = self.names[idx]
        log = self.config.getLogger(name)
        self.pe.setPropertyHolder(log)
class HandlerPanel(Frame):
    """Panel listing the configured handlers with a property editor for the
    selected one.  Handlers can morph class (see HandlerProxy.setPropValue),
    in which case onPropListChanged swaps in the replacement proxy."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Handlers:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=6, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="we")
        label = Label(self, text="Properties of selected handler:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=90, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Repopulate the handler list from *config*, sorted by name."""
        self.config = config
        #populate list of handlers
        hlist = config.getHandlers()
        hlist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for hand in hlist:
            hand.onPropListChanged = self.onPropListChanged
            self.slist.list.insert(END, hand.getPropValue("name"))
    def onAdd(self):
        """Create a new handler, append it to the list and select it."""
        self.pe.body.hideControls()
        hand = self.config.getHandler(None)
        self.slist.list.insert(END, hand.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        self.slist.list.select_set(END)
        hand.onPropListChanged = self.onPropListChanged
        self.pe.setPropertyHolder(hand)
    def onDelete(self):
        """Delete the selected handler unless some logger still uses it."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            log = self.config.handlerIsUsed(name)
            if log:
                showerror("Handler in use",
                    "The handler '%s' is being used by logger '%s'"\
                    ", so it cannot be deleted." % (
                    name, log))
            else:
                self.config.removeHandler(name)
                # NOTE(review): 'items' is the curselection tuple, not an
                # index; this appears to rely on Tk flattening a 1-tuple.
                # Consider delete(int(items[0])) -- confirm behaviour.
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        print "handler update"
    def onListChange(self, event):
        """Show the properties of the newly selected handler."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        hand = self.config.getHandler(name)
        self.pe.setPropertyHolder(hand)
    def onPropListChanged(self, newhand):
        """A handler morphed class: re-hook the callback and show the new proxy."""
        newhand.onPropListChanged = self.onPropListChanged
        self.pe.setPropertyHolder(newhand)
class FormatterPanel(Frame):
    """Panel listing the configured formatters with a property editor for
    the selected one."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Formatters:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=4, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="ew")
        label = Label(self, text="Properties of selected formatter:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=60, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Repopulate the formatter list from *config*, sorted by name."""
        self.config = config
        #populate list of formatters
        flist = config.getFormatters()
        flist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for form in flist:
            self.slist.list.insert(END, form.getPropValue("name"))
    def onAdd(self):
        """Create a new formatter, append it to the list and select it."""
        self.pe.body.hideControls()
        fmt = self.config.getFormatter(None)
        self.slist.list.insert(END, fmt.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        i = self.slist.list.size()
        self.slist.list.select_set(i - 1)
        self.pe.setPropertyHolder(fmt)
    def onDelete(self):
        """Delete the selected formatter unless some handler still uses it."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            h = self.config.formatterIsUsed(name)
            if h:
                showerror("Formatter in use",
                    "The formatter '%s' is being used by handler '%s'"\
                    ", so it cannot be deleted." % (
                    name, h))
            else:
                self.config.removeFormatter(name)
                # NOTE(review): 'items' is the curselection tuple, not an
                # index -- same pattern as HandlerPanel.onDelete; confirm.
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        self.pe.body.hideControls()
    def onListChange(self, event):
        """Show the properties of the newly selected formatter."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        fmt = self.config.getFormatter(name)
        self.pe.setPropertyHolder(fmt)
class FilterPanel(Frame):
    """Panel listing the configured filters with a property editor for the
    selected one."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Filters:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=4, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="ew")
        label = Label(self, text="Properties of selected filter:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=60, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Repopulate the filter list from *config*, sorted by name."""
        self.config = config
        #populate list of filters
        flist = config.getFilters()
        flist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for filt in flist:
            self.slist.list.insert(END, filt.getPropValue("name"))
    def onAdd(self):
        """Create a new filter, append it to the list and select it."""
        self.pe.body.hideControls()
        filt = self.config.getFilter(None)
        self.slist.list.insert(END, filt.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        i = self.slist.list.size()
        self.slist.list.select_set(i - 1)
        self.pe.setPropertyHolder(filt)
    def onDelete(self):
        """Delete the selected filter unless it is still in use."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            h = self.config.filterIsUsed(name)
            if h:
                showerror("Filter in use",
                    "The filter '%s' is being used by '%s'"\
                    ", so it cannot be deleted." % (
                    name, h))
            else:
                self.config.removeFilter(name)
                # NOTE(review): 'items' is the curselection tuple, not an
                # index -- same pattern as HandlerPanel.onDelete; confirm.
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        self.pe.body.hideControls()
    def onListChange(self, event):
        """Show the properties of the newly selected filter."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        filt = self.config.getFilter(name)
        self.pe.setPropertyHolder(filt)
class ConfigPanel(Frame):
    """Button strip for whole-configuration actions: Load, Save, Save as,
    and Reset (right-aligned)."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.load = Button(self, text="Load...", command=parent.onLoad)
        self.load.pack(side=LEFT)
        self.save = Button(self, text="Save", command=parent.onSave)
        self.save.pack(side=LEFT)
        # Fix: this button previously overwrote self.save, losing the handle
        # to the plain "Save" button; it now has its own attribute.
        self.saveas = Button(self, text="Save as...", command=parent.onSaveAs)
        self.saveas.pack(side=LEFT)
        self.reset = Button(self, text="Reset", command=parent.onReset)
        self.reset.pack(side=RIGHT)
class Configurator(Frame):
    """Top-level application frame wiring together the logger, handler and
    formatter panels plus the load/save/reset button strip.  If a filename
    is given on the command line it is loaded at startup; otherwise an
    empty configuration is created."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.llist = LoggerPanel(self)
        self.llist.grid(row=0, column=0, rowspan=2, sticky='nsew')
        spacer = Canvas(self, width=2, highlightthickness=0)
        spacer.grid(row=0, column=1, rowspan=2, sticky='ns')
        self.hlist = HandlerPanel(self)
        self.hlist.grid(row=0, column=2, sticky='nsew')
        self.flist = FormatterPanel(self)
        self.flist.grid(row=1, column=2, sticky='nsew')
        self.cfg = ConfigPanel(self)
        self.cfg.grid(row=2, column=0, columnspan=2, sticky='w')
        self.filename = None
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(2, weight=1)
        label = Label(self, text="Copyright (C) 2002 Vinay Sajip. All rights reserved.", foreground='brown')
        label.grid(row=3, column=0, columnspan=2, sticky='w')
        # Optional config file on the command line; errors are echoed then
        # re-raised so startup failures stay visible.
        if len(sys.argv) > 1:
            fn = sys.argv[1]
            try:
                self.loadFile(fn)
            except Exception, e:
                print e
                raise
        else:
            self.onReset(0)
        self.setTitle()
        self.focus_set()
    def setTitle(self):
        """Set the toplevel title from the current file name (or 'untitled')."""
        if self.filename:
            s = os.path.split(self.filename)[1]
        else:
            s = "untitled"
        self.winfo_toplevel().title("%s - Python Logging Configurator V%s" % (s, __version__))
    def loadFile(self, fn):
        """Read configuration file *fn* and refresh all three panels."""
        self.config = LoggingConfig()
        self.config.read(fn)
        self.filename = fn
        self.llist.setConfig(self.config)
        self.hlist.setConfig(self.config)
        self.flist.setConfig(self.config)
        self.setTitle()
    def onLoad(self):
        """Prompt for a configuration file and load it."""
        fn = askopenfilename(title="Choose configuration file", filetypes=[("Logging configurations", "*.ini"), ("All files", "*.*")])
        if fn:
            self.loadFile(fn)
    def onSaveAs(self):
        """Prompt for a target file name and save the configuration there."""
        if self.filename:
            fn = os.path.split(self.filename)[1]
        else:
            fn = DEFAULT_FILENAME
        fn = asksaveasfilename(title="Save configuration as", initialfile=fn, filetypes=[("Logging configurations", "*.ini"), ("All files", "*.*")])
        if fn:
            self.config.save(fn)
            self.filename = fn
            self.setTitle()
    def onSave(self):
        """Save to the current file, falling back to Save As when unnamed."""
        if not self.filename:
            self.onSaveAs()
        else:
            self.config.save(self.filename)
    def onReset(self, confirm=1):
        """Discard the current configuration and start a fresh one.

        confirm -- when true (default), ask the user first.
        """
        if not confirm:
            doit = 1
        else:
            doit = askyesno("Reset", "Are you sure you want to reset?")
        if doit:
            self.config = LoggingConfig()
            self.llist.setConfig(self.config)
            self.hlist.setConfig(self.config)
            self.flist.setConfig(self.config)
            self.setTitle()
# -- general properties
class NameProperty(Property):
    """The object's "name" key, captioned "Name" in the editor."""
    def __init__(self, value=None):
        Property.__init__(self, "name", "Name", value)
class LevelProperty(Property):
    """A logging level under the "level" key; choices from LOGGING_LEVELS."""
    def __init__(self, value=None):
        Property.__init__(self, "level", "Level", value)
    def getChoices(self):
        return LOGGING_LEVELS
# -- formatter properties
class FormatProperty(Property):
    """A formatter's "format" string, captioned "Format"."""
    def __init__(self, value=None):
        Property.__init__(self, "format", "Format", value)
class DateFormatProperty(Property):
    """A formatter's "datefmt" string, captioned "Date Format"."""
    def __init__(self, value=None):
        Property.__init__(self, "datefmt", "Date Format", value)
class FormatterProxy(PropertyHolder):
    """Property holder for a [formatter_*] section (format + date format)."""
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = FormatProperty(dict.get("format", "%(asctime)s %(levelname)s %(message)s"))
        self.dict["format"] = prop
        prop = DateFormatProperty(dict.get("datefmt", ""))
        self.dict["datefmt"] = prop
        self.propnames = ["name", "format", "datefmt"]
    def isReadonly(self, name):
        """Only the generated "name" property is read-only."""
        return name == "name"
    def writeConfig(self, file):
        """Write this formatter's section to *file* in ConfigParser format."""
        file.write("[formatter_%s]\n" % self.getPropValue("name"))
        file.write("format=%s\n" % self.getPropValue("format"))
        file.write("datefmt=%s\n\n" % self.getPropValue("datefmt"))
# -- filter properties
class LoggerNameProperty(Property):
    """A filter's "lname" (logger name) key, captioned "Name"."""
    def __init__(self, value=None):
        Property.__init__(self, "lname", "Name", value)
class FilterProxy(PropertyHolder):
    """Property holder for a [filter_*] section (name + logger-name key)."""
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = LoggerNameProperty(dict.get("lname", ""))
        self.dict["lname"] = prop
        self.propnames = ["name", "lname"]
    def isReadonly(self, name):
        """Only the generated "name" property is read-only."""
        return name == "name"
    def writeConfig(self, file):
        """Write this filter's section to *file* in ConfigParser format."""
        file.write("[filter_%s]\n" % self.getPropValue("name"))
        # Fix: end the section with a blank line like every other
        # writeConfig implementation (previously only a single "\n",
        # producing inconsistently formatted output files).
        file.write("lname=%s\n\n" % self.getPropValue("lname"))
# -- handler properties and proxies
class HandlerTypeProperty(Property):
    """A handler's "class" key, captioned "Type"; choices from HANDLER_TYPES."""
    def __init__(self, value=None):
        Property.__init__(self, "class", "Type", value)
    def getChoices(self):
        return HANDLER_TYPES
class FormatterProperty(Property):
    """A handler's "formatter" key; choices are the configured formatters."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "formatter", "Formatter", value)
    def getChoices(self):
        return self.config.getFormatterChoice()
class HandlerProxy(PropertyHolder):
    """Base property holder for a [handler_*] section (name, class, level,
    formatter).  Changing the "class" property morphs this proxy into the
    matching subclass and notifies via onPropListChanged."""
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = HandlerTypeProperty(dict.get("class", "StreamHandlerProxy"))
        self.dict["class"] = prop
        prop = FormatterProperty(self.config, dict.get("formatter", ""))
        self.dict["formatter"] = prop
        prop = LevelProperty(dict.get("level", "NOTSET"))
        self.dict["level"] = prop
        self.propnames = ["name", "class", "level", "formatter"]
    def isReadonly(self, name):
        """Only the generated "name" property is read-only."""
        return (name == "name")
    def setPropValue(self, name, value):
        """Store the value; a "class" change replaces this proxy with an
        instance of the named subclass carrying the common properties over."""
        PropertyHolder.setPropValue(self, name, value)
        if (name == "class"): #morph type of handler
            #print "try morph -> %s" % value
            # NOTE(review): eval() resolves the proxy class from its textual
            # name.  Values come via the HANDLER_TYPES choice list rather
            # than free-form typing, but a lookup table would still be safer.
            try:
                klass = eval(value)
            except Exception, e:
                print e
                klass = None
            if klass:
                n = self.getPropValue("name")
                d = {
                    "name": n,
                    "class": value,
                    "formatter": self.getPropValue("formatter"),
                    "level": self.getPropValue("level"),
                }
                newhand = klass(self.config, d)
                self.config.handlers[n] = newhand #FIXME encapsulation
                # assumes onPropListChanged was assigned by the owning panel
                # before a class change can happen -- TODO confirm
                if self.onPropListChanged:
                    self.onPropListChanged(newhand)
    def writeConfig(self, file):
        """Write the common handler keys; subclasses append their own."""
        file.write("[handler_%s]\n" % self.getPropValue("name"))
        s = self.getProp("class").getValueText()
        # Non-core handler classes live in the logging.handlers module.
        if not s in ["StreamHandler", "FileHandler"]:
            s = "handlers." + s
        file.write("class=%s\n" % s)
        file.write("level=%s\n" % self.getPropValue("level"))
        file.write("formatter=%s\n" % self.getPropValue("formatter"))
class StreamProperty(Property):
    """A StreamHandler's "stream" key; choices from OUTPUT_STREAMS."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "stream", "Stream", value)
    def getChoices(self):
        return OUTPUT_STREAMS
class StreamHandlerProxy(HandlerProxy):
    """Handler proxy for StreamHandler (adds the "stream" property)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = StreamProperty(self.config, dict.get("stream", "sys.stderr"))
        self.dict["stream"] = prop
        self.propnames.append("stream")
    def writeConfig(self, file):
        """Write common keys plus the stream key and the args tuple."""
        HandlerProxy.writeConfig(self, file)
        file.write("stream=%s\n" % self.getPropValue("stream"))
        file.write("args=(%s,)\n\n" % self.getPropValue("stream"))
    def readConfig(self, sectname):
        """Load the stream property from config section *sectname*."""
        prop = StreamProperty(self.config, self.config.get(sectname, "stream"))
        self.dict["stream"] = prop
        self.propnames.append("stream")
class FilenameProperty(Property):
    """A file handler's "filename" key, captioned "File name"."""
    def __init__(self, value=None):
        Property.__init__(self, "filename", "File name", value)
class ModeProperty(Property):
    """A file handler's open "mode" key; choices from FILE_MODES."""
    def __init__(self, value=None):
        Property.__init__(self, "mode", "Mode", value)
    def getChoices(self):
        return FILE_MODES
class MaxSizeProperty(Property):
    """A rotating handler's "maxsize" key (rollover threshold in bytes)."""
    def __init__(self, value=None):
        Property.__init__(self, "maxsize", "Maximum Size (bytes)", value)
class BackupCountProperty(Property):
    """A rotating handler's "backcount" key (number of backup files)."""
    def __init__(self, value=None):
        Property.__init__(self, "backcount", "Backup Count", value)
class FileHandlerProxy(HandlerProxy):
    """Handler proxy for FileHandler (adds filename and mode properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = FilenameProperty(dict.get("filename", "python.log"))
        self.dict["filename"] = prop
        prop = ModeProperty(dict.get("mode", "a"))
        self.dict["mode"] = prop
        self.propnames.extend(["filename", "mode"])
    def writeConfig(self, file):
        """Write common keys plus filename/mode and the args tuple."""
        HandlerProxy.writeConfig(self, file)
        fn = self.getPropValue("filename")
        file.write("filename=%s\n" % fn)
        mode = self.getPropValue("mode")
        file.write("mode=%s\n" % mode)
        file.write("args=('%s', '%s')\n\n" % (fn, mode))
    def readConfig(self, sectname):
        """Load filename and mode from config section *sectname*."""
        prop = FilenameProperty(self.config.get(sectname, "filename"))
        self.dict["filename"] = prop
        prop = ModeProperty(self.config.get(sectname, "mode"))
        self.dict["mode"] = prop
        self.propnames.extend(["filename", "mode"])
class RotatingFileHandlerProxy(FileHandlerProxy):
    """Handler proxy for RotatingFileHandler (adds maxsize/backcount)."""
    def __init__(self, config, dict):
        FileHandlerProxy.__init__(self, config, dict)
        prop = MaxSizeProperty(dict.get("maxsize", "0"))
        self.dict["maxsize"] = prop
        prop = BackupCountProperty(dict.get("backcount", "1"))
        self.dict["backcount"] = prop
        self.propnames.extend(["maxsize", "backcount"])
    def writeConfig(self, file):
        """Write all keys and a four-element args tuple.

        Note: delegates to HandlerProxy (not FileHandlerProxy) so that the
        filename/mode keys and args are written here in one place.
        """
        HandlerProxy.writeConfig(self, file)
        fn = self.getPropValue("filename")
        file.write("filename=%s\n" % fn)
        mode = self.getPropValue("mode")
        file.write("mode=%s\n" % mode)
        ms = self.getPropValue("maxsize")
        file.write("maxsize=%s\n" % ms)
        bc = self.getPropValue("backcount")
        file.write("backcount=%s\n" % bc)
        file.write("args=('%s', '%s', %s, %s)\n\n" % (fn, mode, ms, bc))
    def readConfig(self, sectname):
        """Load file keys plus maxsize/backcount from section *sectname*."""
        FileHandlerProxy.readConfig(self, sectname)
        prop = MaxSizeProperty(self.config.get(sectname, "maxsize"))
        self.dict["maxsize"] = prop
        prop = BackupCountProperty(self.config.get(sectname, "backcount"))
        self.dict["backcount"] = prop
        self.propnames.extend(["maxsize", "backcount"])
class HostProperty(Property):
    """A network handler's "host" key, captioned "Host"."""
    def __init__(self, value=None):
        Property.__init__(self, "host", "Host", value)
class PortProperty(Property):
    """A network handler's "port" key, captioned "Port"."""
    def __init__(self, value=None):
        Property.__init__(self, "port", "Port", value)
class SocketHandlerProxy(HandlerProxy):
    """Handler proxy for SocketHandler (TCP; adds host/port properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.DEFAULT_TCP_LOGGING_PORT"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
    def writeConfig(self, file):
        """Write common keys plus host/port and the args tuple."""
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        file.write("args=('%s', %s)\n\n" % (host, port))
    def readConfig(self, sectname):
        """Load host and port from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
class DatagramHandlerProxy(HandlerProxy):
    """Handler proxy for DatagramHandler (UDP; adds host/port properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.DEFAULT_UDP_LOGGING_PORT"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
    def writeConfig(self, file):
        """Write common keys plus host/port and the args tuple."""
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        file.write("args=('%s', %s)\n\n" % (host, port))
    def readConfig(self, sectname):
        """Load host and port from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
class URLProperty(Property):
    """A web handler's "url" key, captioned "URL"."""
    def __init__(self, value=None):
        Property.__init__(self, "url", "URL", value)
class MethodProperty(Property):
    """An HTTP handler's "method" key; choices from HTTP_METHODS."""
    def __init__(self, value=None):
        Property.__init__(self, "method", "HTTP Method", value)
    def getChoices(self):
        return HTTP_METHODS
class HTTPHandlerProxy(HandlerProxy):
    """Handler proxy for HTTPHandler (adds host/port/url/method properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "80"))
        self.dict["port"] = prop
        prop = URLProperty(dict.get("url", ""))
        self.dict["url"] = prop
        prop = MethodProperty(dict.get("method", "GET"))
        self.dict["method"] = prop
        self.propnames.extend(["host", "port", "url", "method"])
    def writeConfig(self, file):
        """Write all keys; args packs host:port into a single string."""
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        url = self.getPropValue("url")
        file.write("url=%s\n" % url)
        meth = self.getPropValue("method")
        file.write("method=%s\n" % meth)
        file.write("args=('%s:%s', '%s', '%s')\n\n" % (host, port, url, meth))
    def readConfig(self, sectname):
        """Load host/port/url/method from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = URLProperty(self.config.get(sectname, "url"))
        self.dict["url"] = prop
        prop = MethodProperty(self.config.get(sectname, "method"))
        self.dict["method"] = prop
        self.propnames.extend(["host", "port", "url", "method"])
class SOAPHandlerProxy(HandlerProxy):
    """Handler proxy for a SOAP handler (adds host/port/url properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "80"))
        self.dict["port"] = prop
        prop = URLProperty(dict.get("url", ""))
        self.dict["url"] = prop
        self.propnames.extend(["host", "port", "url"])
    def writeConfig(self, file):
        """Write all keys; args packs host:port into a single string."""
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        url = self.getPropValue("url")
        file.write("url=%s\n" % url)
        file.write("args=('%s:%s', '%s')\n\n" % (host, port, url))
    def readConfig(self, sectname):
        """Load host/port/url from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = URLProperty(self.config.get(sectname, "url"))
        self.dict["url"] = prop
        self.propnames.extend(["host", "port", "url"])
class FacilityProperty(Property):
    """A syslog handler's "facility" key; choices from SYSLOG_FACILITIES."""
    def __init__(self, value=None):
        Property.__init__(self, "facility", "Facility", value)
    def getChoices(self):
        return SYSLOG_FACILITIES
class SysLogHandlerProxy(HandlerProxy):
    """Handler proxy for SysLogHandler (adds host/port/facility properties)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.SYSLOG_UDP_PORT"))
        self.dict["port"] = prop
        prop = FacilityProperty(dict.get("facility", "handlers.SysLogHandler.LOG_USER"))
        self.dict["facility"] = prop
        self.propnames.extend(["host", "port", "facility"])
    def writeConfig(self, file):
        """Write all keys; args packs (host, port) as an address tuple."""
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        fac = self.getPropValue("facility")
        file.write("facility=%s\n" % fac)
        file.write("args=(('%s', %s), handlers.SysLogHandler.%s)\n\n" % (host, port, fac))
    def readConfig(self, sectname):
        """Load host/port/facility from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = FacilityProperty(self.config.get(sectname, "facility"))
        self.dict["facility"] = prop
        self.propnames.extend(["host", "port", "facility"])
class FromProperty(Property):
    """An SMTP handler's "from" address key, captioned "From"."""
    def __init__(self, value=None):
        Property.__init__(self, "from", "From", value)
class ToProperty(Property):
    """An SMTP handler's "to" address key, captioned "To"."""
    def __init__(self, value=None):
        Property.__init__(self, "to", "To", value)
class SubjectProperty(Property):
    """An SMTP handler's "subject" key, captioned "Subject"."""
    def __init__(self, value=None):
        Property.__init__(self, "subject", "Subject", value)
class SMTPHandlerProxy(HandlerProxy):
    """Handler proxy for SMTPHandler (adds host/port/from/to/subject)."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "25"))
        self.dict["port"] = prop
        prop = FromProperty(dict.get("from", ""))
        self.dict["from"] = prop
        prop = ToProperty(dict.get("to", ""))
        self.dict["to"] = prop
        prop = SubjectProperty(dict.get("subject", ""))
        self.dict["subject"] = prop
        self.propnames.extend(["host", "port", "from", "to", "subject"])
    def writeConfig(self, file):
        """Write all keys; "to" is split on commas into an address list.

        NOTE(review): the port value is written as its own key but NOT
        included in args, so the handler will be constructed with the host
        only -- confirm this is intended.
        """
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        frm = self.getPropValue("from")
        file.write("from=%s\n" % frm)
        to = self.getPropValue("to")
        file.write("to=%s\n" % to)
        subj = self.getPropValue("subject")
        file.write("subject=%s\n" % subj)
        to = string.split(to, ",")
        file.write("args=('%s', '%s', %s, '%s')\n\n" % (host, frm, repr(to), subj))
    def readConfig(self, sectname):
        """Load host/port/from/to/subject from config section *sectname*."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = FromProperty(self.config.get(sectname, "from"))
        self.dict["from"] = prop
        prop = ToProperty(self.config.get(sectname, "to"))
        self.dict["to"] = prop
        prop = SubjectProperty(self.config.get(sectname, "subject"))
        self.dict["subject"] = prop
        self.propnames.extend(["host", "port", "from", "to", "subject"])
class CapacityProperty(Property):
    """A memory handler's "capacity" key (buffered record count)."""
    def __init__(self, value=None):
        Property.__init__(self, "capacity", "Capacity", value)
class FlushLevelProperty(LevelProperty):
    """A memory handler's "flushlevel" key; inherits LevelProperty's
    choices but deliberately calls Property.__init__ directly so the value
    is stored under "flushlevel" rather than "level"."""
    def __init__(self, value=None):
        Property.__init__(self, "flushlevel", "Flush Level", value)
class TargetProperty(Property):
    """A memory handler's "target" key: the handler flushed to.  Choices are
    all configured handlers except the owning one."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "target", "Target", value)
    def getChoices(self):
        handlers = self.config.getHandlerChoice()
        # self.dict is attached externally by MemoryHandlerProxy; it holds
        # the owning handler's properties, whose "name" must be excluded.
        nm = self.dict["name"].getValueText()
        #can't be own target...
        return filter(lambda x,nm=nm: x[0] != nm, handlers)
class MemoryHandlerProxy(HandlerProxy):
    """Proxy for logging.handlers.MemoryHandler configuration entries."""

    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        self._install(CapacityProperty(dict.get("capacity", "10")),
                      FlushLevelProperty(dict.get("flushlevel", "ERROR")),
                      TargetProperty(config, dict.get("target", "")))

    def _install(self, capacity, flushlevel, target):
        # Wire the three Memory-handler specific properties into the
        # property dictionary; the target property needs access to the
        # dictionary to exclude this handler from its own choices.
        self.dict["capacity"] = capacity
        self.dict["flushlevel"] = flushlevel
        target.dict = self.dict
        self.dict["target"] = target
        self.propnames.extend(["capacity", "flushlevel", "target"])

    def writeConfig(self, file):
        """Emit the Memory-specific options plus the args= constructor line."""
        HandlerProxy.writeConfig(self, file)
        capacity = self.getPropValue("capacity")
        flushlevel = self.getPropValue("flushlevel")
        file.write("capacity=%s\n" % capacity)
        file.write("flushlevel=%s\n" % flushlevel)
        file.write("target=%s\n" % self.getPropValue("target"))
        # capacity/flushlevel are deliberately unquoted: they become the
        # positional args for MemoryHandler(capacity, flushlevel).
        file.write("args=(%s, %s)\n\n" % (capacity, flushlevel))

    def readConfig(self, sectname):
        """Load the Memory-specific options from INI section *sectname*."""
        get = self.config.get
        self._install(CapacityProperty(get(sectname, "capacity")),
                      FlushLevelProperty(get(sectname, "flushlevel")),
                      TargetProperty(self.config, get(sectname, "target")))
class AppNameProperty(Property):
    """Application name shown in the NT event log."""

    def __init__(self, value=None):
        Property.__init__(self, "appname", "Application Name", value)


class DLLNameProperty(Property):
    """Path of the message DLL used to format NT event log entries."""

    def __init__(self, value=None):
        Property.__init__(self, "dllname", "Message DLL name", value)


class LogTypeProperty(Property):
    """Which NT event log is written to (e.g. Application or System)."""

    def __init__(self, value=None):
        Property.__init__(self, "logtype", "Log Type", value)

    def getChoices(self):
        # LOG_TYPES is a module-level constant defined elsewhere in this file.
        return LOG_TYPES
class NTEventLogHandlerProxy(HandlerProxy):
    """Proxy for logging.handlers.NTEventLogHandler configuration entries."""

    # (key, property class, default value) for every NT-specific option.
    _PROPS = [
        ("appname", AppNameProperty, "Python Application"),
        ("dllname", DLLNameProperty, ""),
        ("logtype", LogTypeProperty, "Application"),
    ]

    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        for key, propclass, default in self._PROPS:
            self.dict[key] = propclass(dict.get(key, default))
        self.propnames.extend([k for (k, _, _) in self._PROPS])

    def writeConfig(self, file):
        """Emit the NT-specific options plus the args= constructor line."""
        HandlerProxy.writeConfig(self, file)
        values = []
        for key, _, _ in self._PROPS:
            value = self.getPropValue(key)
            file.write("%s=%s\n" % (key, value))
            values.append(value)
        file.write("args=('%s', '%s', '%s')\n\n" % tuple(values))

    def readConfig(self, sectname):
        """Load the NT-specific options from INI section *sectname*."""
        for key, propclass, _ in self._PROPS:
            self.dict[key] = propclass(self.config.get(sectname, key))
        self.propnames.extend([k for (k, _, _) in self._PROPS])
# -- logger properties and proxies
class ChannelProperty(Property):
    """Last component of a logger's dotted name."""

    def __init__(self, value=None):
        Property.__init__(self, "channel", "Name", value)


class HandlerProperty(Property):
    """Handlers attached to a logger; choices come from the config."""

    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "handler", "Handlers", value)

    def getChoices(self):
        return self.config.getHandlerChoice()
class FilterProperty(Property):
    """Filters attached to a logger.

    NOTE(review): filter editing appears unused/disabled in this tool --
    confirm the config object supports getFilterChoice() before relying
    on this property in the UI.
    """
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "filter", "Filters", value)
    def getChoices(self):
        return self.config.getFilterChoice()
class ParentProperty(Property):
    """Parent logger; displayed as the parent's fully dotted name."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "parent", "Parent", value)
    def getChoices(self):
        loggers = self.config.getLoggerChoice()
        nm = self.dict["name"].getValueText()
        #can't be own parent...
        # NOTE: Python 2 filter() returns the same sequence type it is given.
        return filter(lambda x,nm=nm: x[0] != nm, loggers)
    def getValueText(self):
        # The root logger has no parent, so it displays as empty text.
        if self.dict.has_key("root"):
            return ""
        pn = Property.getValueText(self)
        rv = ""
        # Walk up the parent chain, prepending each ancestor's channel;
        # for logger "a.b.c" this accumulates "a.b." before the trim below.
        while pn != "(root)":
            parent = self.config.getLogger(pn)
            rv = parent.getPropValue("channel") + "." + rv
            pn = parent.getProp("parent").value
        # Drop the trailing "." left over from the loop.
        return rv[:-1]
class PropagateProperty(Property):
    """Whether records propagate to ancestor loggers (boolean choice)."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "propagate", "Propagate", value)
    def getChoices(self):
        # BOOLEAN_VALUES is a module-level constant defined elsewhere in this file.
        return BOOLEAN_VALUES
class LoggerProxy(PropertyHolder):
    """Editable stand-in for one [logger_*] section of the config file."""
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = ChannelProperty(dict.get("channel", ""))
        self.dict["channel"] = prop
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = HandlerProperty(config, dict.get("handler", []))
        self.dict["handler"] = prop
        prop = LevelProperty(dict.get("level", "NOTSET"))
        self.dict["level"] = prop
        prop = PropagateProperty(self.config, dict.get("propagate", "1"))
        self.dict["propagate"] = prop
        prop = ParentProperty(config, dict.get("parent", "(root)"))
        # ParentProperty needs the property dict to resolve this logger's name.
        prop.dict = self.dict
        self.dict["parent"] = prop
        self.propnames = ["parent", "channel", "level", "propagate", "handler"]
        # Optional callback fired when the channel name is edited in the UI.
        self.onChannelChanged = None
        # Set to true by the UI when the logger is removed; save() skips it.
        self.deleted = 0
    def isReadonly(self, name):
        # The root logger's identity properties cannot be edited.
        return (name in ["channel", "parent", "propagate"]) and self.dict.has_key("root")
    def getQualifiedName(self):
        """Return the dotted name: parent's qualified name + own channel."""
        pt = self.getProp("parent").getValueText()
        nm = self.getPropValue("channel")
        if pt:
            pn = pt + "." + nm
        else:
            pn = nm
        if pn == "":
            pn = "(root)"
        return pn
    def setPropValue(self, name, value):
        PropertyHolder.setPropValue(self, name, value)
        # Notify the UI so tree labels etc. can follow channel renames.
        if (name == "channel"):
            nm = self.getPropValue("name")
            if self.onChannelChanged:
                self.onChannelChanged(nm, value)
    def writeConfig(self, file):
        """Emit a [logger_<name>] section in fileConfig INI format."""
        if self.dict.has_key("root"):
            name = "root"
        else:
            name = self.getPropValue("name")
        file.write("[logger_%s]\n" % name)
        file.write("level=%s\n" % self.getPropValue("level"))
        file.write("propagate=%s\n" % self.getPropValue("propagate"))
        file.write("channel=%s\n" % self.getPropValue("channel"))
        file.write("parent=%s\n" % self.getPropValue("parent"))
        file.write("qualname=%s\n" % self.getQualifiedName())
        file.write("handlers=%s\n\n" % string.join(self.getPropValue("handler"), ","))
# -- logging configuration
class LoggingConfig(ConfigParser.ConfigParser):
    """In-memory model of a logging fileConfig-style INI file.

    Maintains proxy objects (LoggerProxy, HandlerProxy subclasses,
    FormatterProxy) for every section and knows how to load them from,
    and save them back to, the INI representation.
    """
    def __init__(self, defaults=None):
        ConfigParser.ConfigParser.__init__(self, defaults)
        self.formatters = {}
        self.handlers = {}
        self.loggers = {}
        # Filter editing is not implemented yet (see the commented-out
        # sections below), but getFilterChoice() iterates this mapping,
        # so keep it initialized to avoid an AttributeError if called.
        self.filters = {}
        #create root logger
        d = { "name": "(root)", "root": 1, "parent": "" }
        self.loggers["(root)"] = LoggerProxy(self, d)
    def read(self, fn):
        """Populate all proxy objects from the INI file *fn*."""
        ConfigParser.ConfigParser.read(self, fn)
        # The root logger proxy already exists; update it in place.
        llist = self.get("loggers", "keys")
        llist = string.split(llist, ",")
        llist.remove("root")
        sectname = "logger_root"
        log = self.loggers["(root)"]
        log.setPropValue("level", self.get(sectname, "level"))
        hlist = self.get(sectname, "handlers")
        hlist = string.split(hlist, ",")
        log.setPropValue("handler", hlist)
        # The remaining loggers are created fresh from their sections.
        for log in llist:
            sectname = "logger_%s" % log
            hlist = self.get(sectname, "handlers")
            hlist = string.split(hlist, ",")
            d = {
                "name" : log,
                "level" : self.get(sectname, "level"),
                "channel" : self.get(sectname, "channel"),
                "parent" : self.get(sectname, "parent"),
                "propagate" : self.get(sectname, "propagate"),
                "handler" : hlist,
            }
            self.loggers[log] = LoggerProxy(self, d)
        hlist = self.get("handlers", "keys")
        if len(hlist):
            hlist = string.split(hlist, ",")
            for hand in hlist:
                sectname = "handler_%s" % hand
                klass = self.get(sectname, "class")
                # Accept both "handlers.XHandler" and plain "XHandler".
                if klass[:9] == "handlers.":
                    klass = klass[9:]
                d = {
                    "name" : hand,
                    "class" : "%sProxy" % klass,
                    "level" : self.get(sectname, "level"),
                    "formatter" : self.get(sectname, "formatter"),
                }
                # Build a generic proxy, then rebind its class so the
                # subclass-specific readConfig() picks up extra options.
                # NOTE(review): eval() on config-file data -- only open
                # trusted configuration files with this tool.
                hobj = HandlerProxy(self, d)
                hobj.__class__ = eval("%sProxy" % klass)
                hobj.readConfig(sectname)
                self.handlers[hand] = hobj
        flist = self.get("formatters", "keys")
        if len(flist):
            flist = string.split(flist, ",")
            for form in flist:
                sectname = "formatter_%s" % form
                # Raw gets (third arg) so %-style format strings survive
                # ConfigParser interpolation.
                d = {
                    "name" : form,
                    "format" : self.get(sectname, "format", 1),
                    "datefmt" : self.get(sectname, "datefmt", 1),
                }
                self.formatters[form] = FormatterProxy(self, d)
        # Filter support is not enabled yet:
        # flist = self.get("filters", "keys")
        # if len(flist):
        #     flist = string.split(flist, ",")
        #     for filt in flist:
        #         sectname = "filter_%s" % filt
        #         d = {
        #             "name" : filt,
        #             "lname" : self.get(sectname, "lname", 1),
        #         }
        #         self.filters[filt] = FilterProxy(self, d)
    def getFormatter(self, name):
        """Return formatter proxy *name*, or create one if name is empty."""
        if name:
            fmt = self.formatters[name]
        else:
            n = len(self.formatters.keys()) + 1
            name = "form%02d" % n
            fmt = FormatterProxy(self, {"name": name})
            self.formatters[name] = fmt
        return fmt
    def getHandler(self, name):
        """Return handler proxy *name*, or create a new StreamHandler proxy."""
        if name:
            hand = self.handlers[name]
        else:
            n = len(self.handlers.keys()) + 1
            name = "hand%02d" % n
            hand = StreamHandlerProxy(self, {"name": name})
            self.handlers[name] = hand
        return hand
    def getLogger(self, name):
        """Return logger proxy *name*, or create a new one."""
        if name:
            log = self.loggers[name]
        else:
            n = len(self.loggers.keys()) + 1
            name = "log%02d" % n
            log = LoggerProxy(self, {"name": name, "channel": name})
            self.loggers[name] = log
        return log
    def _choice(self, mapping):
        # Sorted (value, label) pairs for a drop-down list.
        keys = mapping.keys()
        keys.sort()
        values = []
        for f in keys:
            values.append((f, f))
        return tuple(values)
    def getFormatterChoice(self):
        return self._choice(self.formatters)
    def getHandlerChoice(self):
        return self._choice(self.handlers)
    def getFilterChoice(self):
        return self._choice(self.filters)
    def getLoggerChoice(self):
        return self._choice(self.loggers)
    def getLoggers(self):
        return self.loggers.values()
    def getHandlers(self):
        return self.handlers.values()
    def getFormatters(self):
        return self.formatters.values()
    def formatterIsUsed(self, name):
        """Return the name of a handler using formatter *name*, or None."""
        rv = None
        for h in self.handlers.keys():
            if self.handlers[h].getPropValue("formatter") == name:
                rv = h
                break
        return rv
    def handlerIsUsed(self, name):
        """Return the name of a logger using handler *name*, or None."""
        rv = None
        for log in self.loggers.keys():
            if name in self.loggers[log].getPropValue("handler"):
                rv = log
                break
        return rv
    def removeFormatter(self, name):
        del self.formatters[name]
    def removeHandler(self, name):
        del self.handlers[name]
    def removeLogger(self, name):
        del self.loggers[name]
    def save(self, fn):
        """Write the whole configuration back to INI file *fn*."""
        #needed because 1.5.2 ConfigParser should be supported
        file = open(fn, "w")
        #Write out the keys
        loggers = self.loggers.keys()
        loggers.remove("(root)")
        # Skip loggers flagged as deleted in the UI.
        loggers = filter(lambda x, d=self.loggers: not d[x].deleted, loggers)
        loggers.sort()
        list = ["root"]
        list.extend(loggers)
        file.write("[loggers]\nkeys=%s\n\n" % string.join(list, ","))
        handlers = self.handlers.keys()
        handlers.sort()
        file.write("[handlers]\nkeys=%s\n\n" % string.join(handlers, ","))
        formatters = self.formatters.keys()
        formatters.sort()
        file.write("[formatters]\nkeys=%s\n\n" % string.join(formatters, ","))
        #write out the root logger properties
        log = self.loggers["(root)"]
        log.writeConfig(file)
        #write out other logger properties
        for log in loggers:
            log = self.loggers[log]
            log.writeConfig(file)
        #write out handler properties
        for hand in handlers:
            hand = self.handlers[hand]
            hand.writeConfig(file)
        #write out formatter properties
        for form in formatters:
            form = self.formatters[form]
            form.writeConfig(file)
        file.close()
# Tk root window; created by main().
root = None
def onClose():
    """Window-manager close hook: hide any popup, then tear down the root."""
    # NOTE(review): _popup is defined elsewhere in this module -- confirm.
    if _popup:
        _popup.withdraw()
    root.destroy()
def main():
    """Create the Tk root window, embed the Configurator and run the loop."""
    global root
    root=Tk()
    cfg = Configurator(root)
    cfg.pack(side=LEFT, fill=BOTH, expand=1)
    # Route window-manager close through onClose so popups are hidden first.
    root.protocol("WM_DELETE_WINDOW", onClose)
    root.mainloop()
if __name__ == "__main__":
main() | gpl-2.0 |
maartenq/ansible | lib/ansible/modules/network/aci/aci_config_snapshot.py | 10 | 9721 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_config_snapshot
short_description: Manage Config Snapshots (config:Snapshot, config:ExportP)
description:
- Manage Config Snapshots on Cisco ACI fabrics.
- Creating new Snapshots is done using the configExportP class.
- Removing Snapshots is done using the configSnapshot class.
notes:
- The APIC does not provide a mechanism for naming the snapshots.
- 'Snapshot files use the following naming structure: ce_<config export policy name>-<yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>.<mss>+<hh>:<mm>.'
- 'Snapshot objects use the following naming structure: run-<yyyy>-<mm>-<dd>T<hh>-<mm>-<ss>.'
- More information about the internal APIC classes B(config:Snapshot) and B(config:ExportP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
description:
description:
- The description for the Config Export Policy.
aliases: [ descr ]
export_policy:
description:
- The name of the Export Policy to use for Config Snapshots.
aliases: [ name ]
format:
description:
- Sets the config backup to be formatted in JSON or XML.
- The APIC defaults to C(json) when unset.
choices: [ json, xml ]
include_secure:
description:
- Determines if secure information should be included in the backup.
- The APIC defaults to C(yes) when unset.
type: bool
max_count:
description:
    - Determines how many snapshots can exist for the Export Policy before the APIC starts to roll over.
- Accepted values range between C(1) and C(10).
- The APIC defaults to C(3) when unset.
type: int
snapshot:
description:
- The name of the snapshot to delete.
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: present
export_policy: config_backup
max_count: 10
description: Backups taken before new configs are applied.
delegate_to: localhost
- name: Query all Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
- name: Query Snapshots associated with a particular Export Policy
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
register: query_result
- name: Delete a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-24T17-20-05
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point for the aci_config_snapshot Ansible module."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        description=dict(type='str', aliases=['descr']),
        export_policy=dict(type='str', aliases=['name']),  # Not required for querying all objects
        format=dict(type='str', choices=['json', 'xml']),
        include_secure=dict(type='bool'),
        max_count=dict(type='int'),
        snapshot=dict(type='str'),
        state=dict(type='str', choices=['absent', 'present', 'query'], default='present'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        required_if=[
            ['state', 'absent', ['export_policy', 'snapshot']],
            ['state', 'present', ['export_policy']],
        ],
    )

    aci = ACIModule(module)

    params = module.params
    description = params['description']
    export_policy = params['export_policy']
    file_format = params['format']
    include_secure = aci.boolean(params['include_secure'])
    max_count = params['max_count']
    snapshot = params['snapshot']
    state = params['state']

    if max_count is not None:
        if not 1 <= max_count <= 10:
            module.fail_json(msg="Parameter 'max_count' must be a number between 1 and 10")
        max_count = str(max_count)

    # Snapshot objects on the APIC always carry a 'run-' prefix.
    if snapshot is not None and not snapshot.startswith('run-'):
        snapshot = 'run-' + snapshot

    if state == 'present':
        # Creating a snapshot means triggering the Config Export Policy.
        aci.construct_url(
            root_class=dict(
                aci_class='configExportP',
                aci_rn='fabric/configexp-{0}'.format(export_policy),
                module_object=export_policy,
                target_filter={'name': export_policy},
            ),
        )

        aci.get_existing()

        aci.payload(
            aci_class='configExportP',
            class_config=dict(
                adminSt='triggered',
                descr=description,
                format=file_format,
                includeSecureFields=include_secure,
                maxSnapshotCount=max_count,
                name=export_policy,
                snapshot='yes',
            ),
        )

        aci.get_diff('configExportP')

        # Create a new Snapshot
        aci.post_config()

    else:
        # Query/delete address the snapshot container, so prefix the
        # proper DN onto export_policy.
        if export_policy is not None:
            export_policy = 'uni/fabric/configexp-{0}'.format(export_policy)

        aci.construct_url(
            root_class=dict(
                aci_class='configSnapshotCont',
                aci_rn='backupst/snapshots-[{0}]'.format(export_policy),
                module_object=export_policy,
                target_filter={'name': export_policy},
            ),
            subclass_1=dict(
                aci_class='configSnapshot',
                aci_rn='snapshot-{0}'.format(snapshot),
                module_object=snapshot,
                target_filter={'name': snapshot},
            ),
        )

        aci.get_existing()

        if state == 'absent':
            # Build the POST request used to remove the Snapshot: the APIC
            # deletes snapshots by marking them retired, not by DELETE.
            aci.payload(
                aci_class='configSnapshot',
                class_config=dict(
                    name=snapshot,
                    retire="yes",
                ),
            )

            if aci.existing:
                aci.get_diff('configSnapshot')

                # Mark Snapshot for Deletion
                aci.post_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
kevin-coder/tensorflow-fork | tensorflow/python/util/serialization_test.py | 32 | 2724 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for serialization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.platform import test
from tensorflow.python.util import serialization
class SerializationTests(test.TestCase):
  """Checks that Keras/TF objects survive a JSON round trip."""

  def _round_trip(self, obj):
    """json.dumps then json.loads using the serialization hook under test."""
    return json.loads(json.dumps(obj, default=serialization.get_json_type))

  def test_serialize_dense(self):
    dense = core.Dense(3)
    dense(constant_op.constant([[4.]]))
    round_trip = self._round_trip(dense)
    self.assertEqual(3, round_trip["config"]["units"])

  def test_serialize_shape(self):
    round_trip = self._round_trip(tensor_shape.TensorShape([None, 2, 3]))
    # Unknown dimensions serialize as JSON null.
    self.assertIs(round_trip[0], None)
    self.assertEqual(round_trip[1], 2)

  @test_util.run_in_graph_and_eager_modes
  def test_serialize_sequential(self):
    model = sequential.Sequential()
    model.add(core.Dense(4))
    model.add(core.Dense(5))
    model(constant_op.constant([[1.]]))
    round_trip = self._round_trip(model)
    self.assertEqual(
        5, round_trip["config"]["layers"][1]["config"]["units"])

  @test_util.run_in_graph_and_eager_modes
  def test_serialize_model(self):
    inputs = input_layer.Input(shape=[3])
    outputs = core.Dense(10)(inputs)
    model = training.Model(inputs, outputs)
    model(constant_op.constant([[1., 1., 1.]]))
    round_trip = self._round_trip(model)
    self.assertEqual(
        10, round_trip["config"]["layers"][1]["config"]["units"])
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
papados/ordersys | Lib/encodings/cp037.py | 593 | 13377 | """ Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp037 encoder/decoder backed by the charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp037 encoder; charmap codecs carry no state."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data


class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp037 decoder; charmap codecs carry no state."""

    def decode(self, input, final=False):
        data, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return data
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; nothing codec-specific to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; nothing codec-specific to add.
    pass
### encodings module API

def getregentry():
    """Return the CodecInfo record the encodings package registers."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp037',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# One 256-character string: position N holds the unicode character for
# byte value N.  Generated by gencodec.py from the vendor mapping file;
# do not edit the entries by hand.
decoding_table = (
    u'\x00'     #  0x00 -> NULL
    u'\x01'     #  0x01 -> START OF HEADING
    u'\x02'     #  0x02 -> START OF TEXT
    u'\x03'     #  0x03 -> END OF TEXT
    u'\x9c'     #  0x04 -> CONTROL
    u'\t'       #  0x05 -> HORIZONTAL TABULATION
    u'\x86'     #  0x06 -> CONTROL
    u'\x7f'     #  0x07 -> DELETE
    u'\x97'     #  0x08 -> CONTROL
    u'\x8d'     #  0x09 -> CONTROL
    u'\x8e'     #  0x0A -> CONTROL
    u'\x0b'     #  0x0B -> VERTICAL TABULATION
    u'\x0c'     #  0x0C -> FORM FEED
    u'\r'       #  0x0D -> CARRIAGE RETURN
    u'\x0e'     #  0x0E -> SHIFT OUT
    u'\x0f'     #  0x0F -> SHIFT IN
    u'\x10'     #  0x10 -> DATA LINK ESCAPE
    u'\x11'     #  0x11 -> DEVICE CONTROL ONE
    u'\x12'     #  0x12 -> DEVICE CONTROL TWO
    u'\x13'     #  0x13 -> DEVICE CONTROL THREE
    u'\x9d'     #  0x14 -> CONTROL
    u'\x85'     #  0x15 -> CONTROL
    u'\x08'     #  0x16 -> BACKSPACE
    u'\x87'     #  0x17 -> CONTROL
    u'\x18'     #  0x18 -> CANCEL
    u'\x19'     #  0x19 -> END OF MEDIUM
    u'\x92'     #  0x1A -> CONTROL
    u'\x8f'     #  0x1B -> CONTROL
    u'\x1c'     #  0x1C -> FILE SEPARATOR
    u'\x1d'     #  0x1D -> GROUP SEPARATOR
    u'\x1e'     #  0x1E -> RECORD SEPARATOR
    u'\x1f'     #  0x1F -> UNIT SEPARATOR
    u'\x80'     #  0x20 -> CONTROL
    u'\x81'     #  0x21 -> CONTROL
    u'\x82'     #  0x22 -> CONTROL
    u'\x83'     #  0x23 -> CONTROL
    u'\x84'     #  0x24 -> CONTROL
    u'\n'       #  0x25 -> LINE FEED
    u'\x17'     #  0x26 -> END OF TRANSMISSION BLOCK
    u'\x1b'     #  0x27 -> ESCAPE
    u'\x88'     #  0x28 -> CONTROL
    u'\x89'     #  0x29 -> CONTROL
    u'\x8a'     #  0x2A -> CONTROL
    u'\x8b'     #  0x2B -> CONTROL
    u'\x8c'     #  0x2C -> CONTROL
    u'\x05'     #  0x2D -> ENQUIRY
    u'\x06'     #  0x2E -> ACKNOWLEDGE
    u'\x07'     #  0x2F -> BELL
    u'\x90'     #  0x30 -> CONTROL
    u'\x91'     #  0x31 -> CONTROL
    u'\x16'     #  0x32 -> SYNCHRONOUS IDLE
    u'\x93'     #  0x33 -> CONTROL
    u'\x94'     #  0x34 -> CONTROL
    u'\x95'     #  0x35 -> CONTROL
    u'\x96'     #  0x36 -> CONTROL
    u'\x04'     #  0x37 -> END OF TRANSMISSION
    u'\x98'     #  0x38 -> CONTROL
    u'\x99'     #  0x39 -> CONTROL
    u'\x9a'     #  0x3A -> CONTROL
    u'\x9b'     #  0x3B -> CONTROL
    u'\x14'     #  0x3C -> DEVICE CONTROL FOUR
    u'\x15'     #  0x3D -> NEGATIVE ACKNOWLEDGE
    u'\x9e'     #  0x3E -> CONTROL
    u'\x1a'     #  0x3F -> SUBSTITUTE
    u' '        #  0x40 -> SPACE
    u'\xa0'     #  0x41 -> NO-BREAK SPACE
    u'\xe2'     #  0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4'     #  0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0'     #  0x44 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1'     #  0x45 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe3'     #  0x46 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe5'     #  0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7'     #  0x48 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xf1'     #  0x49 -> LATIN SMALL LETTER N WITH TILDE
    u'\xa2'     #  0x4A -> CENT SIGN
    u'.'        #  0x4B -> FULL STOP
    u'<'        #  0x4C -> LESS-THAN SIGN
    u'('        #  0x4D -> LEFT PARENTHESIS
    u'+'        #  0x4E -> PLUS SIGN
    u'|'        #  0x4F -> VERTICAL LINE
    u'&'        #  0x50 -> AMPERSAND
    u'\xe9'     #  0x51 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea'     #  0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     #  0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     #  0x54 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xed'     #  0x55 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee'     #  0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef'     #  0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xec'     #  0x58 -> LATIN SMALL LETTER I WITH GRAVE
    u'\xdf'     #  0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
    u'!'        #  0x5A -> EXCLAMATION MARK
    u'$'        #  0x5B -> DOLLAR SIGN
    u'*'        #  0x5C -> ASTERISK
    u')'        #  0x5D -> RIGHT PARENTHESIS
    u';'        #  0x5E -> SEMICOLON
    u'\xac'     #  0x5F -> NOT SIGN
    u'-'        #  0x60 -> HYPHEN-MINUS
    u'/'        #  0x61 -> SOLIDUS
    u'\xc2'     #  0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc4'     #  0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc0'     #  0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1'     #  0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc3'     #  0x66 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc5'     #  0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc7'     #  0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xd1'     #  0x69 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xa6'     #  0x6A -> BROKEN BAR
    u','        #  0x6B -> COMMA
    u'%'        #  0x6C -> PERCENT SIGN
    u'_'        #  0x6D -> LOW LINE
    u'>'        #  0x6E -> GREATER-THAN SIGN
    u'?'        #  0x6F -> QUESTION MARK
    u'\xf8'     #  0x70 -> LATIN SMALL LETTER O WITH STROKE
    u'\xc9'     #  0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca'     #  0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb'     #  0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xc8'     #  0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xcd'     #  0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce'     #  0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf'     #  0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\xcc'     #  0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
    u'`'        #  0x79 -> GRAVE ACCENT
    u':'        #  0x7A -> COLON
    u'#'        #  0x7B -> NUMBER SIGN
    u'@'        #  0x7C -> COMMERCIAL AT
    u"'"        #  0x7D -> APOSTROPHE
    u'='        #  0x7E -> EQUALS SIGN
    u'"'        #  0x7F -> QUOTATION MARK
    u'\xd8'     #  0x80 -> LATIN CAPITAL LETTER O WITH STROKE
    u'a'        #  0x81 -> LATIN SMALL LETTER A
    u'b'        #  0x82 -> LATIN SMALL LETTER B
    u'c'        #  0x83 -> LATIN SMALL LETTER C
    u'd'        #  0x84 -> LATIN SMALL LETTER D
    u'e'        #  0x85 -> LATIN SMALL LETTER E
    u'f'        #  0x86 -> LATIN SMALL LETTER F
    u'g'        #  0x87 -> LATIN SMALL LETTER G
    u'h'        #  0x88 -> LATIN SMALL LETTER H
    u'i'        #  0x89 -> LATIN SMALL LETTER I
    u'\xab'     #  0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     #  0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xf0'     #  0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
    u'\xfd'     #  0x8D -> LATIN SMALL LETTER Y WITH ACUTE
    u'\xfe'     #  0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
    u'\xb1'     #  0x8F -> PLUS-MINUS SIGN
    u'\xb0'     #  0x90 -> DEGREE SIGN
    u'j'        #  0x91 -> LATIN SMALL LETTER J
    u'k'        #  0x92 -> LATIN SMALL LETTER K
    u'l'        #  0x93 -> LATIN SMALL LETTER L
    u'm'        #  0x94 -> LATIN SMALL LETTER M
    u'n'        #  0x95 -> LATIN SMALL LETTER N
    u'o'        #  0x96 -> LATIN SMALL LETTER O
    u'p'        #  0x97 -> LATIN SMALL LETTER P
    u'q'        #  0x98 -> LATIN SMALL LETTER Q
    u'r'        #  0x99 -> LATIN SMALL LETTER R
    u'\xaa'     #  0x9A -> FEMININE ORDINAL INDICATOR
    u'\xba'     #  0x9B -> MASCULINE ORDINAL INDICATOR
    u'\xe6'     #  0x9C -> LATIN SMALL LIGATURE AE
    u'\xb8'     #  0x9D -> CEDILLA
    u'\xc6'     #  0x9E -> LATIN CAPITAL LIGATURE AE
    u'\xa4'     #  0x9F -> CURRENCY SIGN
    u'\xb5'     #  0xA0 -> MICRO SIGN
    u'~'        #  0xA1 -> TILDE
    u's'        #  0xA2 -> LATIN SMALL LETTER S
    u't'        #  0xA3 -> LATIN SMALL LETTER T
    u'u'        #  0xA4 -> LATIN SMALL LETTER U
    u'v'        #  0xA5 -> LATIN SMALL LETTER V
    u'w'        #  0xA6 -> LATIN SMALL LETTER W
    u'x'        #  0xA7 -> LATIN SMALL LETTER X
    u'y'        #  0xA8 -> LATIN SMALL LETTER Y
    u'z'        #  0xA9 -> LATIN SMALL LETTER Z
    u'\xa1'     #  0xAA -> INVERTED EXCLAMATION MARK
    u'\xbf'     #  0xAB -> INVERTED QUESTION MARK
    u'\xd0'     #  0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
    u'\xdd'     #  0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xde'     #  0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
    u'\xae'     #  0xAF -> REGISTERED SIGN
    u'^'        #  0xB0 -> CIRCUMFLEX ACCENT
    u'\xa3'     #  0xB1 -> POUND SIGN
    u'\xa5'     #  0xB2 -> YEN SIGN
    u'\xb7'     #  0xB3 -> MIDDLE DOT
    u'\xa9'     #  0xB4 -> COPYRIGHT SIGN
    u'\xa7'     #  0xB5 -> SECTION SIGN
    u'\xb6'     #  0xB6 -> PILCROW SIGN
    u'\xbc'     #  0xB7 -> VULGAR FRACTION ONE QUARTER
    u'\xbd'     #  0xB8 -> VULGAR FRACTION ONE HALF
    u'\xbe'     #  0xB9 -> VULGAR FRACTION THREE QUARTERS
    u'['        #  0xBA -> LEFT SQUARE BRACKET
    u']'        #  0xBB -> RIGHT SQUARE BRACKET
    u'\xaf'     #  0xBC -> MACRON
    u'\xa8'     #  0xBD -> DIAERESIS
    u'\xb4'     #  0xBE -> ACUTE ACCENT
    u'\xd7'     #  0xBF -> MULTIPLICATION SIGN
    u'{'        #  0xC0 -> LEFT CURLY BRACKET
    u'A'        #  0xC1 -> LATIN CAPITAL LETTER A
    u'B'        #  0xC2 -> LATIN CAPITAL LETTER B
    u'C'        #  0xC3 -> LATIN CAPITAL LETTER C
    u'D'        #  0xC4 -> LATIN CAPITAL LETTER D
    u'E'        #  0xC5 -> LATIN CAPITAL LETTER E
    u'F'        #  0xC6 -> LATIN CAPITAL LETTER F
    u'G'        #  0xC7 -> LATIN CAPITAL LETTER G
    u'H'        #  0xC8 -> LATIN CAPITAL LETTER H
    u'I'        #  0xC9 -> LATIN CAPITAL LETTER I
    u'\xad'     #  0xCA -> SOFT HYPHEN
    u'\xf4'     #  0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6'     #  0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2'     #  0xCD -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3'     #  0xCE -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf5'     #  0xCF -> LATIN SMALL LETTER O WITH TILDE
    u'}'        #  0xD0 -> RIGHT CURLY BRACKET
    u'J'        #  0xD1 -> LATIN CAPITAL LETTER J
    u'K'        #  0xD2 -> LATIN CAPITAL LETTER K
    u'L'        #  0xD3 -> LATIN CAPITAL LETTER L
    u'M'        #  0xD4 -> LATIN CAPITAL LETTER M
    u'N'        #  0xD5 -> LATIN CAPITAL LETTER N
    u'O'        #  0xD6 -> LATIN CAPITAL LETTER O
    u'P'        #  0xD7 -> LATIN CAPITAL LETTER P
    u'Q'        #  0xD8 -> LATIN CAPITAL LETTER Q
    u'R'        #  0xD9 -> LATIN CAPITAL LETTER R
    u'\xb9'     #  0xDA -> SUPERSCRIPT ONE
    u'\xfb'     #  0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc'     #  0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xf9'     #  0xDD -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa'     #  0xDE -> LATIN SMALL LETTER U WITH ACUTE
    u'\xff'     #  0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\\'       #  0xE0 -> REVERSE SOLIDUS
    u'\xf7'     #  0xE1 -> DIVISION SIGN
    u'S'        #  0xE2 -> LATIN CAPITAL LETTER S
    u'T'        #  0xE3 -> LATIN CAPITAL LETTER T
    u'U'        #  0xE4 -> LATIN CAPITAL LETTER U
    u'V'        #  0xE5 -> LATIN CAPITAL LETTER V
    u'W'        #  0xE6 -> LATIN CAPITAL LETTER W
    u'X'        #  0xE7 -> LATIN CAPITAL LETTER X
    u'Y'        #  0xE8 -> LATIN CAPITAL LETTER Y
    u'Z'        #  0xE9 -> LATIN CAPITAL LETTER Z
    u'\xb2'     #  0xEA -> SUPERSCRIPT TWO
    u'\xd4'     #  0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd6'     #  0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd2'     #  0xED -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3'     #  0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd5'     #  0xEF -> LATIN CAPITAL LETTER O WITH TILDE
    u'0'        #  0xF0 -> DIGIT ZERO
    u'1'        #  0xF1 -> DIGIT ONE
    u'2'        #  0xF2 -> DIGIT TWO
    u'3'        #  0xF3 -> DIGIT THREE
    u'4'        #  0xF4 -> DIGIT FOUR
    u'5'        #  0xF5 -> DIGIT FIVE
    u'6'        #  0xF6 -> DIGIT SIX
    u'7'        #  0xF7 -> DIGIT SEVEN
    u'8'        #  0xF8 -> DIGIT EIGHT
    u'9'        #  0xF9 -> DIGIT NINE
    u'\xb3'     #  0xFA -> SUPERSCRIPT THREE
    u'\xdb'     #  0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc'     #  0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xd9'     #  0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda'     #  0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\x9f'     #  0xFF -> CONTROL
)

### Encoding table

# Inverse mapping (unicode character -> byte) derived from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| unlicense |
fbradyirl/home-assistant | homeassistant/components/satel_integra/switch.py | 3 | 3172 | """Support for Satel Integra modifiable outputs represented as switches."""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_DEVICE_CODE,
CONF_SWITCHABLE_OUTPUTS,
CONF_ZONE_NAME,
SIGNAL_OUTPUTS_UPDATED,
DATA_SATEL,
)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ["satel_integra"]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Satel Integra switch devices."""
    # Platform is discovery-only: without discovery data there is nothing to add.
    if not discovery_info:
        return

    controller = hass.data[DATA_SATEL]
    code = discovery_info[CONF_DEVICE_CODE]

    # One switch entity per configured switchable output.
    entities = [
        SatelIntegraSwitch(controller, output_number, output_config[CONF_ZONE_NAME], code)
        for output_number, output_config in discovery_info[CONF_SWITCHABLE_OUTPUTS].items()
    ]
    async_add_entities(entities)
class SatelIntegraSwitch(SwitchDevice):
    """Representation of a Satel Integra switchable output as a switch entity."""

    def __init__(self, controller, device_number, device_name, code):
        """Initialize the switch.

        controller: the shared Satel connection object stored in hass.data.
        device_number: output number on the alarm panel.
        device_name: friendly name shown in the UI.
        code: user code required by the panel to operate outputs.
        """
        self._device_number = device_number
        self._name = device_name
        self._state = False
        self._code = code
        self._satel = controller

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Listen for output-state broadcasts from the Satel integration.
        async_dispatcher_connect(
            self.hass, SIGNAL_OUTPUTS_UPDATED, self._devices_updated
        )

    @callback
    def _devices_updated(self, zones):
        """Update switch state, if needed."""
        _LOGGER.debug("Update switch name: %s zones: %s", self._name, zones)
        if self._device_number in zones:
            new_state = self._read_state()
            _LOGGER.debug("New state: %s", new_state)
            # Only push an update to HA when the state actually changed.
            if new_state != self._state:
                self._state = new_state
                self.async_schedule_update_ha_state()

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        _LOGGER.debug("Switch: %s status: %s," " turning on", self._name, self._state)
        await self._satel.set_output(self._code, self._device_number, True)
        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        _LOGGER.debug(
            "Switch name: %s status: %s," " turning off", self._name, self._state
        )
        await self._satel.set_output(self._code, self._device_number, False)
        self.async_schedule_update_ha_state()

    @property
    def is_on(self):
        """Return true if device is on."""
        # Re-reads the controller state on every access (and caches it in
        # self._state) rather than relying solely on dispatcher updates.
        self._state = self._read_state()
        return self._state

    def _read_state(self):
        """Read state of the device from the controller's violated outputs."""
        return self._device_number in self._satel.violated_outputs

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """Don't poll; state is pushed via the dispatcher signal."""
        return False
| apache-2.0 |
bpsinc-native/src_tools_grit | grit/tool/build_unittest.py | 5 | 3033 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the 'grit build' tool.
'''
import os
import sys
import tempfile
# Bootstrap: when this file is executed directly (not via the test runner),
# make the 'grit' package importable by adding the repo root to sys.path
# before the 'from grit import ...' statements below run.
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import build
class BuildUnittest(unittest.TestCase):
  """Unit tests for the 'grit build' tool (build.RcBuilder).

  Uses the deprecated failUnless*/failUnlessEqual aliases replaced with the
  canonical assertTrue/assertEqual (the aliases were removed in Python 3.12).
  """

  def testFindTranslationsWithSubstitutions(self):
    # This is a regression test; we had a bug where GRIT would fail to find
    # messages with substitutions e.g. "Hello [IDS_USER]" where IDS_USER is
    # another <message>.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()

    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False

    # A failure to resolve the substitution makes Run() raise, so simply
    # completing is the assertion here.
    builder.Run(DummyOpts(), ['-o', output_dir])

  def testGenerateDepFile(self):
    """Run() with --depfile/--depdir must write a makefile-style .d file."""
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()

    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False

    expected_dep_file = os.path.join(output_dir, 'substitute.grd.d')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '--depdir', output_dir,
                              '--depfile', expected_dep_file])

    self.assertTrue(os.path.isfile(expected_dep_file))
    with open(expected_dep_file) as f:
      # Format: "<output>: <space-separated inputs>"
      line = f.readline()
      (dep_output_file, deps_string) = line.split(': ')
      deps = deps_string.split(' ')
      self.assertEqual("resource.h", dep_output_file)
      self.assertEqual(1, len(deps))
      self.assertEqual(deps[0],
          util.PathFromRoot('grit/testdata/substitute.xmb'))

  def testAssertOutputs(self):
    """-a flags must list every output; a partial list makes Run() return 2."""
    output_dir = tempfile.mkdtemp()

    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False

    # Incomplete output file list should fail.
    builder_fail = build.RcBuilder()
    self.assertEqual(2,
        builder_fail.Run(DummyOpts(), [
            '-o', output_dir,
            '-a', os.path.abspath(
                os.path.join(output_dir, 'en_generated_resources.rc'))]))

    # Complete output file list should succeed.
    builder_ok = build.RcBuilder()
    self.assertEqual(0,
        builder_ok.Run(DummyOpts(), [
            '-o', output_dir,
            '-a', os.path.abspath(
                os.path.join(output_dir, 'en_generated_resources.rc')),
            '-a', os.path.abspath(
                os.path.join(output_dir, 'sv_generated_resources.rc')),
            '-a', os.path.abspath(
                os.path.join(output_dir, 'resource.h'))]))
# Allow running the tests in this module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-2-clause |
mavit/ansible | lib/ansible/modules/cloud/rackspace/rax_clb.py | 65 | 9095 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_clb
short_description: create / delete a load balancer in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud load balancer.
version_added: "1.4"
options:
algorithm:
description:
- algorithm for the balancer being created
choices:
- RANDOM
- LEAST_CONNECTIONS
- ROUND_ROBIN
- WEIGHTED_LEAST_CONNECTIONS
- WEIGHTED_ROUND_ROBIN
default: LEAST_CONNECTIONS
meta:
description:
- A hash of metadata to associate with the instance
name:
description:
- Name to give the load balancer
port:
description:
- Port for the balancer being created
default: 80
protocol:
description:
- Protocol for the balancer being created
choices:
- DNS_TCP
- DNS_UDP
- FTP
- HTTP
- HTTPS
- IMAPS
- IMAPv4
- LDAP
- LDAPS
- MYSQL
- POP3
- POP3S
- SMTP
- TCP
- TCP_CLIENT_FIRST
- UDP
- UDP_STREAM
- SFTP
default: HTTP
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
timeout:
description:
- timeout for communication between the balancer and the node
default: 30
type:
description:
- type of interface for the balancer being created
choices:
- PUBLIC
- SERVICENET
default: PUBLIC
vip_id:
description:
- Virtual IP ID to use when creating the load balancer for purposes of
sharing an IP with another load balancer of another protocol
version_added: 1.5
wait:
description:
- wait for the balancer to be in state 'running' before returning
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Load Balancer
gather_facts: False
hosts: local
connection: local
tasks:
- name: Load Balancer create request
local_action:
module: rax_clb
credentials: ~/.raxpub
name: my-lb
port: 8080
protocol: HTTP
type: SERVICENET
timeout: 30
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_lb
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (CLB_ALGORITHMS,
CLB_PROTOCOLS,
rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
                        vip_type, timeout, wait, wait_timeout, vip_id):
    """Create, update or delete a Rackspace cloud load balancer.

    Exits via module.exit_json()/module.fail_json(); never returns normally.

    Fixes over the previous revision:
    - `metadata` is always initialized (it was unbound when `meta` was not a
      dict, causing a NameError on the create path).
    - Exception text uses the exception itself instead of the Python-2-only
      `e.message` attribute (AttributeError on Python 3).
    - wait_until() receives a real one-element tuple; `('DELETED')` is just a
      parenthesized string.
    """
    if int(timeout) < 30:
        module.fail_json(msg='"timeout" must be greater than or equal to 30')

    changed = False
    balancers = []

    clb = pyrax.cloud_loadbalancers
    if not clb:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Page through the full balancer list; a page shorter than 2 entries
    # means the marker-based pagination is exhausted.
    balancer_list = clb.list()
    while balancer_list:
        retrieved = clb.list(marker=balancer_list.pop().id)
        balancer_list.extend(retrieved)
        if len(retrieved) < 2:
            break

    # Match by either friendly name or balancer ID.
    for balancer in balancer_list:
        if name != balancer.name and name != balancer.id:
            continue
        balancers.append(balancer)

    if len(balancers) > 1:
        module.fail_json(msg='Multiple Load Balancers were matched by name, '
                             'try using the Load Balancer ID instead')

    if state == 'present':
        # pyrax expects metadata as a list of {key, value} dicts.
        metadata = []
        if isinstance(meta, dict):
            metadata = [dict(key=k, value=v) for k, v in meta.items()]

        if not balancers:
            try:
                virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
                balancer = clb.create(name, metadata=metadata, port=port,
                                      algorithm=algorithm, protocol=protocol,
                                      timeout=timeout, virtual_ips=virtual_ips)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)
        else:
            balancer = balancers[0]
            # Normalize the existing metadata to the same list-of-dicts shape
            # so it can be compared against the requested metadata below.
            setattr(balancer, 'metadata',
                    [dict(key=k, value=v) for k, v in
                     balancer.get_metadata().items()])
            atts = {
                'name': name,
                'algorithm': algorithm,
                'port': port,
                'protocol': protocol,
                'timeout': timeout
            }
            for att, value in atts.items():
                current = getattr(balancer, att)
                if current != value:
                    changed = True

            if changed:
                balancer.update(**atts)

            if balancer.metadata != metadata:
                balancer.set_metadata(meta)
                changed = True

            # The VIP type of an existing balancer cannot be changed in place.
            virtual_ips = [clb.VirtualIP(type=vip_type)]
            current_vip_types = set([v.type for v in balancer.virtual_ips])
            vip_types = set([v.type for v in virtual_ips])
            if current_vip_types != vip_types:
                module.fail_json(msg='Load balancer Virtual IP type cannot '
                                     'be changed')

        if wait:
            attempts = wait_timeout // 5
            pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

        balancer.get()  # refresh attributes before reporting
        instance = rax_to_dict(balancer, 'clb')

        result = dict(changed=changed, balancer=instance)

        if balancer.status == 'ERROR':
            result['msg'] = '%s failed to build' % balancer.id
        elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
            result['msg'] = 'Timeout waiting on %s' % balancer.id

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    elif state == 'absent':
        if balancers:
            balancer = balancers[0]
            try:
                balancer.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

            instance = rax_to_dict(balancer, 'clb')

            if wait:
                attempts = wait_timeout // 5
                pyrax.utils.wait_until(balancer, 'status', ('DELETED',),
                                       interval=5, attempts=attempts)
        else:
            instance = {}

        module.exit_json(changed=changed, balancer=instance)
def main():
    """Ansible module entry point: parse arguments and dispatch to
    cloud_load_balancer()."""
    # Start from the shared Rackspace argument spec and add module-specific options.
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            algorithm=dict(choices=CLB_ALGORITHMS,
                           default='LEAST_CONNECTIONS'),
            meta=dict(type='dict', default={}),
            name=dict(required=True),
            port=dict(type='int', default=80),
            protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
            state=dict(default='present', choices=['present', 'absent']),
            timeout=dict(type='int', default=30),
            type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
            vip_id=dict(),
            wait=dict(type='bool'),
            wait_timeout=dict(default=300),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    # pyrax is an optional import (see the try/except at the top of the file);
    # fail cleanly inside the module protocol if it is missing.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    algorithm = module.params.get('algorithm')
    meta = module.params.get('meta')
    name = module.params.get('name')
    port = module.params.get('port')
    protocol = module.params.get('protocol')
    state = module.params.get('state')
    timeout = int(module.params.get('timeout'))
    vip_id = module.params.get('vip_id')
    vip_type = module.params.get('type')
    wait = module.params.get('wait')
    # wait_timeout has no type= in the spec, so it arrives as a string.
    wait_timeout = int(module.params.get('wait_timeout'))

    # Authenticate/configure the pyrax client for the requested region.
    setup_rax_module(module, pyrax)

    cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
                        vip_type, timeout, wait, wait_timeout, vip_id)


if __name__ == '__main__':
    main()
| gpl-3.0 |
BigTone2009/sms-tools | software/transformations_interface/hpsMorph_GUI_frame.py | 21 | 14975 | # GUI frame for the hpsMorph_function.py
from Tkinter import *
import tkFileDialog, tkMessageBox
import sys, os
from scipy.io.wavfile import read
import numpy as np
import hpsMorph_function as hM
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
class HpsMorph_frame:
    """Tkinter GUI frame for HPS (harmonic plus stochastic) morphing.

    Collects analysis parameters for two input sounds, runs the analysis via
    hpsMorph_function, and applies a time-varying morph between them.
    """

    def __init__(self, parent):
        """Build the UI inside the given parent widget."""
        self.parent = parent
        self.initUI()

    def initUI(self):
        """Create and lay out all widgets (grid geometry, single column)."""
        ## INPUT FILE 1
        choose1_label = "inputFile1:"
        Label(self.parent, text=choose1_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))

        #TEXTBOX TO PRINT PATH OF THE SOUND FILE
        self.filelocation1 = Entry(self.parent)
        self.filelocation1.focus_set()
        self.filelocation1["width"] = 30
        self.filelocation1.grid(row=0,column=0, sticky=W, padx=(75, 5), pady=(10,2))
        self.filelocation1.delete(0, END)
        self.filelocation1.insert(0, '../../sounds/violin-B3.wav')

        #BUTTON TO BROWSE SOUND FILE 1
        open_file1 = Button(self.parent, text="...", command=self.browse_file1) #see: def browse_file(self)
        open_file1.grid(row=0, column=0, sticky=W, padx=(330, 6), pady=(10,2)) #put it beside the filelocation textbox

        #BUTTON TO PREVIEW SOUND FILE 1
        preview1 = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation1.get()), bg="gray30", fg="white")
        preview1.grid(row=0, column=0, sticky=W, padx=(375,6), pady=(10,2))

        #ANALYSIS WINDOW TYPE SOUND 1
        wtype1_label = "window1:"
        Label(self.parent, text=wtype1_label).grid(row=1, column=0, sticky=W, padx=5, pady=(4,2))
        self.w1_type = StringVar()
        self.w1_type.set("blackman") # initial value
        window1_option = OptionMenu(self.parent, self.w1_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
        window1_option.grid(row=1, column=0, sticky=W, padx=(68,5), pady=(4,2))

        #WINDOW SIZE SOUND 1
        M1_label = "M1:"
        Label(self.parent, text=M1_label).grid(row=1, column=0, sticky=W, padx=(180, 5), pady=(4,2))
        self.M1 = Entry(self.parent, justify=CENTER)
        self.M1["width"] = 5
        self.M1.grid(row=1,column=0, sticky=W, padx=(208,5), pady=(4,2))
        self.M1.delete(0, END)
        self.M1.insert(0, "1001")

        #FFT SIZE SOUND 1
        N1_label = "N1:"
        Label(self.parent, text=N1_label).grid(row=1, column=0, sticky=W, padx=(265, 5), pady=(4,2))
        self.N1 = Entry(self.parent, justify=CENTER)
        self.N1["width"] = 5
        self.N1.grid(row=1,column=0, sticky=W, padx=(290,5), pady=(4,2))
        self.N1.delete(0, END)
        self.N1.insert(0, "1024")

        #THRESHOLD MAGNITUDE SOUND 1
        t1_label = "t1:"
        Label(self.parent, text=t1_label).grid(row=1, column=0, sticky=W, padx=(343,5), pady=(4,2))
        self.t1 = Entry(self.parent, justify=CENTER)
        self.t1["width"] = 5
        self.t1.grid(row=1, column=0, sticky=W, padx=(370,5), pady=(4,2))
        self.t1.delete(0, END)
        self.t1.insert(0, "-100")

        #MIN DURATION SINUSOIDAL TRACKS SOUND 1
        minSineDur1_label = "minSineDur1:"
        Label(self.parent, text=minSineDur1_label).grid(row=2, column=0, sticky=W, padx=(5, 5), pady=(4,2))
        self.minSineDur1 = Entry(self.parent, justify=CENTER)
        self.minSineDur1["width"] = 5
        self.minSineDur1.grid(row=2, column=0, sticky=W, padx=(92,5), pady=(4,2))
        self.minSineDur1.delete(0, END)
        self.minSineDur1.insert(0, "0.05")

        #MIN FUNDAMENTAL FREQUENCY SOUND 1
        minf01_label = "minf01:"
        Label(self.parent, text=minf01_label).grid(row=2, column=0, sticky=W, padx=(157,5), pady=(4,2))
        self.minf01 = Entry(self.parent, justify=CENTER)
        self.minf01["width"] = 5
        self.minf01.grid(row=2, column=0, sticky=W, padx=(208,5), pady=(4,2))
        self.minf01.delete(0, END)
        self.minf01.insert(0, "200")

        #MAX FUNDAMENTAL FREQUENCY SOUND 1
        maxf01_label = "maxf01:"
        Label(self.parent, text=maxf01_label).grid(row=2, column=0, sticky=W, padx=(270,5), pady=(4,2))
        self.maxf01 = Entry(self.parent, justify=CENTER)
        self.maxf01["width"] = 5
        self.maxf01.grid(row=2, column=0, sticky=W, padx=(325,5), pady=(4,2))
        self.maxf01.delete(0, END)
        self.maxf01.insert(0, "300")

        #MAX ERROR ACCEPTED SOUND 1
        f0et1_label = "f0et1:"
        Label(self.parent, text=f0et1_label).grid(row=3, column=0, sticky=W, padx=5, pady=(4,2))
        self.f0et1 = Entry(self.parent, justify=CENTER)
        self.f0et1["width"] = 3
        self.f0et1.grid(row=3, column=0, sticky=W, padx=(45,5), pady=(4,2))
        self.f0et1.delete(0, END)
        self.f0et1.insert(0, "10")

        #ALLOWED DEVIATION OF HARMONIC TRACKS SOUND 1
        harmDevSlope1_label = "harmDevSlope1:"
        Label(self.parent, text=harmDevSlope1_label).grid(row=3, column=0, sticky=W, padx=(108,5), pady=(4,2))
        self.harmDevSlope1 = Entry(self.parent, justify=CENTER)
        self.harmDevSlope1["width"] = 5
        self.harmDevSlope1.grid(row=3, column=0, sticky=W, padx=(215,5), pady=(4,2))
        self.harmDevSlope1.delete(0, END)
        self.harmDevSlope1.insert(0, "0.01")

        ###
        #SEPARATION LINE
        Frame(self.parent,height=1,width=50,bg="black").grid(row=4, pady=5, sticky=W+E)
        ###

        ## INPUT FILE 2
        choose2_label = "inputFile2:"
        Label(self.parent, text=choose2_label).grid(row=5, column=0, sticky=W, padx=5, pady=(2,2))

        #TEXTBOX TO PRINT PATH OF THE SOUND FILE
        self.filelocation2 = Entry(self.parent)
        self.filelocation2.focus_set()
        self.filelocation2["width"] = 30
        self.filelocation2.grid(row=5,column=0, sticky=W, padx=(75, 5), pady=(2,2))
        self.filelocation2.delete(0, END)
        self.filelocation2.insert(0, '../../sounds/soprano-E4.wav')

        #BUTTON TO BROWSE SOUND FILE 2
        open_file2 = Button(self.parent, text="...", command=self.browse_file2) #see: def browse_file(self)
        open_file2.grid(row=5, column=0, sticky=W, padx=(330, 6), pady=(2,2)) #put it beside the filelocation textbox

        #BUTTON TO PREVIEW SOUND FILE 2
        preview2 = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation2.get()), bg="gray30", fg="white")
        preview2.grid(row=5, column=0, sticky=W, padx=(375,6), pady=(2,2))

        #ANALYSIS WINDOW TYPE SOUND 2
        wtype2_label = "window2:"
        Label(self.parent, text=wtype2_label).grid(row=6, column=0, sticky=W, padx=5, pady=(4,2))
        self.w2_type = StringVar()
        self.w2_type.set("hamming") # initial value
        window2_option = OptionMenu(self.parent, self.w2_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
        window2_option.grid(row=6, column=0, sticky=W, padx=(68,5), pady=(4,2))

        #WINDOW SIZE SOUND 2
        M2_label = "M2:"
        Label(self.parent, text=M2_label).grid(row=6, column=0, sticky=W, padx=(180, 5), pady=(4,2))
        self.M2 = Entry(self.parent, justify=CENTER)
        self.M2["width"] = 5
        self.M2.grid(row=6,column=0, sticky=W, padx=(208,5), pady=(4,2))
        self.M2.delete(0, END)
        self.M2.insert(0, "901")

        #FFT SIZE SOUND 2
        N2_label = "N2:"
        Label(self.parent, text=N2_label).grid(row=6, column=0, sticky=W, padx=(265, 5), pady=(4,2))
        self.N2 = Entry(self.parent, justify=CENTER)
        self.N2["width"] = 5
        self.N2.grid(row=6,column=0, sticky=W, padx=(290,5), pady=(4,2))
        self.N2.delete(0, END)
        self.N2.insert(0, "1024")

        #THRESHOLD MAGNITUDE SOUND 2
        t2_label = "t2:"
        Label(self.parent, text=t2_label).grid(row=6, column=0, sticky=W, padx=(343,5), pady=(4,2))
        self.t2 = Entry(self.parent, justify=CENTER)
        self.t2["width"] = 5
        self.t2.grid(row=6, column=0, sticky=W, padx=(370,5), pady=(4,2))
        self.t2.delete(0, END)
        self.t2.insert(0, "-100")

        #MIN DURATION SINUSOIDAL TRACKS SOUND 2
        minSineDur2_label = "minSineDur2:"
        Label(self.parent, text=minSineDur2_label).grid(row=7, column=0, sticky=W, padx=(5, 5), pady=(4,2))
        self.minSineDur2 = Entry(self.parent, justify=CENTER)
        self.minSineDur2["width"] = 5
        self.minSineDur2.grid(row=7, column=0, sticky=W, padx=(92,5), pady=(4,2))
        self.minSineDur2.delete(0, END)
        self.minSineDur2.insert(0, "0.05")

        #MIN FUNDAMENTAL FREQUENCY SOUND 2
        minf02_label = "minf02:"
        Label(self.parent, text=minf02_label).grid(row=7, column=0, sticky=W, padx=(157,5), pady=(4,2))
        self.minf02 = Entry(self.parent, justify=CENTER)
        self.minf02["width"] = 5
        self.minf02.grid(row=7, column=0, sticky=W, padx=(208,5), pady=(4,2))
        self.minf02.delete(0, END)
        self.minf02.insert(0, "250")

        #MAX FUNDAMENTAL FREQUENCY SOUND 2
        maxf02_label = "maxf02:"
        Label(self.parent, text=maxf02_label).grid(row=7, column=0, sticky=W, padx=(270,5), pady=(4,2))
        self.maxf02 = Entry(self.parent, justify=CENTER)
        self.maxf02["width"] = 5
        self.maxf02.grid(row=7, column=0, sticky=W, padx=(325,5), pady=(4,2))
        self.maxf02.delete(0, END)
        self.maxf02.insert(0, "500")

        #MAX ERROR ACCEPTED SOUND 2
        f0et2_label = "f0et2:"
        Label(self.parent, text=f0et2_label).grid(row=8, column=0, sticky=W, padx=5, pady=(4,2))
        self.f0et2 = Entry(self.parent, justify=CENTER)
        self.f0et2["width"] = 3
        self.f0et2.grid(row=8, column=0, sticky=W, padx=(45,5), pady=(4,2))
        self.f0et2.delete(0, END)
        self.f0et2.insert(0, "10")

        #ALLOWED DEVIATION OF HARMONIC TRACKS SOUND 2
        harmDevSlope2_label = "harmDevSlope2:"
        Label(self.parent, text=harmDevSlope2_label).grid(row=8, column=0, sticky=W, padx=(108,5), pady=(4,2))
        self.harmDevSlope2 = Entry(self.parent, justify=CENTER)
        self.harmDevSlope2["width"] = 5
        self.harmDevSlope2.grid(row=8, column=0, sticky=W, padx=(215,5), pady=(4,2))
        self.harmDevSlope2.delete(0, END)
        self.harmDevSlope2.insert(0, "0.01")

        ###
        #SEPARATION LINE
        Frame(self.parent,height=1,width=50,bg="black").grid(row=9, pady=5, sticky=W+E)
        ###

        # Parameters shared by both sounds: number of harmonics and the
        # stochastic-approximation decimation factor.
        #MAX NUMBER OF HARMONICS SOUND 1
        nH_label = "nH:"
        Label(self.parent, text=nH_label).grid(row=10, column=0, sticky=W, padx=(5,5), pady=(2,2))
        self.nH = Entry(self.parent, justify=CENTER)
        self.nH["width"] = 5
        self.nH.grid(row=10, column=0, sticky=W, padx=(35,5), pady=(2,2))
        self.nH.delete(0, END)
        self.nH.insert(0, "60")

        #DECIMATION FACTOR SOUND 1
        stocf_label = "stocf:"
        Label(self.parent, text=stocf_label).grid(row=10, column=0, sticky=W, padx=(98,5), pady=(2,2))
        self.stocf = Entry(self.parent, justify=CENTER)
        self.stocf["width"] = 5
        self.stocf.grid(row=10, column=0, sticky=W, padx=(138,5), pady=(2,2))
        self.stocf.delete(0, END)
        self.stocf.insert(0, "0.1")

        #BUTTON TO DO THE ANALYSIS OF THE SOUND
        self.compute = Button(self.parent, text="Analysis", command=self.analysis, bg="dark red", fg="white")
        self.compute.grid(row=10, column=0, padx=(210, 5), pady=(2,2), sticky=W)

        ###
        #SEPARATION LINE
        Frame(self.parent,height=1,width=50,bg="black").grid(row=11, pady=5, sticky=W+E)
        ###

        # Morph interpolation envelopes: Python-literal lists of time,value
        # pairs, eval'ed in transformation_synthesis.
        #
        hfreqIntp_label = "harmonic frequencies interpolation factors, 0 to 1 (time,value pairs)"
        Label(self.parent, text=hfreqIntp_label).grid(row=12, column=0, sticky=W, padx=5, pady=(2,2))
        self.hfreqIntp = Entry(self.parent, justify=CENTER)
        self.hfreqIntp["width"] = 35
        self.hfreqIntp.grid(row=13, column=0, sticky=W+E, padx=5, pady=(0,2))
        self.hfreqIntp.delete(0, END)
        self.hfreqIntp.insert(0, "[0, 0, .1, 0, .9, 1, 1, 1]")

        #
        hmagIntp_label = "harmonic magnitudes interpolation factors, 0 to 1 (time,value pairs)"
        Label(self.parent, text=hmagIntp_label).grid(row=14, column=0, sticky=W, padx=5, pady=(5,2))
        self.hmagIntp = Entry(self.parent, justify=CENTER)
        self.hmagIntp["width"] = 35
        self.hmagIntp.grid(row=15, column=0, sticky=W+E, padx=5, pady=(0,2))
        self.hmagIntp.delete(0, END)
        self.hmagIntp.insert(0, "[0, 0, .1, 0, .9, 1, 1, 1]")

        #
        stocIntp_label = "stochastic component interpolation factors, 0 to 1 (time,value pairs)"
        Label(self.parent, text=stocIntp_label).grid(row=16, column=0, sticky=W, padx=5, pady=(5,2))
        self.stocIntp = Entry(self.parent, justify=CENTER)
        self.stocIntp["width"] = 35
        self.stocIntp.grid(row=17, column=0, sticky=W+E, padx=5, pady=(0,2))
        self.stocIntp.delete(0, END)
        self.stocIntp.insert(0, "[0, 0, .1, 0, .9, 1, 1, 1]")

        #BUTTON TO DO THE SYNTHESIS
        self.compute = Button(self.parent, text="Apply Transformation", command=self.transformation_synthesis, bg="dark green", fg="white")
        self.compute.grid(row=18, column=0, padx=5, pady=(10,15), sticky=W)

        #BUTTON TO PLAY TRANSFORMATION SYNTHESIS OUTPUT
        self.transf_output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation1.get())[:-4] + '_hpsMorph.wav'), bg="gray30", fg="white")
        self.transf_output.grid(row=18, column=0, padx=(165,5), pady=(10,15), sticky=W)

        # define options for opening file
        self.file_opt = options = {}
        options['defaultextension'] = '.wav'
        options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
        options['initialdir'] = '../../sounds/'
        options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'

    def browse_file1(self):
        """Open a file dialog and put the chosen path into the first path entry."""
        self.filename1 = tkFileDialog.askopenfilename(**self.file_opt)

        #set the text of the self.filelocation
        self.filelocation1.delete(0, END)
        self.filelocation1.insert(0,self.filename1)

    def browse_file2(self):
        """Open a file dialog and put the chosen path into the second path entry."""
        self.filename2 = tkFileDialog.askopenfilename(**self.file_opt)

        #set the text of the self.filelocation
        self.filelocation2.delete(0, END)
        self.filelocation2.insert(0,self.filename2)

    def analysis(self):
        """Read all widget values, run the HPS analysis of both sounds, and
        cache the results on self for transformation_synthesis."""
        try:
            inputFile1 = self.filelocation1.get()
            window1 = self.w1_type.get()
            M1 = int(self.M1.get())
            N1 = int(self.N1.get())
            t1 = int(self.t1.get())
            minSineDur1 = float(self.minSineDur1.get())
            minf01 = int(self.minf01.get())
            maxf01 = int(self.maxf01.get())
            f0et1 = int(self.f0et1.get())
            harmDevSlope1 = float(self.harmDevSlope1.get())

            nH = int(self.nH.get())
            stocf = float(self.stocf.get())

            inputFile2 = self.filelocation2.get()
            window2 = self.w2_type.get()
            M2 = int(self.M2.get())
            N2 = int(self.N2.get())
            t2 = int(self.t2.get())
            minSineDur2 = float(self.minSineDur2.get())
            minf02 = int(self.minf02.get())
            maxf02 = int(self.maxf02.get())
            f0et2 = int(self.f0et2.get())
            harmDevSlope2 = float(self.harmDevSlope2.get())

            self.inputFile1, self.fs1, self.hfreq1, self.hmag1, self.stocEnv1, \
            self.inputFile2, self.hfreq2, self.hmag2, self.stocEnv2 = hM.analysis(inputFile1, window1, M1, N1, t1, \
                minSineDur1, nH, minf01, maxf01, f0et1, harmDevSlope1, stocf, inputFile2, window2, M2, N2, t2, minSineDur2, minf02, maxf02, f0et2, harmDevSlope2)

        except ValueError as errorMessage:
            tkMessageBox.showerror("Input values error", errorMessage)

    def transformation_synthesis(self):
        """Morph the two analyzed sounds using the interpolation envelopes and
        synthesize the output file."""
        try:
            inputFile1 = self.inputFile1
            fs = self.fs1
            hfreq1 = self.hfreq1
            hmag1 = self.hmag1
            stocEnv1 = self.stocEnv1
            inputFile2 = self.inputFile2
            hfreq2 = self.hfreq2
            hmag2 = self.hmag2
            stocEnv2 = self.stocEnv2
            # NOTE(review): eval of raw entry text — acceptable for a local
            # teaching GUI, but it executes whatever the user typed.
            hfreqIntp = np.array(eval(self.hfreqIntp.get()))
            hmagIntp = np.array(eval(self.hmagIntp.get()))
            stocIntp = np.array(eval(self.stocIntp.get()))

            hM.transformation_synthesis(inputFile1, fs, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)

        except ValueError as errorMessage:
            tkMessageBox.showerror("Input values error", errorMessage)

        except AttributeError:
            # Attributes like self.inputFile1 only exist after analysis() ran.
            tkMessageBox.showerror("Analysis not computed", "First you must analyse the sound!")
| agpl-3.0 |
thehub/hubplus | apps/plus_permissions/types/Contact.py | 1 | 2187 | from apps.plus_permissions.interfaces import InterfaceReadProperty, InterfaceWriteProperty, InterfaceCallProperty, InterfaceReadWriteProperty
from apps.plus_permissions.models import SetSliderOptions, SetAgentDefaults, SetPossibleTypes, SetSliderAgents
from apps.plus_contacts.models import Contact
from apps.plus_groups.models import TgGroup
from django.db.models.signals import post_save
import datetime
content_type = Contact
from apps.plus_permissions.default_agents import get_or_create_root_location, get_anonymous_group, get_all_members_group, get_creator_agent
# we need a special set_permissions interface which is only editable by the scontext_admin and
# determines who can set permissions or override them for an object.
class ContactAdmin:
    """Permission interface granting administrative access to a Contact:
    read/write on its profile fields and the ability to invite it or turn
    it into a member."""
    pk = InterfaceReadProperty
    first_name = InterfaceReadWriteProperty
    last_name = InterfaceReadWriteProperty
    organisation = InterfaceReadWriteProperty
    email_address = InterfaceReadWriteProperty
    location = InterfaceReadWriteProperty
    apply_msg = InterfaceReadWriteProperty
    find_out = InterfaceReadWriteProperty
    invited_by = InterfaceReadWriteProperty
    # Callable capabilities exposed on the Contact object.
    become_member = InterfaceCallProperty
    invite = InterfaceCallProperty
class ContactInvite:
    """Narrower permission interface: may only read the pk and call the
    invitation-related methods on a Contact."""
    pk = InterfaceReadProperty
    invite = InterfaceCallProperty
    group_invite_message = InterfaceCallProperty
from apps.plus_permissions.models import add_type_to_interface_map

# Register the named interfaces above for the Contact content type.
ContactInterfaces = {'ContactAdmin': ContactAdmin, 'ContactInvite' : ContactInvite }
add_type_to_interface_map(Contact, ContactInterfaces)

# use InterfaceOrder to draw the slider and constraints, these are used in rendering the sliders and in validating the results
# these exist on a per type basis and are globals for their type.
# they don't need to be stored in the db
SliderOptions = {'InterfaceOrder':['ContactAdmin']}
SetSliderOptions(Contact, SliderOptions)

# ChildTypes are used to determine what types of objects can be created in this security context (and acquire security context from this). These are used when creating an explicit security context for an object of this type.
# Contacts cannot act as a security parent for anything.
child_types = []
SetPossibleTypes(Contact, child_types)
| gpl-3.0 |
benosteen/mypaint | lib/command.py | 1 | 10185 | # This file is part of MyPaint.
# Copyright (C) 2007-2008 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import layer
import helpers
class CommandStack:
    """Classic undo/redo stack of command objects.

    Each command exposes redo()/undo() and an `automatic_undo` flag; commands
    marked automatic (e.g. layer selection) do not count as user-visible steps
    when the history is trimmed.
    """

    def __init__(self):
        # Hooks invoked before every do/undo/redo (e.g. to finish pending input).
        self.call_before_action = []
        self.clear()

    def clear(self):
        """Drop the entire undo and redo history."""
        self.undo_stack = []
        self.redo_stack = []

    def _run_pre_action_hooks(self):
        for hook in self.call_before_action:
            hook()

    def do(self, command):
        """Execute *command* and record it; any redoable future is discarded."""
        self._run_pre_action_hooks()
        self.redo_stack = []
        command.redo()
        self.undo_stack.append(command)
        self.reduce_undo_history()

    def undo(self):
        """Undo the most recent command; return it, or None if nothing to undo."""
        if not self.undo_stack:
            return None
        self._run_pre_action_hooks()
        command = self.undo_stack.pop()
        command.undo()
        self.redo_stack.append(command)
        return command

    def redo(self):
        """Redo the most recently undone command; return it, or None."""
        if not self.redo_stack:
            return None
        self._run_pre_action_hooks()
        command = self.redo_stack.pop()
        command.redo()
        self.undo_stack.append(command)
        return command

    def reduce_undo_history(self):
        """Trim the undo stack to the newest 30 user-visible steps.

        Automatic-undo commands ride along with the step they belong to and
        are not counted against the limit.
        """
        kept = []
        steps = 0
        for item in reversed(self.undo_stack):
            kept.append(item)
            if not item.automatic_undo:
                steps += 1
                if steps == 30:  # and memory > ...
                    break
        kept.reverse()
        self.undo_stack = kept

    def get_last_command(self):
        """Return the most recent undoable command, or None if the stack is empty."""
        if not self.undo_stack:
            return None
        return self.undo_stack[-1]
class Action:
    '''Base class for all undo/redoable actions. Subclasses must implement the
    undo and redo methods. They should have a reference to the document in
    self.doc'''
    # Automatic-undo actions (e.g. selecting a layer) are not counted as
    # user-visible steps when CommandStack.reduce_undo_history trims history.
    automatic_undo = False

    def redo(self):
        raise NotImplementedError

    def undo(self):
        raise NotImplementedError

    # Utility functions
    def _notify_canvas_observers(self, affected_layers):
        # Report a single bounding box covering all affected layers to every
        # registered canvas observer (used to trigger redraws).
        bbox = helpers.Rect()
        for layer in affected_layers:
            layer_bbox = layer.surface.get_bbox()
            bbox.expandToIncludeRect(layer_bbox)
        for func in self.doc.canvas_observers:
            func(*bbox)

    def _notify_document_observers(self):
        # Document-level change notification (layer structure, selection, ...).
        self.doc.call_doc_observers()
class Stroke(Action):
    """Undoable record of one finished brush stroke on the current layer."""

    def __init__(self, doc, stroke, snapshot_before):
        """called only when the stroke was just completed and is now fully rendered"""
        self.doc = doc
        assert stroke.finished
        self.stroke = stroke # immutable; not used for drawing any more, just for inspection
        self.before = snapshot_before
        self.doc.layer.add_stroke(stroke, snapshot_before)
        # this snapshot will include the updated stroke list (modified by the line above)
        self.after = self.doc.layer.save_snapshot()

    def undo(self):
        # Restore the layer exactly as it was before the stroke was drawn.
        self.doc.layer.load_snapshot(self.before)

    def redo(self):
        # Restore the layer with the stroke applied.
        self.doc.layer.load_snapshot(self.after)
class ClearLayer(Action):
    """Clear the current layer, keeping a snapshot so the clear can be undone."""

    def __init__(self, doc):
        self.doc = doc

    def redo(self):
        # Snapshot is taken here (not in __init__) so redo-after-undo captures
        # the layer state at execution time.
        self.before = self.doc.layer.save_snapshot()
        self.doc.layer.clear()
        self._notify_document_observers()

    def undo(self):
        self.doc.layer.load_snapshot(self.before)
        del self.before  # release the snapshot's memory promptly
        self._notify_document_observers()
class LoadLayer(Action):
    """Replace the current layer's contents with pixbuf data placed at (x, y)."""

    def __init__(self, doc, data, x, y):
        self.doc = doc
        # Packed in the order load_from_pixbuf expects.
        # NOTE(review): load_from_pixbuf receives this [x, y, data] list as a
        # single argument — confirm it unpacks it on the layer side.
        self.data = [x, y, data]

    def redo(self):
        layer = self.doc.layer
        self.before = layer.save_snapshot()
        layer.load_from_pixbuf(self.data)

    def undo(self):
        self.doc.layer.load_snapshot(self.before)
        del self.before  # release the snapshot's memory promptly
class MergeLayer(Action):
    """merge the current layer into dst"""

    def __init__(self, doc, dst_idx):
        self.doc = doc
        self.dst_layer = self.doc.layers[dst_idx]
        # Removal of the (current) source layer is delegated to a nested command.
        self.remove_src = RemoveLayer(doc)

    def redo(self):
        self.dst_before = self.dst_layer.save_snapshot()
        assert self.doc.layer is not self.dst_layer
        self.doc.layer.merge_into(self.dst_layer)
        self.remove_src.redo()
        # The destination's index may have shifted after the removal, so it is
        # looked up again before selecting it.
        self.select_dst = SelectLayer(self.doc, self.doc.layers.index(self.dst_layer))
        self.select_dst.redo()
        self._notify_document_observers()

    def undo(self):
        # Reverse the redo() steps in exactly the opposite order.
        self.select_dst.undo()
        del self.select_dst
        self.remove_src.undo()
        self.dst_layer.load_snapshot(self.dst_before)
        del self.dst_before
        self._notify_document_observers()
class AddLayer(Action):
    """Insert a brand-new empty layer into the stack and select it."""

    def __init__(self, doc, insert_idx=None, after=None, name=''):
        self.doc = doc
        self.insert_idx = insert_idx
        if after:
            # Position directly above the given layer, overriding insert_idx.
            self.insert_idx = self.doc.layers.index(after) + 1
        self.layer = layer.Layer(name)
        self.layer.surface.observers.append(self.doc.layer_modified_cb)

    def redo(self):
        stack = self.doc.layers
        stack.insert(self.insert_idx, self.layer)
        # Remember the previous selection so undo() can restore it.
        self.prev_idx = self.doc.layer_idx
        self.doc.layer_idx = self.insert_idx
        self._notify_document_observers()

    def undo(self):
        self.doc.layers.remove(self.layer)
        self.doc.layer_idx = self.prev_idx
        self._notify_document_observers()
class RemoveLayer(Action):
    """Delete a layer from the document's layer stack."""
    def __init__(self, doc, layer=None):
        # When layer is None, the currently selected layer is removed
        # at redo() time (and remembered for undo()).
        self.doc = doc
        self.layer = layer
    def redo(self):
        # At least one layer must remain after the removal.
        assert len(self.doc.layers) > 1
        if self.layer:
            self.idx = self.doc.layers.index(self.layer)
            self.doc.layers.remove(self.layer)
        else:
            self.idx = self.doc.layer_idx
            self.layer = self.doc.layers.pop(self.doc.layer_idx)
        if self.doc.layer_idx == len(self.doc.layers):
            # Selection pointed past the new end of the stack; clamp it.
            self.doc.layer_idx -= 1
        self._notify_canvas_observers([self.layer])
        self._notify_document_observers()
    def undo(self):
        # Re-insert the removed layer at its old index and reselect it.
        self.doc.layers.insert(self.idx, self.layer)
        self.doc.layer_idx = self.idx
        self._notify_canvas_observers([self.layer])
        self._notify_document_observers()
class SelectLayer(Action):
    """Make the layer at a given index the current one."""
    # Selection changes are merged into neighbouring undo steps.
    automatic_undo = True

    def __init__(self, doc, idx):
        self.doc = doc
        self.idx = idx

    def redo(self):
        assert 0 <= self.idx < len(self.doc.layers)
        self.prev_idx = self.doc.layer_idx
        self.doc.layer_idx = self.idx
        self._notify_document_observers()

    def undo(self):
        self.doc.layer_idx = self.prev_idx
        self._notify_document_observers()
class MoveLayer(Action):
    """Shift one layer from one stack position to another."""

    def __init__(self, doc, was_idx, new_idx, select_new=False):
        self.doc = doc
        self.was_idx = was_idx
        self.new_idx = new_idx
        self.select_new = select_new

    def _shift(self, src, dst):
        # Pop the layer found at src and reinsert it at dst.
        moved = self.doc.layers[src]
        self.doc.layers.remove(moved)
        self.doc.layers.insert(dst, moved)
        return moved

    def redo(self):
        moved = self._shift(self.was_idx, self.new_idx)
        if self.select_new:
            # Remember what was selected so undo() can restore it.
            self.was_selected = self.doc.layer_idx
            self.doc.layer_idx = self.new_idx
        self._notify_canvas_observers([moved])
        self._notify_document_observers()

    def undo(self):
        moved = self._shift(self.new_idx, self.was_idx)
        if self.select_new:
            self.doc.layer_idx = self.was_selected
            self.was_selected = None
        self._notify_canvas_observers([moved])
        self._notify_document_observers()
class ReorderLayers(Action):
    """Replace the layer stacking order wholesale, keeping the selection."""

    def __init__(self, doc, new_order):
        self.doc = doc
        self.old_order = doc.layers[:]
        # Track the selected layer by identity, not by index.
        self.selection = self.old_order[doc.layer_idx]
        self.new_order = new_order
        # The new order must be a permutation of the old one.
        for entry in new_order:
            assert entry in self.old_order
        assert len(self.old_order) == len(new_order)

    def _apply(self, order):
        # Mutate the stack in place and re-point the selection index at
        # the same layer object, wherever it ended up.
        self.doc.layers[:] = order
        self.doc.layer_idx = self.doc.layers.index(self.selection)
        self._notify_canvas_observers(self.doc.layers)
        self._notify_document_observers()

    def redo(self):
        self._apply(self.new_order)

    def undo(self):
        self._apply(self.old_order)
class SetLayerVisibility(Action):
    """Toggle a layer's visibility flag."""

    def __init__(self, doc, visible, layer):
        self.doc = doc
        self.new_visibility = visible
        self.layer = layer

    def _set(self, flag):
        # Apply the flag and broadcast the change.
        self.layer.visible = flag
        self._notify_canvas_observers([self.layer])
        self._notify_document_observers()

    def redo(self):
        # Capture the current flag so undo() can restore it.
        self.old_visibility = self.layer.visible
        self._set(self.new_visibility)

    def undo(self):
        self._set(self.old_visibility)
class SetLayerLocked(Action):
    """Toggle a layer's locked (edit-protection) flag."""

    def __init__(self, doc, locked, layer):
        self.doc = doc
        self.new_locked = locked
        self.layer = layer

    def _set(self, flag):
        # Apply the flag and broadcast the change.
        self.layer.locked = flag
        self._notify_canvas_observers([self.layer])
        self._notify_document_observers()

    def redo(self):
        # Capture the current flag so undo() can restore it.
        self.old_locked = self.layer.locked
        self._set(self.new_locked)

    def undo(self):
        self._set(self.old_locked)
class SetLayerOpacity(Action):
    """Change the opacity of a layer (the current one by default)."""

    def __init__(self, doc, opacity, layer=None):
        self.doc = doc
        self.new_opacity = opacity
        self.layer = layer

    def _target(self):
        # Fall back to the document's current layer when none was given.
        return self.layer if self.layer else self.doc.layer

    def redo(self):
        target = self._target()
        self.old_opacity = target.opacity
        target.opacity = self.new_opacity
        self._notify_canvas_observers([target])
        self._notify_document_observers()

    def undo(self):
        target = self._target()
        target.opacity = self.old_opacity
        self._notify_canvas_observers([target])
        self._notify_document_observers()
| gpl-2.0 |
rgerkin/neuroConstruct | lib/jython/Lib/distutils/tests/test_dir_util.py | 106 | 4382 | """Tests for distutils.dir_util."""
import unittest
import os
import stat
import shutil
import sys
from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree,
ensure_relative)
from distutils import log
from distutils.tests import support
from test.test_support import run_unittest
class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
    """Unit tests for the distutils.dir_util helper functions."""
    def _log(self, msg, *args):
        # Stand-in for distutils.log.info that records every message so
        # the tests can assert on what was logged.
        if len(args) > 0:
            self._logs.append(msg % args)
        else:
            self._logs.append(msg)
    def setUp(self):
        super(DirUtilTestCase, self).setUp()
        self._logs = []
        tmp_dir = self.mkdtemp()
        self.root_target = os.path.join(tmp_dir, 'deep')
        self.target = os.path.join(self.root_target, 'here')
        self.target2 = os.path.join(tmp_dir, 'deep2')
        # Capture log output by monkey-patching distutils' logger.
        self.old_log = log.info
        log.info = self._log
    def tearDown(self):
        # Restore the original logger before the base cleanup runs.
        log.info = self.old_log
        super(DirUtilTestCase, self).tearDown()
    def test_mkpath_remove_tree_verbosity(self):
        # verbose=0 must log nothing.
        mkpath(self.target, verbose=0)
        wanted = []
        self.assertEqual(self._logs, wanted)
        remove_tree(self.root_target, verbose=0)
        # verbose=1 reports each directory created / removed.
        mkpath(self.target, verbose=1)
        wanted = ['creating %s' % self.root_target,
                  'creating %s' % self.target]
        self.assertEqual(self._logs, wanted)
        self._logs = []
        remove_tree(self.root_target, verbose=1)
        wanted = ["removing '%s' (and everything under it)" % self.root_target]
        self.assertEqual(self._logs, wanted)
    @unittest.skipIf(sys.platform.startswith('win'),
                     "This test is only appropriate for POSIX-like systems.")
    def test_mkpath_with_custom_mode(self):
        # Get and set the current umask value for testing mode bits.
        umask = os.umask(0o002)
        os.umask(umask)
        mkpath(self.target, 0o700)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask)
        mkpath(self.target2, 0o555)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask)
    def test_create_tree_verbosity(self):
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
        self.assertEqual(self._logs, [])
        remove_tree(self.root_target, verbose=0)
        # Only the base directory creation is logged, not the files.
        wanted = ['creating %s' % self.root_target]
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
        self.assertEqual(self._logs, wanted)
        remove_tree(self.root_target, verbose=0)
    def test_copy_tree_verbosity(self):
        mkpath(self.target, verbose=0)
        copy_tree(self.target, self.target2, verbose=0)
        self.assertEqual(self._logs, [])
        remove_tree(self.root_target, verbose=0)
        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        f = open(a_file, 'w')
        try:
            f.write('some content')
        finally:
            f.close()
        wanted = ['copying %s -> %s' % (a_file, self.target2)]
        copy_tree(self.target, self.target2, verbose=1)
        self.assertEqual(self._logs, wanted)
        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)
    def test_copy_tree_skips_nfs_temp_files(self):
        # Files named .nfs* are NFS client artifacts and must be skipped.
        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        nfs_file = os.path.join(self.target, '.nfs123abc')
        for f in a_file, nfs_file:
            fh = open(f, 'w')
            try:
                fh.write('some content')
            finally:
                fh.close()
        copy_tree(self.target, self.target2)
        self.assertEqual(os.listdir(self.target2), ['ok.txt'])
        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)
    def test_ensure_relative(self):
        if os.sep == '/':
            self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
            self.assertEqual(ensure_relative('some/path'), 'some/path')
        else:  # \\ (Windows-style separators)
            self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo')
            self.assertEqual(ensure_relative('home\\foo'), 'home\\foo')
def test_suite():
    """Return a TestSuite containing every DirUtilTestCase test."""
    return unittest.makeSuite(DirUtilTestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_unittest(test_suite())
| gpl-2.0 |
spring-week-topos/nova-week | nova/virt/xenapi/driver.py | 6 | 29913 | # Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:connection_url: URL for connection to XenServer/Xen Cloud Platform.
:connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import math
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
from nova.virt import driver
from nova.virt.xenapi.client import session
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)

# Configuration options for the XenAPI driver, registered below under the
# [xenserver] group (each keeps a deprecated alias in DEFAULT).
xenapi_opts = [
    cfg.StrOpt('connection_url',
               deprecated_name='xenapi_connection_url',
               deprecated_group='DEFAULT',
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'A special value of unix://local can be used to connect '
                    'to the local unix socket. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('connection_username',
               default='root',
               deprecated_name='xenapi_connection_username',
               deprecated_group='DEFAULT',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('connection_password',
               deprecated_name='xenapi_connection_password',
               deprecated_group='DEFAULT',
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver',
               secret=True),
    cfg.FloatOpt('vhd_coalesce_poll_interval',
                 default=5.0,
                 deprecated_name='xenapi_vhd_coalesce_poll_interval',
                 deprecated_group='DEFAULT',
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('check_host',
                default=True,
                deprecated_name='xenapi_check_host',
                deprecated_group='DEFAULT',
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('vhd_coalesce_max_attempts',
               default=20,
               deprecated_name='xenapi_vhd_coalesce_max_attempts',
               deprecated_group='DEFAULT',
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('sr_base_path',
               default='/var/run/sr-mount',
               deprecated_name='xenapi_sr_base_path',
               deprecated_group='DEFAULT',
               help='Base path to the storage repository'),
    cfg.StrOpt('target_host',
               deprecated_name='target_host',
               deprecated_group='DEFAULT',
               help='The iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               deprecated_name='target_port',
               deprecated_group='DEFAULT',
               help='The iSCSI Target Port, default is port 3260'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               deprecated_name='iqn_prefix',
               deprecated_group='DEFAULT',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('remap_vbd_dev',
                default=False,
                deprecated_name='xenapi_remap_vbd_dev',
                deprecated_group='DEFAULT',
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('remap_vbd_dev_prefix',
               default='sd',
               deprecated_name='xenapi_remap_vbd_dev_prefix',
               deprecated_group='DEFAULT',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
]

CONF = cfg.CONF
# xenapi options in the DEFAULT group were deprecated in Icehouse
CONF.register_opts(xenapi_opts, 'xenserver')
CONF.import_opt('host', 'nova.netconf')

# Coefficients of the linear memory-overhead model used by
# estimate_instance_overhead():
#   overhead_mb = OVERHEAD_BASE + memory_mb * OVERHEAD_PER_MB
#                 + vcpus * OVERHEAD_PER_VCPU
OVERHEAD_BASE = 3
OVERHEAD_PER_MB = 0.00781
OVERHEAD_PER_VCPU = 1.5
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenserver.connection_url
username = CONF.xenserver.connection_username
password = CONF.xenserver.connection_password
if not url or password is None:
raise Exception(_('Must specify connection_url, '
'connection_username (optionally), and '
'connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = session.XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
def init_host(self, host):
if CONF.xenserver.check_host:
vm_utils.ensure_correct_host(self._session)
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_('Failure while cleaning up attached VDIs'))
def instance_exists(self, instance_name):
"""Checks existence of an instance on the host.
:param instance_name: The name of the instance to lookup
Returns True if an instance with the supplied name exists on
the host, False otherwise.
NOTE(belliott): This is an override of the base method for
efficiency.
"""
return self._vmops.instance_exists(instance_name)
def estimate_instance_overhead(self, instance_info):
"""Get virtualization overhead required to build an instance of the
given flavor.
:param instance_info: Instance/flavor to calculate overhead for.
:returns: Overhead memory in MB.
"""
# XenServer memory overhead is proportional to the size of the
# VM. Larger flavor VMs become more efficient with respect to
# overhead.
# interpolated formula to predict overhead required per vm.
# based on data from:
# https://wiki.openstack.org/wiki/XenServer/Overhead
# Some padding is done to each value to fit all available VM data
memory_mb = instance_info['memory_mb']
vcpus = instance_info.get('vcpus', 1)
overhead = ((memory_mb * OVERHEAD_PER_MB) + (vcpus * OVERHEAD_PER_VCPU)
+ OVERHEAD_BASE)
overhead = math.ceil(overhead)
return {'memory_mb': overhead}
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
return self._vmops.list_instance_uuids()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(context, instance,
block_device_info,
power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type,
bad_volumes_callback=bad_volumes_callback)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def change_instance_metadata(self, context, instance, diff):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk
"""
# NOTE(vish): Xen currently does not use network info.
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor, block_device_info)
def suspend(self, instance):
"""suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def set_bootable(self, instance, is_bootable):
"""Set the ability to power on/off an instance."""
self._vmops.set_bootable(instance, is_bootable)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
"""Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
"""reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance_ref, network_info)
def unplug_vifs(self, instance_ref, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
"""Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
# e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.iteritems():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
def get_console_output(self, context, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warn(_('Could not determine key: %s') % err,
instance=instance)
self._initiator = None
return {
'ip': self.get_host_ip_addr(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
@staticmethod
def get_host_ip_addr():
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return xs_url.netloc
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage from VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenserver.connection_username,
'password': CONF.xenserver.connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / units.Mi
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
total_disk_gb = host_stats['disk_total'] / units.Gi
used_disk_gb = host_stats['disk_used'] / units.Gi
hyper_ver = utils.convert_version_to_int(self._session.product_version)
dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'],
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': host_stats['vcpus_used'],
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb,
'hypervisor_type': 'xen',
'hypervisor_version': hyper_ver,
'hypervisor_hostname': host_stats['host_hostname'],
# Todo(bobba) cpu_info may be in a format not supported by
# arch_filter.py - see libvirt/driver.py get_cpu_info
'cpu_info': jsonutils.dumps(host_stats['host_cpu_info']),
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
'pci_passthrough_devices': jsonutils.dumps(
host_stats['pci_passthrough_devices'])}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
# NOTE(salvatore-orlando): it enforces security groups on
# host initialization and live migration.
# In XenAPI we do not assume instances running upon host initialization
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False, disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
return self._vmops.check_can_live_migrate_destination(ctxt,
instance_ref,
block_migration,
disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param disk_over_commit: if true, allow disk over commit
"""
pass
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
"""
return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
dest_check_data)
def get_instance_disk_info(self, instance_name):
"""Used by libvirt for live migration. We rely on xenapi
checks to do this for us.
"""
pass
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Performs the live migration of the specified instance.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: implementation specific params
"""
self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info):
# NOTE(johngarbutt) Destroying the VM is not appropriate here
# and in the cases where it might make sense,
# XenServer has already done it.
# TODO(johngarbutt) investigate if any cleanup is required here
pass
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, data, migrate_data=None):
"""Preparation live migration.
:param block_device_info:
It must be the result of _get_instance_volume_bdms()
at compute manager.
"""
# TODO(JohnGarbutt) look again when boot-from-volume hits trunk
pre_live_migration_result = {}
pre_live_migration_result['sr_uuid_map'] = \
self._vmops.attach_block_device_volumes(block_device_info)
return pre_live_migration_result
def post_live_migration(self, ctxt, instance_ref, block_device_info,
migrate_data=None):
"""Post operation of live migration at source host.
:param ctxt: security context
:instance_ref: instance object that was migrated
:block_device_info: instance block device information
:param migrate_data: if not None, it is a dict which has data
"""
self._vmops.post_live_migration(ctxt, instance_ref, migrate_data)
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param : block_migration: if true, post operation of block_migration.
"""
self._vmops.post_live_migration_at_destination(ctxt, instance_ref,
network_info, block_device_info, block_device_info)
def unfilter_instance(self, instance_ref, network_info):
"""Removes security groups configured for an instance."""
return self._vmops.unfilter_instance(instance_ref, network_info)
def refresh_security_group_rules(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when security group rules are updated.
"""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when instances are added/removed to a security group.
"""
return self._vmops.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""Updates security group rules for specified instance.
Invoked when instances are added/removed to a security group
or when a rule is added/removed to a security group.
"""
return self._vmops.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
return self._vmops.refresh_provider_fw_rules()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run the update first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
'shutdown', even though the API also accepts 'startup'. As this is
not technically possible on XenServer, since the host is the same
physical machine as the hypervisor, if this is requested, we need to
raise an exception.
"""
if action in ("reboot", "shutdown"):
return self._host.host_power_action(host, action)
else:
msg = _("Host startup on XenServer is not supported.")
raise NotImplementedError(msg)
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._host.set_host_enabled(host, enabled)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
return self._host.get_host_uptime(host)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self._host.host_maintenance_mode(host, mode)
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """Resume guest state when a host is booted.

    Only powers the instance back on; ``context``, ``network_info`` and
    ``block_device_info`` are accepted for driver-interface
    compatibility but are unused here.
    """
    self._vmops.power_on(instance)
def get_per_instance_usage(self):
    """Get information about instance resource usage.

    Pure delegation to the vmops layer.

    :returns: dict of nova uuid => dict of usage info
    """
    return self._vmops.get_per_instance_usage()
| apache-2.0 |
AhanM/ChessAI | action.py | 1 | 1274 | # action.py
# ---------
from game import *
class Action(object):
    """Formalize a chess move: a piece together with its destination.

    An Action is used to decide whether a move is feasible (no
    collision with a same-colored piece) and whether it captures an
    enemy piece.
    """

    def __init__(self, piece, pos, config):
        """Build an action for *piece* moving to square *pos*.

        :param piece: the moving piece; must expose ``color``, ``pos``
                      and ``toString()``.
        :param pos: destination square as an ``(x, y)`` tuple.
        :param config: board configuration providing
                       ``getPlayerPieces``/``getEnemyPieces``.
        """
        # NOTE: the original signature used Python-2-only tuple
        # parameter unpacking ``(x, y)`` (removed by PEP 3113, a
        # SyntaxError on Python 3).  Callers still pass a single tuple.
        x, y = pos
        self.piece = piece
        self.newPos = (x, y)
        self.color = piece.color
        self.promotion = False  # set True for pawn promotion
        self.player_pieces = config.getPlayerPieces(self.color)
        self.enemy_pieces = config.getEnemyPieces(self.color)

    def toString(self):
        """Return a human-readable description, e.g. "P a2 -> (0, 3)"."""
        return self.piece.toString() + " -> " + str(self.newPos)

    def isValid(self):
        """Return False when the destination is occupied by a piece of
        the same color (direct position collision), True otherwise."""
        # all() short-circuits on the first collision.
        return all(self.newPos != piece.pos for piece in self.player_pieces)

    def isCapture(self):
        """Return whether this action results in a capture or not."""
        # any() short-circuits; the original built a full list of all
        # enemy positions before testing membership.
        return any(self.newPos == ep.pos for ep in self.enemy_pieces)

    def capturedPiece(self):
        """Return the enemy piece standing on the destination square,
        or None when the action is not a capture."""
        for enemypiece in self.enemy_pieces:
            if self.newPos == enemypiece.pos:
                return enemypiece
        return None
Celthi/youtube-dl-GUI | youtube_dl/extractor/glide.py | 153 | 1402 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class GlideIE(InfoExtractor):
    """Extractor for Glide mobile video messages (share.glide.me)."""
    IE_DESC = 'Glide mobile video messages (glide.me)'
    _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
    _TEST = {
        'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
        'md5': '4466372687352851af2d131cfaa8a4c7',
        'info_dict': {
            'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
            'ext': 'mp4',
            'title': 'Damon Timm\'s Glide message',
            # Raw string: '\.' in a plain literal is an invalid escape
            # sequence (SyntaxWarning on modern Python); value unchanged.
            'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        """Download the share page and extract title, video URL and
        thumbnail for the Glide message."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<title>(.*?)</title>', webpage, 'title')
        # The embedded URLs are scheme-less; prepend http(s) explicitly.
        video_url = self.http_scheme() + self._search_regex(
            r'<source src="(.*?)" type="video/mp4">', webpage, 'video URL')
        thumbnail_url = self._search_regex(
            r'<img id="video-thumbnail" src="(.*?)"',
            webpage, 'thumbnail url', fatal=False)
        # The thumbnail lookup is non-fatal and may yield None; only
        # prefix the scheme when a URL was actually found.  (The
        # original spelled this as an inverted conditional expression,
        # which read as if the None case got the prefix.)
        if thumbnail_url is None:
            thumbnail = None
        else:
            thumbnail = self.http_scheme() + thumbnail_url

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
        }
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.