repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
rocopartners/django-oscar | src/oscar/__init__.py | 32 | 2952 | import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 2, 0, 'dev')
def get_short_version():
    """Return only the major.minor part of the version, e.g. '1.2'."""
    major, minor = VERSION[0], VERSION[1]
    return '{0}.{1}'.format(major, minor)
def get_version():
    """Build the full human-readable version string from VERSION.

    Examples: '1.2', '1.2.3', '1.2 dev', '1.2 beta 1'.
    """
    version = '%s.%s' % (VERSION[0], VERSION[1])
    # A non-zero third component marks a point release.
    if VERSION[2]:
        version += '.%s' % VERSION[2]
    # Otherwise flag non-final builds with their release type.
    elif VERSION[3] != 'final':
        version += ' %s' % VERSION[3]
    # An optional fifth element (e.g. a beta number) is always appended.
    if len(VERSION) == 5:
        version += ' %s' % VERSION[4]
    return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')

# App labels that make up an Oscar shop, in the order they should appear
# in INSTALLED_APPS.  The third-party requirements come last.
OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.catalogue.reviews',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.search',
    'oscar.apps.voucher',
    'oscar.apps.wishlists',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.reports',
    'oscar.apps.dashboard.users',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.promotions',
    'oscar.apps.dashboard.catalogue',
    'oscar.apps.dashboard.offers',
    'oscar.apps.dashboard.partners',
    'oscar.apps.dashboard.pages',
    'oscar.apps.dashboard.ranges',
    'oscar.apps.dashboard.reviews',
    'oscar.apps.dashboard.vouchers',
    'oscar.apps.dashboard.communications',
    'oscar.apps.dashboard.shipping',
    # 3rd-party apps that oscar depends on
    'haystack',
    'treebeard',
    'sorl.thumbnail',
    'django_tables2',
]
def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides.

    ``overrides`` is a list/tuple of dotted app paths (e.g.
    'myproject.basket') that replace the core app whose label they end
    with.  A bare string is rejected because iterating it would treat
    each character as an app name.
    """
    if not overrides:
        # NOTE(review): this returns the module-level list itself, not a
        # copy; callers should not mutate the result.
        return OSCAR_CORE_APPS
    # Conservative import to ensure that this file can be loaded
    # without the presence Django.
    from django.utils import six
    if isinstance(overrides, six.string_types):
        raise ValueError(
            "get_core_apps expects a list or tuple of apps "
            "to override")

    def get_app_label(app_label, overrides):
        # Match on the suffix after 'oscar.apps.' so both 'basket' and
        # 'myproject.basket' style overrides work.
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if override.endswith(pattern):
                # Don't let a dashboard override capture the non-dashboard
                # app of the same name (e.g. 'dashboard.catalogue' vs
                # 'catalogue').
                if 'dashboard' in override and 'dashboard' not in pattern:
                    continue
                return override
        return app_label

    apps = []
    for app_label in OSCAR_CORE_APPS:
        apps.append(get_app_label(app_label, overrides))
    return apps
| bsd-3-clause |
noroutine/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_list.py | 34 | 4003 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_list
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: List Ansible Tower jobs.
description:
- List Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
status:
description:
- Only list jobs with this status.
default: null
choices: ['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']
page:
description:
- Page number of the results to fetch.
default: null
all_pages:
description:
- Fetch all the pages and return a single result.
default: False
query:
description:
- Query used to further filter the list of jobs. {"foo":"bar"} will be passed at ?foo=bar
default: null
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: List running jobs for the testing.yml playbook
tower_job_list:
status: running
query: {"playbook": "testing.yml"}
register: testing_jobs
tower_config_file: "~/tower_cli.cfg"
'''
RETURN = '''
count:
    description: Total count of objects returned
returned: success
type: int
sample: 51
next:
description: next page available for the listing
returned: success
type: int
sample: 3
previous:
description: previous page available for the listing
returned: success
type: int
sample: 1
results:
description: a list of job objects represented as dictionaries
returned: success
type: list
sample: [{"allow_simultaneous": false, "artifacts": {}, "ask_credential_on_launch": false,
"ask_inventory_on_launch": false, "ask_job_type_on_launch": false, "failed": false,
"finished": "2017-02-22T15:09:05.633942Z", "force_handlers": false, "forks": 0, "id": 2,
"inventory": 1, "job_explanation": "", "job_tags": "", "job_template": 5, "job_type": "run"}, ...]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode, tower_argument_spec, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
    """Module entry point: list Tower jobs matching the given filters."""
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        status=dict(choices=['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']),
        page=dict(type='int'),
        all_pages=dict(type='bool', default=False),
        query=dict(type='dict'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if not HAS_TOWER_CLI:
        # The tower_cli import at module level failed; abort with a hint.
        module.fail_json(msg='ansible-tower-cli required for this module')

    json_output = {}
    query = module.params.get('query')
    status = module.params.get('status')
    page = module.params.get('page')
    all_pages = module.params.get('all_pages')

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        try:
            job = tower_cli.get_resource('job')
            params = {'status': status, 'page': page, 'all_pages': all_pages}
            if query:
                # tower_cli expects the extra filters as key/value pairs.
                params['query'] = query.items()
            json_output = job.list(**params)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to list jobs: {0}'.format(excinfo), changed=False)

    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
| gpl-3.0 |
nathanbjenx/cairis | cairis/gui/TemplateRequirementPanel.py | 1 | 2080 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from BasePanel import BasePanel
from TemplateRequirementNotebook import TemplateRequirementNotebook
__author__ = 'Shamal Faily'
class TemplateRequirementPanel(BasePanel):
  """wx panel for creating/editing a single template requirement.

  Hosts a TemplateRequirementNotebook above the standard commit-button
  row, and knows how to populate its controls from a requirement object.
  """
  def __init__(self,parent):
    wx.Panel.__init__(self,parent,TEMPLATEREQUIREMENT_ID)

  def buildControls(self,isCreate,isUpdateable=True):
    """Lay out the notebook plus commit buttons in a vertical sizer."""
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    mainSizer.Add(TemplateRequirementNotebook(self),1,wx.EXPAND)
    mainSizer.Add(self.buildCommitButtonSizer(TEMPLATEREQUIREMENT_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
    self.SetSizer(mainSizer)

  def loadControls(self,req):
    """Populate the panel's widgets from the given requirement object."""
    nameCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_TEXTNAME_ID)
    assetCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_COMBOASSET_ID)
    descCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_TEXTDESCRIPTION_ID)
    typeCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_COMBOTYPE_ID)
    ratCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_TEXTRATIONALE_ID)
    fcCtrl = self.FindWindowById(TEMPLATEREQUIREMENT_TEXTFITCRITERION_ID)
    nameCtrl.SetValue(req.name())
    assetCtrl.SetValue(req.asset())
    descCtrl.SetValue(req.description())
    typeCtrl.SetValue(req.type())
    ratCtrl.SetValue(req.rationale())
    fcCtrl.SetValue(req.fitCriterion())
| apache-2.0 |
emonty/deb-vhd-util | tools/xm-test/lib/XmTestLib/XenDomain.py | 21 | 11296 | #!/usr/bin/python
"""
Copyright (C) International Business Machines Corp., 2005
Author: Dan Smith <danms@us.ibm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import commands
import re
import time
from Xm import *
from arch import *
from Test import *
from config import *
from Console import *
from XenDevice import *
from DomainTracking import *
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
def getDefaultKernel():
    # Delegate to the architecture-specific module.
    return arch.getDefaultKernel()
def getRdPath():
    # Architecture-specific ramdisk path.
    return arch.getRdPath()
def getUniqueName():
    """Get a uniqueish name for use in a domain"""
    # Derive a base name from the running test script, dropping any
    # '.test' suffix plus characters that would be awkward in a name.
    base = sys.argv[0]
    base = re.sub(r"\.test", "", base)
    base = re.sub(r"[/.]", "", base)
    # A seconds-resolution timestamp keeps successive runs distinct.
    return "%s-%i" % (base, int(time.time()))
class XenConfig:
    """An object to help create a xen-compliant config file."""

    def __init__(self):
        self.defaultOpts = {}

        # These options need to be lists
        self.defaultOpts["disk"] = []
        self.defaultOpts["vif"] = []

        # BUGFIX: self.opts previously aliased self.defaultOpts, so any
        # setOpt()/appOpt() also corrupted the defaults and clearOpts()
        # silently did nothing.  Start from an independent copy instead.
        self.opts = self._copyDefaults()

    def _copyDefaults(self):
        """Return a copy of defaultOpts with the list values duplicated."""
        copied = {}
        for k, v in self.defaultOpts.items():
            copied[k] = list(v) if isinstance(v, list) else v
        return copied

    def toString(self):
        """Convert this config to a string for writing out
        to a file"""
        string = "# Xen configuration generated by xm-test\n"
        for k, v in self.opts.items():
            if isinstance(v, int):
                piece = "%s = %i" % (k, v)
            elif isinstance(v, list) and v:
                piece = "%s = %s" % (k, v)
            elif isinstance(v, str) and v:
                piece = "%s = \"%s\"" % (k, v)
            else:
                # Unset or empty values are omitted from the file.
                piece = None
            if piece:
                string += "%s\n" % piece
        return string

    def write(self, filename):
        """Write this config out to filename"""
        # Use open() in a 'with' block instead of the py2-only file()
        # builtin, so the handle is closed even if the write fails.
        with open(filename, "w") as output:
            output.write(self.toString())

    def __str__(self):
        """When used as a string, we represent ourself by a config
        filename, which points to a temporary config that we write
        out ahead of time"""
        filename = "/tmp/xm-test.conf"
        self.write(filename)
        return filename

    def setOpt(self, name, value):
        """Set an option in the config"""
        if name in self.opts.keys() and isinstance(self.opts[name],
           list) and not isinstance(value, list):
            # List-valued options accept a scalar by wrapping it.
            self.opts[name] = [value]
        # "extra" is special so append to it.
        elif name == "extra" and name in self.opts.keys():
            self.opts[name] += " %s" % (value)
        else:
            self.opts[name] = value

    def appOpt(self, name, value):
        """Append a value to a list option"""
        if name in self.opts.keys() and isinstance(self.opts[name], list):
            self.opts[name].append(value)

    def getOpt(self, name):
        """Return the value of a config option, or None if unset"""
        if name in self.opts.keys():
            return self.opts[name]
        else:
            return None

    def setOpts(self, opts):
        """Batch-set options from a dictionary"""
        for k, v in opts.items():
            self.setOpt(k, v)

    def clearOpts(self, name=None):
        """Clear one or all config options"""
        if name:
            # Restore a single option to an independent copy of its
            # default (KeyError for names without a default, matching
            # the original behaviour).
            default = self.defaultOpts[name]
            self.opts[name] = list(default) if isinstance(default, list) else default
        else:
            self.opts = self._copyDefaults()
class DomainError(Exception):
    """Raised when a domain operation (create/start/stop/...) fails.

    msg       -- human-readable description (also the str() of the error)
    extra     -- command output or other supporting detail
    errorcode -- numeric error code; anything not coercible to int
                 becomes -1
    """
    def __init__(self, msg, extra="", errorcode=0):
        self.msg = msg
        self.extra = extra
        try:
            self.errorcode = int(errorcode)
        # BUGFIX: was the py2-only 'except Exception, e' (a syntax error
        # on py3) with an unused binding; int() can only raise these two.
        except (TypeError, ValueError):
            self.errorcode = -1

    def __str__(self):
        return str(self.msg)
class XenDomain:
    """Wraps a single Xen guest domain driven via the 'xm' tool.

    Tracks the domain's config, attached devices and (optional) console.
    Managed domains (xm new/start/delete) are used when isManaged is set
    or the XM_MANAGED_DOMAINS environment variable is present.
    """

    def __init__(self, name=None, config=None, isManaged=False):
        """Create a domain object.
        @param config: String filename of config file
        """
        if name:
            self.name = name
        else:
            self.name = getUniqueName()
        self.config = config
        self.console = None
        self.devices = {}
        self.netEnv = "bridge"
        if os.getenv("XM_MANAGED_DOMAINS"):
            isManaged = True
        self.isManaged = isManaged
        # Set domain type, either PV for ParaVirt domU or HVM for
        # FullVirt domain
        if ENABLE_HVM_SUPPORT:
            self.type = "HVM"
        else:
            self.type = "PV"

    def start(self, noConsole=False):
        """Create/start the domain; return its console unless noConsole."""
        if not self.isManaged:
            ret, output = traceCommand("xm create %s" % self.config)
            print self.config
        else:
            # Managed path: register with 'xm new', then 'xm start'.
            ret, output = traceCommand("xm new %s" % self.config)
            if ret != 0:
                _ret, output = traceCommand("xm delete " +
                                            self.config.getOpt("name"))
            else:
                ret, output = traceCommand("xm start " +
                                           self.config.getOpt("name"))
                addManagedDomain(self.config.getOpt("name"))
        if ret != 0:
            raise DomainError("Failed to create domain",
                              extra=output,
                              errorcode=ret)
        # HVM domains require waiting for boot
        if self.getDomainType() == "HVM":
            waitForBoot()
        # Go through device list and run console cmds
        for dev in self.devices.keys():
            self.devices[dev].execAddCmds()
        if self.console and noConsole == True:
            self.closeConsole()
        elif self.console and noConsole == False:
            return self.console
        elif not self.console and noConsole == False:
            return self.getConsole()

    def stop(self):
        """Gracefully shut the domain down via 'xm shutdown'."""
        prog = "xm"
        cmd = " shutdown "
        self.removeAllDevices()
        if self.console:
            self.closeConsole()
        ret, output = traceCommand(prog + cmd + self.config.getOpt("name"))
        return ret

    def destroy(self):
        """Forcibly destroy the domain (and delete it if managed)."""
        prog = "xm"
        cmd = " destroy "
        self.removeAllDevices()
        if self.console:
            self.closeConsole()
        ret, output = traceCommand(prog + cmd + self.config.getOpt("name"))
        if self.isManaged:
            ret, output = traceCommand(prog + " delete " +
                                       self.config.getOpt("name"))
            delManagedDomain(self.config.getOpt("name"))
        return ret

    def getName(self):
        return self.name

    def getId(self):
        # Look up the numeric domain id from the name.
        return domid(self.getName());

    def getDomainType(self):
        # "HVM" or "PV"; fixed at construction time.
        return self.type

    def closeConsole(self):
        # The domain closeConsole command must be called by tests, not the
        # console's close command. Once close is called, the console is
        # gone. You can't get history or anything else from it.
        if self.console:
            self.console._XmConsole__closeConsole()
            self.console = None

    def getConsole(self):
        """Return a fresh console attached to this domain."""
        if self.console:
            self.closeConsole()
        self.console = XmConsole(self.getName())
        # Activate the console
        self.console.sendInput("input")
        return self.console

    def newDevice(self, Device, *args):
        """Device Factory: Generic factory for creating new XenDevices.
        All device creation should be done through the XenDomain
        factory. Supply a XenDevice instance and its args and the
        constructor will be called."""
        # Make sure device with id hasn't already been added
        if self.devices.has_key(args[0]):
            raise DeviceError("Error: Domain already has device %s" % args[0])
        # Call constructor for supplied Device instance
        dargs = (self,)
        dargs += args
        dev = apply(Device, dargs)
        if self.isRunning():
            # Note: This needs to be done, XenDevice should have an attach
            # method.
            print "Domain is running, need to attach new device to domain."
        self.devices[dev.id] = dev
        self.config.appOpt(dev.configNode, str(dev))
        return dev

    def removeDevice(self, id):
        if self.devices.has_key(id):
            self.devices[id].removeDevice()

    def removeAllDevices(self):
        for k in self.devices.keys():
            self.removeDevice(k)

    def isRunning(self):
        return isDomainRunning(self.name)

    def getNetEnv(self):
        # We need to know the network environment: bridge, NAT, or routed.
        return self.netEnv

    def getDevice(self, id):
        """Return the device registered under id; print a note if absent."""
        dev = self.devices[id]
        if dev:
            return dev
        print "Device %s not found for domain %s" % (id, self.getName())
class XmTestDomain(XenDomain):
    """A XenDomain pre-configured with xm-test's architecture defaults."""

    def __init__(self, name=None, extraConfig=None,
                 baseConfig=arch.configDefaults, isManaged=False):
        """Create a new xm-test domain
        @param name: The requested domain name
        @param extraConfig: Additional configuration options
        @param baseConfig: The initial configuration defaults to use
        """
        config = XenConfig()
        config.setOpts(baseConfig)
        if extraConfig:
            config.setOpts(extraConfig)
        if name:
            config.setOpt("name", name)
        elif not config.getOpt("name"):
            # No name given anywhere: generate a uniqueish one.
            config.setOpt("name", getUniqueName())
        XenDomain.__init__(self, config.getOpt("name"), config=config,
                           isManaged=isManaged)

    def minSafeMem(self):
        # Minimum memory considered safe for this architecture.
        return arch.minSafeMem
class XmTestNetDomain(XmTestDomain):
    """An xm-test domain that is created with one network device."""

    def __init__(self, name=None, extraConfig=None,
                 baseConfig=arch.configDefaults):
        """Create a new xm-test domain with one network device
        @param name: The requested domain name
        @param extraConfig: Additional configuration options
        @param baseConfig: The initial configuration defaults to use
        """
        config = XenConfig()
        config.setOpts(baseConfig)
        if extraConfig:
            config.setOpts(extraConfig)
        if name:
            config.setOpt("name", name)
        elif not config.getOpt("name"):
            config.setOpt("name", getUniqueName())
        XenDomain.__init__(self, config.getOpt("name"), config=config)
        # Add one network devices to domain
        self.newDevice(XenNetDevice, "eth0")
if __name__ == "__main__":
    # Ad-hoc smoke test of XenConfig when this module is run directly.
    c = XenConfig()
    c.setOpt("foo", "bar")
    c.setOpt("foob", 1)
    opts = {"opt1" : 19,
            "opt2" : "blah"}
    c.setOpts(opts)
    c.setOpt("disk", "phy:/dev/ram0,hda1,w")
    c.appOpt("disk", "phy:/dev/ram1,hdb1,w")
    print str(c)

#    c.write("/tmp/foo.conf")

#    d = XmTestDomain();
#
#    d.start();
| gpl-2.0 |
burgerdev/volumina | volumina/skeletons/skeletonNode.py | 3 | 3152 | ###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
from PyQt4.QtCore import QPointF, QObject, pyqtSignal
from PyQt4.QtGui import QColor
class SkeletonNode(QObject):
    """A single selectable, movable node of a skeleton.

    The node lives at a 3D position; it remembers the axis it was
    created on, and `shape` holds its bounding-box extent per axis.
    """

    # Emitted with the new state whenever the selection changes.
    selected = pyqtSignal(bool)

    def __init__(self, pos3D, axis, skeletons):
        super(SkeletonNode, self).__init__()
        from volumina.skeletons import Skeletons
        assert isinstance(skeletons, Skeletons)
        assert len(pos3D) == 3
        assert axis in [0,1,2]

        self.pos = pos3D
        self.shape = [6,6,6]
        self.axis = axis
        self._skeletons = skeletons

        self._selected = False
        self._isMovable = True
        self._color = QColor(0,0,255)
        self._name = "unnamed node"

    def setColor(self, c):
        self._color = c

    def setName(self, name):
        self._name = name

    def name(self):
        return self._name

    def color(self):
        return self._color

    def isMovable(self):
        return self._isMovable

    def setMovable(self, movable):
        self._isMovable = movable

    def __str__(self):
        return "SkeletonNode(pos=%r, axis=%r)" % (self.pos, self.axis)

    def __repr__(self):
        return "SkeletonNode(pos=%r, axis=%r)" % (self.pos, self.axis)

    def move(self, pos):
        self.pos = pos

    def intersectsBbox(self, point):
        """Return True iff `point` lies within this node's bounding box.

        BUGFIX: the original divided the whole `shape` *list* by 2.0
        (a TypeError at runtime) and had the comparisons inverted; a
        point is inside when it lies between lo and hi on every axis.
        """
        assert len(point) == 3
        for i in range(3):
            lo = self.pos[i] - self.shape[i] / 2.0
            hi = self.pos[i] + self.shape[i] / 2.0
            if not (lo <= point[i] <= hi):
                return False
        return True

    def shape2D(self, axis):
        """Return the node's 2D extent in the plane perpendicular to axis."""
        shape = list(self.shape)
        del shape[axis]
        return shape

    def setNewShape(self, axis, newShape):
        self.shape[axis] = newShape

    def pointF(self, axis=None):
        """Project the 3D position into the 2D plane of `axis`
        (defaults to the node's own creation axis)."""
        if axis is None:
            axis = self.axis
        pos2D = list(self.pos)
        del pos2D[axis]
        return QPointF(*pos2D)

    def setSelected(self, selected):
        if self._selected == selected:
            return
        self._selected = selected
        self.selected.emit(self._selected)

    def isSelected(self):
        return self._selected
| lgpl-3.0 |
AndrewSmart/audacity | lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/ccroot.py | 70 | 12917 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re
from waflib import Task,Utils,Node,Errors
from waflib.TaskGen import after_method,before_method,feature,taskgen_method,extension
from waflib.Tools import c_aliases,c_preproc,c_config,c_osx,c_tests
from waflib.Configure import conf
SYSTEM_LIB_PATHS=['/usr/lib64','/usr/lib','/usr/local/lib64','/usr/local/lib']
USELIB_VARS=Utils.defaultdict(set)
USELIB_VARS['c']=set(['INCLUDES','FRAMEWORKPATH','DEFINES','CPPFLAGS','CCDEPS','CFLAGS','ARCH'])
USELIB_VARS['cxx']=set(['INCLUDES','FRAMEWORKPATH','DEFINES','CPPFLAGS','CXXDEPS','CXXFLAGS','ARCH'])
USELIB_VARS['d']=set(['INCLUDES','DFLAGS'])
USELIB_VARS['includes']=set(['INCLUDES','FRAMEWORKPATH','ARCH'])
USELIB_VARS['cprogram']=USELIB_VARS['cxxprogram']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS','FRAMEWORK','FRAMEWORKPATH','ARCH'])
USELIB_VARS['cshlib']=USELIB_VARS['cxxshlib']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS','FRAMEWORK','FRAMEWORKPATH','ARCH'])
USELIB_VARS['cstlib']=USELIB_VARS['cxxstlib']=set(['ARFLAGS','LINKDEPS'])
USELIB_VARS['dprogram']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
USELIB_VARS['dshlib']=set(['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
USELIB_VARS['dstlib']=set(['ARFLAGS','LINKDEPS'])
USELIB_VARS['asm']=set(['ASFLAGS'])
@taskgen_method
def create_compiled_task(self, name, node):
    """Create a compile task for *node* and record it in compiled_tasks.

    The object name embeds the task generator index (idx) so that two
    generators compiling the same source file do not collide.
    """
    out = '%s.%d.o' % (node.name, self.idx)
    task = self.create_task(name, node, node.parent.find_or_declare(out))
    try:
        self.compiled_tasks.append(task)
    except AttributeError:
        # First compiled node for this task generator.
        self.compiled_tasks = [task]
    return task
@taskgen_method
def to_incnodes(self, inlst):
    """Convert include paths (strings or nodes) to a de-duplicated node list.

    Strings are resolved relative to the task generator's path;
    '#'-prefixed paths are relative to the project top level.  For
    relative paths both the build and source variants are appended.
    """
    lst = []
    seen = set([])
    for x in self.to_list(inlst):
        if x in seen or not x:
            continue
        seen.add(x)
        if isinstance(x, Node.Node):
            lst.append(x)
        else:
            if os.path.isabs(x):
                lst.append(self.bld.root.make_node(x) or x)
            else:
                if x[0] == '#':
                    # Project-root-relative include path.
                    p = self.bld.bldnode.make_node(x[1:])
                    v = self.bld.srcnode.make_node(x[1:])
                else:
                    p = self.path.get_bld().make_node(x)
                    v = self.path.make_node(x)
                if p.is_child_of(self.bld.bldnode):
                    p.mkdir()
                lst.append(p)
                lst.append(v)
    return lst
@feature('c','cxx','d','asm','fc','includes')
@after_method('propagate_uselib_vars','process_source')
def apply_incpaths(self):
    """Resolve 'includes' plus env INCLUDES into absolute INCPATHS."""
    lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES'])
    self.includes_nodes = lst
    self.env['INCPATHS'] = [x.abspath() for x in lst]
class link_task(Task.Task):
    """Base class for the program/shared-library link tasks."""
    color = 'YELLOW'
    # Default installation directory (None = do not install).
    inst_to = None
    chmod = Utils.O755

    def add_target(self, target):
        """Declare the output node, applying the platform naming pattern."""
        if isinstance(target, str):
            pattern = self.env[self.__class__.__name__ + '_PATTERN']
            if not pattern:
                pattern = '%s'
            folder, name = os.path.split(target)
            if self.__class__.__name__.find('shlib') > 0:
                if self.env.DEST_BINFMT == 'pe' and getattr(self.generator, 'vnum', None):
                    # On Windows the major version is folded into the name.
                    name = name + '-' + self.generator.vnum.split('.')[0]
            tmp = folder + os.sep + pattern % name
            target = self.generator.path.find_or_declare(tmp)
        self.set_outputs(target)
class stlink_task(link_task):
    """Static-library link task (runs ar)."""
    run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'

def rm_tgt(cls):
    """Wrap cls.run so stale target files are removed before running."""
    old = cls.run
    def wrap(self):
        try: os.remove(self.outputs[0].abspath())
        except OSError: pass
        return old(self)
    setattr(cls, 'run', wrap)

# ar appends to an existing archive, so always remove the old one first.
rm_tgt(stlink_task)
@feature('c','cxx','d','fc','asm')
@after_method('process_source')
def apply_link(self):
    """Create the link task matching the features; schedule installation."""
    for x in self.features:
        # In mixed c/cxx generators the c++ link task wins.
        if x == 'cprogram' and 'cxx' in self.features:
            x = 'cxxprogram'
        elif x == 'cshlib' and 'cxx' in self.features:
            x = 'cxxshlib'
        if x in Task.classes:
            if issubclass(Task.classes[x], link_task):
                link = x
                break
    else:
        # No link feature present: nothing to do.
        return
    objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])]
    self.link_task = self.create_task(link, objs)
    self.link_task.add_target(self.target)
    try:
        inst_to = self.install_path
    except AttributeError:
        inst_to = self.link_task.__class__.inst_to
    if inst_to:
        self.install_task = self.bld.install_files(inst_to, self.link_task.outputs[:], env=self.env, chmod=self.link_task.chmod)
@taskgen_method
def use_rec(self, name, **kw):
    """Recursively process one 'use' dependency named *name*.

    Records precedence edges in tmp_use_prec and classifies each
    dependency as LIB, STLIB or plain uselib flags.
    """
    if name in self.tmp_use_not or name in self.tmp_use_seen:
        return
    try:
        y = self.bld.get_tgen_by_name(name)
    except Errors.WafError:
        # Not a task generator: treat the name as a uselib flag set.
        self.uselib.append(name)
        self.tmp_use_not.add(name)
        return
    self.tmp_use_seen.append(name)
    y.post()
    y.tmp_use_objects = objects = kw.get('objects', True)
    y.tmp_use_stlib = stlib = kw.get('stlib', True)
    try:
        link_task = y.link_task
    except AttributeError:
        # Objects-only generator (no link task).
        y.tmp_use_var = ''
    else:
        objects = False
        if not isinstance(link_task, stlink_task):
            stlib = False
            y.tmp_use_var = 'LIB'
        else:
            y.tmp_use_var = 'STLIB'
    p = self.tmp_use_prec
    for x in self.to_list(getattr(y, 'use', [])):
        try:
            p[x].append(name)
        except KeyError:
            p[x] = [name]
        self.use_rec(x, objects=objects, stlib=stlib)
@feature('c','cxx','d','use','fc')
@before_method('apply_incpaths','propagate_uselib_vars')
@after_method('apply_link','process_source')
def process_use(self):
    """Propagate libraries/objects/includes/defines from 'use' deps.

    Builds the dependency graph with use_rec, topologically sorts it,
    then appends LIB/STLIB values, library paths, exported includes and
    defines for each dependency in reverse order.
    """
    use_not = self.tmp_use_not = set([])
    self.tmp_use_seen = []
    use_prec = self.tmp_use_prec = {}
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    self.includes = self.to_list(getattr(self, 'includes', []))
    names = self.to_list(getattr(self, 'use', []))
    for x in names:
        self.use_rec(x)
    for x in use_not:
        if x in use_prec:
            del use_prec[x]
    # Topological sort (Kahn): start from nodes without predecessors.
    out = []
    tmp = []
    for x in self.tmp_use_seen:
        for k in use_prec.values():
            if x in k:
                break
        else:
            tmp.append(x)
    while tmp:
        e = tmp.pop()
        out.append(e)
        try:
            nlst = use_prec[e]
        except KeyError:
            pass
        else:
            del use_prec[e]
            for x in nlst:
                for y in use_prec:
                    if x in use_prec[y]:
                        break
                else:
                    tmp.append(x)
    if use_prec:
        raise Errors.WafError('Cycle detected in the use processing %r' % use_prec)
    out.reverse()
    link_task = getattr(self, 'link_task', None)
    for x in out:
        y = self.bld.get_tgen_by_name(x)
        var = y.tmp_use_var
        if var and link_task:
            if var == 'LIB' or y.tmp_use_stlib:
                self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]])
                self.link_task.dep_nodes.extend(y.link_task.outputs)
                tmp_path = y.link_task.outputs[0].parent.path_from(self.bld.bldnode)
                self.env.append_value(var + 'PATH', [tmp_path])
        else:
            if y.tmp_use_objects:
                self.add_objects_from_tgen(y)
        if getattr(y, 'export_includes', None):
            self.includes.extend(y.to_incnodes(y.export_includes))
        if getattr(y, 'export_defines', None):
            self.env.append_value('DEFINES', self.to_list(y.export_defines))
    # Names that are not task generators become plain uselib entries.
    for x in names:
        try:
            y = self.bld.get_tgen_by_name(x)
        except Exception:
            if not self.env['STLIB_' + x] and not x in self.uselib:
                self.uselib.append(x)
        else:
            for k in self.to_list(getattr(y, 'uselib', [])):
                if not self.env['STLIB_' + k] and not k in self.uselib:
                    self.uselib.append(k)
@taskgen_method
def accept_node_to_link(self, node):
    """Return True if *node* may be added to a link task (skip .pdb files)."""
    return not node.name.endswith('.pdb')
@taskgen_method
def add_objects_from_tgen(self, tg):
    """Add the object files produced by task generator *tg* to our link."""
    try:
        link_task = self.link_task
    except AttributeError:
        # No link task on this generator: nothing to add the objects to.
        pass
    else:
        for tsk in getattr(tg, 'compiled_tasks', []):
            for x in tsk.outputs:
                if self.accept_node_to_link(x):
                    link_task.inputs.append(x)
@taskgen_method
def get_uselib_vars(self):
    """Return the set of USELIB variable names relevant to our features."""
    _vars = set([])
    for x in self.features:
        if x in USELIB_VARS:
            _vars |= USELIB_VARS[x]
    return _vars
@feature('c','cxx','d','fc','javac','cs','uselib','asm')
@after_method('process_use')
def propagate_uselib_vars(self):
    """Fold attribute, per-feature and per-uselib flags into env vars."""
    _vars = self.get_uselib_vars()
    env = self.env
    # 1. Values set directly on the task generator (e.g. self.cflags).
    for x in _vars:
        y = x.lower()
        env.append_unique(x, self.to_list(getattr(self, y, [])))
    # 2. Per-feature variables, e.g. CFLAGS_cshlib.
    for x in self.features:
        for var in _vars:
            compvar = '%s_%s' % (var, x)
            env.append_value(var, env[compvar])
    # 3. Per-uselib variables, e.g. LIB_ZLIB.
    for x in self.to_list(getattr(self, 'uselib', [])):
        for v in _vars:
            env.append_value(v, env[v + '_' + x])
@feature('cshlib','cxxshlib','fcshlib')
@after_method('apply_link')
def apply_implib(self):
    """On PE platforms, create and install the import library for a dll."""
    if not self.env.DEST_BINFMT == 'pe':
        return
    dll = self.link_task.outputs[0]
    if isinstance(self.target, Node.Node):
        name = self.target.name
    else:
        name = os.path.split(self.target)[1]
    implib = self.env['implib_PATTERN'] % name
    implib = dll.parent.find_or_declare(implib)
    self.env.append_value('LINKFLAGS', self.env['IMPLIB_ST'] % implib.bldpath())
    self.link_task.outputs.append(implib)
    # An optional module-definition (.def) file for exported symbols.
    if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe':
        node = self.path.find_resource(self.defs)
        if not node:
            raise Errors.WafError('invalid def file %r' % self.defs)
        if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
            self.env.append_value('LINKFLAGS', '/def:%s' % node.path_from(self.bld.bldnode))
            self.link_task.dep_nodes.append(node)
        else:
            # gcc-style toolchains take the .def file as a link input.
            self.link_task.inputs.append(node)
    try:
        inst_to = self.install_path
    except AttributeError:
        inst_to = self.link_task.__class__.inst_to
    if not inst_to:
        return
    self.implib_install_task = self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env)
# Strict x.y.z version pattern (no leading zeros).
re_vnum = re.compile('^([1-9]\\d*|0)[.]([1-9]\\d*|0)[.]([1-9]\\d*|0)$')

@feature('cshlib','cxxshlib','dshlib','fcshlib','vnum')
@after_method('apply_link','propagate_uselib_vars')
def apply_vnum(self):
    """Handle shared-library versioning (libfoo.so.1.2.3 plus symlinks)."""
    if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
        return
    link = self.link_task
    if not re_vnum.match(self.vnum):
        raise Errors.WafError('Invalid version %r for %r' % (self.vnum, self))
    nums = self.vnum.split('.')
    node = link.outputs[0]
    libname = node.name
    # name3 = fully-versioned file name, name2 = major-version alias.
    if libname.endswith('.dylib'):
        name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
        name2 = libname.replace('.dylib', '.%s.dylib' % nums[0])
    else:
        name3 = libname + '.' + self.vnum
        name2 = libname + '.' + nums[0]
    if self.env.SONAME_ST:
        v = self.env.SONAME_ST % name2
        self.env.append_value('LINKFLAGS', v.split())
    self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)])
    if getattr(self, 'install_task', None):
        # Replace the plain install with versioned file + two symlinks.
        self.install_task.hasrun = Task.SKIP_ME
        bld = self.bld
        path = self.install_task.dest
        t1 = bld.install_as(path + os.sep + name3, node, env=self.env, chmod=self.link_task.chmod)
        t2 = bld.symlink_as(path + os.sep + name2, name3)
        t3 = bld.symlink_as(path + os.sep + libname, name3)
        self.vnum_install_task = (t1, t2, t3)
    if '-dynamiclib' in self.env['LINKFLAGS']:
        # OS X: record the final install path in the binary itself.
        try:
            inst_to = self.install_path
        except AttributeError:
            inst_to = self.link_task.__class__.inst_to
        if inst_to:
            p = Utils.subst_vars(inst_to, self.env)
            path = os.path.join(p, self.link_task.outputs[0].name)
            self.env.append_value('LINKFLAGS', ['-install_name', path])
class vnum(Task.Task):
    """Create the versioned symlinks (libfoo.so.1 -> libfoo.so.1.2.3)."""
    color = 'CYAN'
    # BUGFIX: this attribute was misspelled 'quient', which waf silently
    # ignored; 'quiet' is the Task attribute that suppresses the display
    # of the task in the build output, which is the intent here.
    quiet = True
    ext_in = ['.bin']

    def run(self):
        for x in self.outputs:
            path = x.abspath()
            try:
                # Remove a stale link first; os.symlink fails on existing paths.
                os.remove(path)
            except OSError:
                pass
            try:
                os.symlink(self.inputs[0].name, path)
            except OSError:
                # e.g. a filesystem without symlink support.
                return 1
class fake_shlib(link_task):
    """Dummy link task for a pre-built shared library (see read_shlib)."""
    def runnable_status(self):
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER
        for x in self.outputs:
            # Record the signature of the pre-built binary for rebuilds.
            x.sig = Utils.h_file(x.abspath())
        return Task.SKIP_ME

class fake_stlib(stlink_task):
    """Dummy link task for a pre-built static library (see read_stlib)."""
    def runnable_status(self):
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER
        for x in self.outputs:
            x.sig = Utils.h_file(x.abspath())
        return Task.SKIP_ME
@conf
def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]):
    """Declare a pre-built shared library to link against (nothing is built)."""
    return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines)

@conf
def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]):
    """Declare a pre-built static library to link against (nothing is built)."""
    return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines)
# Candidate file-name patterns per library type, tried in order.
lib_patterns = {'shlib': ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], 'stlib': ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], }

@feature('fake_lib')
def process_lib(self):
    """Locate the pre-built library file and create its fake link task."""
    node = None
    names = [x % self.name for x in lib_patterns[self.lib_type]]
    for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS:
        if not isinstance(x, Node.Node):
            x = self.bld.root.find_node(x) or self.path.find_node(x)
        if not x:
            continue
        for y in names:
            node = x.find_node(y)
            if node:
                node.sig = Utils.h_file(node.abspath())
                break
        else:
            continue
        break
    else:
        raise Errors.WafError('could not find library %r' % self.name)
    self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node])
    self.target = self.name
class fake_o(Task.Task):
    """Placeholder task for object files supplied by the user."""
    def runnable_status(self):
        return Task.SKIP_ME

@extension('.o','.obj')
def add_those_o_files(self, node):
    """Register a pre-compiled object file so it is passed to the linker."""
    tsk = self.create_task('fake_o', [], node)
    try:
        self.compiled_tasks.append(tsk)
    except AttributeError:
        self.compiled_tasks = [tsk]
@feature('fake_obj')
@before_method('process_source')
def process_objs(self):
    """Route every source node of a 'fake_obj' generator through the fake .o handler."""
    for node in self.to_nodes(self.source):
        self.add_those_o_files(node)
    # Clear the source list so process_source has nothing left to compile.
    self.source=[]
@conf
def read_object(self, obj):
    """Declare a prebuilt object file; returns the 'fake_obj' task generator.

    ``obj`` may be a Node or a path string relative to the current path.
    """
    if not isinstance(obj,self.path.__class__):
        obj=self.path.find_resource(obj)
    return self(features='fake_obj',source=obj,name=obj.name)
| gpl-2.0 |
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/zalaa.py | 2 | 3651 | """
urlresolver XBMC Addon
Copyright (C) 2012 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import urllib2, re, os
from urlresolver import common
from lib import jsunpack
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class ZalaaResolver(Plugin, UrlResolver, PluginSettings):
    """URL resolver plugin for zalaa.com hosted media (Python 2 codebase)."""
    implements = [UrlResolver, PluginSettings]
    name = "zalaa"

    def __init__(self):
        # Priority is user-configurable; fall back to 100 when unset.
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()
        #e.g. http://www.zalaa.com/hj0eyq4jg0io
        #FIXME: http://www.zalaa.com/npwp1cr4uys7/Nikita.S02E14.HDTV.XviD-LOL.avi.htm
        self.pattern = 'http://www.(zalaa.com)/([a-zA-Z0-9]+)(?:/.+?\.htm)?'

    def get_media_url(self, host, media_id):
        """Resolve the direct media URL for the given host/media id.

        Fetches the landing page, re-submits its form fields, then extracts
        the file URL from the embedded player's javascript.
        """
        web_url = self.get_url(host, media_id)
        try:
            html = self.net.http_GET(web_url).content
            #send all form values
            sPattern = '<input.*?name="([^"]+)".*?value=([^>]+)>'
            r = re.findall(sPattern, html)
            data = {}
            if r:
                for match in r:
                    name = match[0]
                    value = match[1].replace('"','')
                    data[name] = value
                # POST the collected form values back to get the player page.
                html = self.net.http_POST(web_url, data).content
            else:
                raise Exception ('File Not Found or removed')
            # modified by mscreations. get the file url from the returned javascript
            match = re.search("addVariable[(]'file','(.+?)'[)]", html, re.DOTALL + re.IGNORECASE)
            if match:
                # Append a Referer header hint so the CDN accepts the request.
                return match.group(1)+'|Referer=http%3A%2F%2Fwww.zalaa.com%2Fplayer%2Fplayer-embed.swf'
            raise Exception ('File Not Found or removed')
        except urllib2.URLError, e:
            common.addon.log_error(self.name + ': got http error %d fetching %s' %
                                   (e.code, web_url))
            common.addon.show_small_popup('Error','Http error: '+str(e), 8000, error_logo)
            return self.unresolvable(code=3, msg=e)
        except Exception, e:
            common.addon.log('**** Zalaa Error occured: %s' % e)
            common.addon.show_small_popup(title='[B][COLOR white]ZALAA[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
            return self.unresolvable(code=0, msg=e)

    def get_url(self, host, media_id):
        # Build the canonical page URL for a media id.
        return 'http://www.zalaa.com/%s' % (media_id)

    def get_host_and_id(self, url):
        # Returns (host, media_id) on match, False otherwise.
        r = re.search(self.pattern, url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        # A URL is handled when the plugin is enabled and the pattern matches.
        if self.get_setting('enabled') == 'false': return False
        return re.match(self.pattern, url) or self.name in host
| gpl-2.0 |
rwakulszowa/servo | tests/wpt/css-tests/tools/html5lib/html5lib/serializer/htmlserializer.py | 423 | 12897 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    # Old codecs module without error-handler registration: fall back to strict.
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map codepoints to entity names for the custom encode error handler.
    encode_entity_map = {}
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
                (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                # Narrow build: two-char string is a surrogate pair.
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        """Encode error handler replacing unencodable chars with HTML entities."""
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    skip = False
                    continue
                index = i + exc.start
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True  # consumed two code units
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    # No named entity: emit a numeric character reference.
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)
    del register_error
class HTMLSerializer(object):
    """Serialize an html5lib tree-walker token stream back to (X)HTML text.

    Fix: the attribute-value escaping literals had been corrupted into
    no-ops (e.g. ``v.replace("&", "&")``); the proper HTML entity
    replacements (``&amp;``, ``&lt;``, ``&#39;``, ``&quot;``) are restored.
    """

    # attribute quoting options
    quote_attr_values = False
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        Keyword options (default given first unless specified) include:

        inject_meta_charset=True|False
          Whether it insert a meta element to define the character set of the
          document.
        quote_attr_values=True|False
          Whether to quote attribute values that don't require quoting
          per HTML5 parsing rules.
        quote_char=u'"'|u"'"
          Use given quote character for attribute quoting. Default is to
          use double quote unless attribute value contains a double quote,
          in which case single quotes are used instead.
        escape_lt_in_attrs=False|True
          Whether to escape < in attribute values.
        escape_rcdata=False|True
          Whether to escape characters that need to be escaped within normal
          elements within rcdata elements such as style.
        resolve_entities=True|False
          Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
          are unaffected by this setting.
        strip_whitespace=False|True
          Whether to remove semantically meaningless whitespace. (This
          compresses all whitespace to a single space except within pre.)
        minimize_boolean_attributes=True|False
          Shortens boolean attributes to give just the attribute value,
          for example <input disabled="disabled"> becomes <input disabled>.
        use_trailing_solidus=False|True
          Includes a close-tag slash at the end of the start tag of void
          elements (empty elements whose end tag is forbidden). E.g. <hr/>.
        space_before_trailing_solidus=True|False
          Places a space immediately before the closing slash in a tag
          using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
        sanitize=False|True
          Strip all unsafe or unknown constructs from output.
          See `html5lib user documentation`_
        omit_optional_tags=True|False
          Omit start/end tags that are optional.
        alphabetical_attributes=False|True
          Reorder attributes to be in alphabetical order.

        .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
        """
        if 'quote_char' in kwargs:
            # An explicit quote character overrides automatic selection.
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        """Encode text content, replacing unencodable chars with entities."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, unicode_encode_errors)
        else:
            return string

    def encodeStrict(self, string):
        """Encode markup (tags, attribute names) where substitution is illegal."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield the serialized output chunk by chunk for the token stream."""
        self.encoding = encoding
        in_cdata = False
        self.errors = []
        if encoding and self.inject_meta_charset:
            from ..filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from ..filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from ..filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from ..filters.optionaltags import Filter
            treewalker = Filter(treewalker)
        # Alphabetical attributes must be last, as other filters
        # could add attributes and alter the order
        if self.alphabetical_attributes:
            from ..filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError(_("System identifer contains both single and double quote characters"))
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError(_("Unexpected </ in CDATA"))
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                for (attr_namespace, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple())
                         and k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values or not v:
                            quote_attr = True
                        else:
                            # Quote only when the value contains a char that
                            # would break unquoted parsing.
                            quote_attr = reduce(lambda x, y: x or (y in v),
                                                spaceCharacters + ">\"'=", False)
                        # Restored escaping (was corrupted to a no-op).
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    # "--" inside a comment is invalid HTML.
                    self.serializeError(_("Comment contains --"))
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError(_("Entity %s not recognized" % name))
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serialize the whole stream and return it as a single string/bytes."""
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree.

    Fix: this was declared with ``def`` instead of ``class``, which made it a
    plain function, so ``raise SerializeError`` in strict mode would itself
    fail with a TypeError instead of raising this exception.
    """
    pass
| mpl-2.0 |
vzantedeschi/L3SVMs | validation.py | 1 | 1853 | import time
import statistics
from src.l3svms import *
from src.utils import *
args = get_args(__file__)
TRAIN = args.train_file
TEST = args.test_file
LAND = args.nb_landmarks # default 10
CLUS = args.nb_clusters # default 1
NORM = args.norm # default False
LIN = args.linear # default True
PCA_BOOL = args.pca # default False
ITER = args.nb_iterations # default 1
VERB = args.verbose # default False
YPOS = args.y_pos # default 0
verboseprint = print if VERB else lambda *a, **k: None
verboseprint("training on {}, testing on {}: {} clusters, {} landmarks".format(TRAIN,TEST,CLUS,LAND))
if LIN:
verboseprint("linear kernel")
else:
verboseprint("rbf kernel")
if NORM:
verboseprint("normalized dataset")
else:
verboseprint("scaled data")
t1 = time.time()
# load dataset
try:
train_y,train_x = load_sparse_dataset(TRAIN,norm=NORM,y_pos=YPOS)
test_y,test_x = load_sparse_dataset(TEST,norm=NORM,y_pos=YPOS)
except:
train_y,train_x = load_dense_dataset(TRAIN,norm=NORM,y_pos=YPOS)
test_y,test_x = load_dense_dataset(TEST,norm=NORM,y_pos=YPOS)
t2 = time.time()
verboseprint("dataset loading time:",t2-t1,"s")
if PCA_BOOL:
if LAND > train_x.shape[1]:
raise Exception("When using PCA, the nb landmarks must be at most the nb of features")
verboseprint("landmarks = principal components")
else:
verboseprint("random landmarks")
verboseprint("--------------------\n")
acc_list = []
time_list = []
for it in range(ITER):
acc,time = learning(train_x,train_y,test_x,test_y,verboseprint,CLUS,PCA_BOOL,LIN,LAND)
acc_list.append(acc)
time_list.append(time)
print("Mean accuracy (%), mean stdev (%), mean time (s) over {} iterations:".format(ITER))
try:
print(statistics.mean(acc_list),statistics.stdev(acc_list),statistics.mean(time_list))
except:
print(acc_list[0],0.,time_list[0]) | mit |
googleapis/python-kms | samples/snippets/create_key_ring.py | 1 | 1511 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# [START kms_create_key_ring]
def create_key_ring(project_id, location_id, id):
    """
    Creates a new key ring in Cloud KMS

    Args:
        project_id (string): Google Cloud project ID (e.g. 'my-project').
        location_id (string): Cloud KMS location (e.g. 'us-east1').
        id (string): ID of the key ring to create (e.g. 'my-key-ring').

    Returns:
        KeyRing: Cloud KMS key ring.

    """
    # Import the client library lazily so the sample stays self-contained.
    from google.cloud import kms

    kms_client = kms.KeyManagementServiceClient()

    # The parent resource under which the key ring will live.
    location_name = f'projects/{project_id}/locations/{location_id}'

    # An empty body: a key ring has no configurable fields at creation time.
    key_ring = {}

    created_key_ring = kms_client.create_key_ring(
        request={'parent': location_name, 'key_ring_id': id, 'key_ring': key_ring})
    print('Created key ring: {}'.format(created_key_ring.name))
    return created_key_ring
return created_key_ring
# [END kms_create_key_ring]
| apache-2.0 |
rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/DynamicsMetaBlockParameter.py | 1 | 3262 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Dynamics.DynamicsMetaBlockConnectable import DynamicsMetaBlockConnectable
class DynamicsMetaBlockParameter(DynamicsMetaBlockConnectable):
    """CIM meta-block parameter; maintains two-way links to its container
    (MemberOf_MetaBlock) and its BlockParameter children."""

    def __init__(self, MemberOf_MetaBlock=None, BlockParameter=None, *args, **kw_args):
        """Initialises a new 'DynamicsMetaBlockParameter' instance.

        @param MemberOf_MetaBlock:
        @param BlockParameter:
        """
        # Assign through the properties so the reverse references are wired up.
        self._MemberOf_MetaBlock = None
        self.MemberOf_MetaBlock = MemberOf_MetaBlock

        self._BlockParameter = []
        self.BlockParameter = [] if BlockParameter is None else BlockParameter

        super(DynamicsMetaBlockParameter, self).__init__(*args, **kw_args)

    # Introspection metadata used by the PyCIM framework.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["MemberOf_MetaBlock", "BlockParameter"]
    _many_refs = ["BlockParameter"]

    def getMemberOf_MetaBlock(self):
        return self._MemberOf_MetaBlock

    def setMemberOf_MetaBlock(self, value):
        # Detach from the previous container's list before re-parenting.
        if self._MemberOf_MetaBlock is not None:
            filtered = [x for x in self.MemberOf_MetaBlock.MetaBlockParameter if x != self]
            self._MemberOf_MetaBlock._MetaBlockParameter = filtered

        self._MemberOf_MetaBlock = value
        if self._MemberOf_MetaBlock is not None:
            # Keep the reverse reference list duplicate-free.
            if self not in self._MemberOf_MetaBlock._MetaBlockParameter:
                self._MemberOf_MetaBlock._MetaBlockParameter.append(self)

    MemberOf_MetaBlock = property(getMemberOf_MetaBlock, setMemberOf_MetaBlock)

    def getBlockParameter(self):
        """
        """
        return self._BlockParameter

    def setBlockParameter(self, value):
        # Unlink all current children, then claim the new ones.
        for x in self._BlockParameter:
            x.MetaBlockParameter = None
        for y in value:
            y._MetaBlockParameter = self
        self._BlockParameter = value

    BlockParameter = property(getBlockParameter, setBlockParameter)

    def addBlockParameter(self, *BlockParameter):
        for obj in BlockParameter:
            obj.MetaBlockParameter = self

    def removeBlockParameter(self, *BlockParameter):
        for obj in BlockParameter:
            obj.MetaBlockParameter = None
JakeBrand/CMPUT410-E6 | v1/lib/python2.7/site-packages/django/utils/tzinfo.py | 97 | 3923 | "Implementation of tzinfo classes for use with datetime.datetime."
from __future__ import unicode_literals
from datetime import timedelta, tzinfo
import time
import warnings
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text, DEFAULT_LOCALE_ENCODING
warnings.warn(
"django.utils.tzinfo will be removed in Django 1.9. "
"Use django.utils.timezone instead.",
RemovedInDjango19Warning, stacklevel=2)
# Python's doc say: "A tzinfo subclass must have an __init__() method that can
# be called with no arguments". FixedOffset and LocalTimezone don't honor this
# requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as
# well as pickling/unpickling.
class FixedOffset(tzinfo):
    "Fixed offset in minutes east from UTC."
    def __init__(self, offset):
        # Deprecated shim kept for backward compatibility only.
        warnings.warn(
            "django.utils.tzinfo.FixedOffset will be removed in Django 1.9. "
            "Use django.utils.timezone.get_fixed_timezone instead.",
            RemovedInDjango19Warning)
        if isinstance(offset, timedelta):
            self.__offset = offset
            # Convert to whole minutes for the display name below.
            offset = self.__offset.seconds // 60
        else:
            self.__offset = timedelta(minutes=offset)

        # Name in +HHMM / -HHMM form.
        sign = '-' if offset < 0 else '+'
        self.__name = "%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60)

    def __repr__(self):
        return self.__name

    def __getinitargs__(self):
        # Makes instances picklable/copyable despite the non-default __init__.
        return self.__offset,

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # Fixed offsets never observe daylight saving time.
        return timedelta(0)
# This implementation is used for display purposes. It uses an approximation
# for DST computations on dates >= 2038.
# A similar implementation exists in django.utils.timezone. It's used for
# timezone support (when USE_TZ = True) and focuses on correctness.
class LocalTimezone(tzinfo):
    "Proxy timezone information from time module."
    def __init__(self, dt):
        # Deprecated shim kept for backward compatibility only.
        warnings.warn(
            "django.utils.tzinfo.LocalTimezone will be removed in Django 1.9. "
            "Use django.utils.timezone.get_default_timezone instead.",
            RemovedInDjango19Warning)
        tzinfo.__init__(self)
        self.__dt = dt
        self._tzname = self.tzname(dt)

    def __repr__(self):
        return force_str(self._tzname)

    def __getinitargs__(self):
        # Makes instances picklable/copyable despite the non-default __init__.
        return self.__dt,

    def utcoffset(self, dt):
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone)
        else:
            return timedelta(seconds=-time.timezone)

    def dst(self, dt):
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
        else:
            return timedelta(0)

    def tzname(self, dt):
        is_dst = False if dt is None else self._isdst(dt)
        try:
            return force_text(time.tzname[is_dst], DEFAULT_LOCALE_ENCODING)
        except UnicodeDecodeError:
            return None

    def _isdst(self, dt):
        # Ask the C library whether the local zone is in DST at this instant.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        try:
            stamp = time.mktime(tt)
        except (OverflowError, ValueError):
            # 32 bit systems can't handle dates after Jan 2038, and certain
            # systems can't handle dates before ~1901-12-01:
            #
            # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
            # OverflowError: mktime argument out of range
            # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
            # ValueError: year out of range
            #
            # In this case, we fake the date, because we only care about the
            # DST flag.
            tt = (2037,) + tt[1:]
            stamp = time.mktime(tt)
        tt = time.localtime(stamp)
        return tt.tm_isdst > 0
pratikgujjar/DeepIntent | code/autoencoder_model/scripts/attention_autoencoder.py | 1 | 20029 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers import Layer
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.merge import multiply
from keras.layers.merge import concatenate
from keras.layers.core import Permute
from keras.layers.core import RepeatVector
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Reshape
from keras.layers.core import Flatten
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input
from keras.models import Model
from keras import metrics
from config_aa import *
import tb_callback
import lrs_callback
import argparse
import math
import os
import cv2
from sys import stdout
def encoder_model():
    """Spatio-temporal encoder: (VIDEO_LENGTH/2, 128, 128, 3) clips are
    downsampled to a (10, 16, 16, 32) feature volume via strided Conv3D."""
    model = Sequential()

    # Stem: 10x128x128 -> 10x32x32 with a large receptive field.
    model.add(Conv3D(filters=128,
                     strides=(1, 4, 4),
                     kernel_size=(3, 11, 11),
                     padding='same',
                     input_shape=(int(VIDEO_LENGTH/2), 128, 128, 3)))
    model.add(TimeDistributed(BatchNormalization()))
    model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
    model.add(TimeDistributed(Dropout(0.5)))

    # Two further conv stages: 10x32x32 -> 10x16x16 -> 10x16x16.
    for n_filters, spatial_strides in ((64, (1, 2, 2)), (32, (1, 1, 1))):
        model.add(Conv3D(filters=n_filters,
                         strides=spatial_strides,
                         kernel_size=(3, 5, 5),
                         padding='same'))
        model.add(TimeDistributed(BatchNormalization()))
        model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
        model.add(TimeDistributed(Dropout(0.5)))

    return model
def decoder_model():
    """Attention decoder: maps (10, 16, 16, 32) encoder features back to a
    (10, 128, 128, 3) frame sequence, gating features with a learned
    per-pixel soft-attention map regularised by a custom loss layer."""
    inputs = Input(shape=(10, 16, 16, 32))

    # 10x16x16 -> 10x16x16 (channel expansion only)
    conv_1 = Conv3DTranspose(filters=64,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 1, 1))(inputs)
    x = TimeDistributed(BatchNormalization())(conv_1)
    x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_1 = TimeDistributed(Dropout(0.5))(x)

    # 10x16x16 -> 10x32x32
    conv_2 = Conv3DTranspose(filters=128,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 2, 2))(out_1)
    x = TimeDistributed(BatchNormalization())(conv_2)
    x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_2 = TimeDistributed(Dropout(0.5))(x)

    # 10x32x32 -> 10x64x64
    conv_3 = Conv3DTranspose(filters=64,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 2, 2))(out_2)
    x = TimeDistributed(BatchNormalization())(conv_3)
    x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_3 = TimeDistributed(Dropout(0.5))(x)

    # Learn alpha_1: a single-channel attention logit map over 64x64 pixels.
    conv3D_1 = Conv3D(filters=1,
                      strides=(1, 1, 1),
                      kernel_size=(3, 3, 3),
                      dilation_rate=(2, 2, 2),
                      padding='same')(out_3)
    x = TimeDistributed(BatchNormalization())(conv3D_1)
    x = TimeDistributed(Dropout(0.5))(x)
    # conv3D_2 = Conv3D(filters=1,
    #                   strides=(1, 1, 1),
    #                   kernel_size=(3, 3, 3),
    #                   dilation_rate=(3, 3, 3),
    #                   padding='same')(x)
    # x = TimeDistributed(BatchNormalization())(conv3D_2)
    # x = TimeDistributed(Dropout(0.5))(x)

    # Softmax over all 64*64 positions turns the logits into a per-frame
    # spatial probability distribution.
    flat_1 = TimeDistributed(Flatten())(x)
    dense_1 = TimeDistributed(Dense(units=64 * 64, activation='softmax'))(flat_1)
    x = TimeDistributed(Dropout(0.5))(dense_1)
    a = Reshape(target_shape=(10, 64, 64, 1))(x)

    # Custom loss layer: adds an auxiliary penalty encouraging the attention
    # mass summed over time to approach 1 at every pixel (ATTN_COEFF-weighted).
    class CustomLossLayer(Layer):
        def __init__(self, **kwargs):
            self.is_placeholder = True
            super(CustomLossLayer, self).__init__(**kwargs)

        def build(self, input_shape):
            # Create a trainable weight variable for this layer.
            super(CustomLossLayer, self).build(input_shape)  # Be sure to call this somewhere!

        def attn_loss(self, a):
            attn_loss = K.sum(K.flatten(K.square(1 - K.sum(a, axis=1))), axis=-1)
            return ATTN_COEFF * K.mean(attn_loss)

        def call(self, inputs):
            x = inputs
            print (inputs.shape)
            loss = self.attn_loss(x)
            self.add_loss(loss, inputs=inputs)
            # We do use this output.
            return x

        def compute_output_shape(self, input_shape):
            return (input_shape[0], 10, 64, 64, 1)

    x = CustomLossLayer()(a)

    # Broadcast the 1-channel attention map across the 64 feature channels
    # (flatten -> repeat -> permute -> reshape), then gate the features.
    x = Flatten()(x)
    x = RepeatVector(n=64)(x)
    x = Permute((2, 1))(x)
    x = Reshape(target_shape=(10, 64, 64, 64))(x)
    attn_1 = multiply([out_3, x])

    # 10x64x64 -> 10x128x128, 3-channel output in [-1, 1] via tanh.
    conv_4 = Conv3DTranspose(filters=3,
                             kernel_size=(3, 11, 11),
                             strides=(1, 2, 2),
                             padding='same')(attn_1)
    x = TimeDistributed(BatchNormalization())(conv_4)
    x = TimeDistributed(Activation('tanh'))(x)
    predictions = TimeDistributed(Dropout(0.5))(x)
    # x = TimeDistributed(Dropout(0.5))(x)

    model = Model(inputs=inputs, outputs=predictions)
    return model
def set_trainability(model, trainable):
    """Freeze or unfreeze a model and every one of its layers in place."""
    model.trainable = trainable
    for sub_layer in model.layers:
        sub_layer.trainable = trainable
def autoencoder_model(encoder, decoder):
    """Chain the encoder and decoder into a single end-to-end autoencoder."""
    stacked = Sequential()
    stacked.add(encoder)
    stacked.add(decoder)
    return stacked
def _tile_frames(batch):
    """Unroll a (videos, frames, h, w, c) batch and tile all frames into a
    single near-square image grid (row-major frame order)."""
    n_frames = batch.shape[0] * batch.shape[1]
    # Row-major reshape is equivalent to copying batch[i, j] frame by frame.
    frames = batch.reshape((n_frames,) + batch.shape[2:])
    width = int(math.sqrt(n_frames))
    height = int(math.ceil(float(n_frames) / width))
    h, w, c = frames.shape[1:]
    image = np.zeros((height * h, width * w, c), dtype=batch.dtype)
    for idx, frame in enumerate(frames):
        row, col = divmod(idx, width)
        image[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = frame
    return image


def combine_images(X, y, generated_images):
    """Build tiled preview images for inputs, ground truth and predictions.

    The original implementation repeated the same unroll-and-tile loop three
    times; it is factored into the `_tile_frames` helper.

    :param X: conditioning frames, shape (batch, frames, h, w, c)
    :param y: ground-truth future frames, same layout
    :param generated_images: predicted frames, same layout
    :return: (orig_image, truth_image, image) tiled arrays, in that order
    """
    orig_image = _tile_frames(X)
    truth_image = _tile_frames(y)
    image = _tile_frames(generated_images)
    return orig_image, truth_image, image
def load_weights(weights_file, model):
    # Load pre-trained weights from file into the model, in place.
    model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
    """Optionally print/plot/serialize the models and pre-load weight files,
    driven by the PRINT_MODEL_SUMMARY / SAVE_MODEL / PLOT_MODEL config flags.
    Pass the string "None" for a weights argument to skip loading it."""
    if PRINT_MODEL_SUMMARY:
        print (encoder.summary())
        print (decoder.summary())
        print (autoencoder.summary())
        # exit(0)

    # Save model to file
    if SAVE_MODEL:
        print ("Saving models to file...")
        model_json = encoder.to_json()
        with open(os.path.join(MODEL_DIR, "encoder.json"), "w") as json_file:
            json_file.write(model_json)

        model_json = decoder.to_json()
        with open(os.path.join(MODEL_DIR, "decoder.json"), "w") as json_file:
            json_file.write(model_json)

        model_json = autoencoder.to_json()
        with open(os.path.join(MODEL_DIR, "autoencoder.json"), "w") as json_file:
            json_file.write(model_json)

        # NOTE(review): PLOT_MODEL appears to be handled inside the SAVE_MODEL
        # branch here — confirm this nesting matches the intended behavior.
        if PLOT_MODEL:
            plot_model(encoder, to_file=os.path.join(MODEL_DIR, 'encoder.png'), show_shapes=True)
            plot_model(decoder, to_file=os.path.join(MODEL_DIR, 'decoder.png'), show_shapes=True)
            plot_model(autoencoder, to_file=os.path.join(MODEL_DIR, 'autoencoder.png'), show_shapes=True)

    if ENC_WEIGHTS != "None":
        print ("Pre-loading encoder with weights...")
        load_weights(ENC_WEIGHTS, encoder)
    if DEC_WEIGHTS != "None":
        print ("Pre-loading decoder with weights...")
        load_weights(DEC_WEIGHTS, decoder)
def load_X(videos_list, index, data_dir):
    """Load one batch of video clips as a (BATCH_SIZE, VIDEO_LENGTH) + IMG_SIZE
    array, with pixel values scaled from [0, 255] to [-1, 1].

    ``videos_list[row, j]`` holds the global frame number for frame j of a
    clip; ``index`` selects which batch of rows to read.
    """
    X = np.zeros((BATCH_SIZE, VIDEO_LENGTH,) + IMG_SIZE)
    for i in range(BATCH_SIZE):
        for j in range(VIDEO_LENGTH):
            filename = "frame_" + str(videos_list[(index*BATCH_SIZE + i), j]) + ".png"
            im_file = os.path.join(data_dir, filename)
            try:
                # NOTE: cv2.imread returns None on failure, making the
                # .astype call below raise AttributeError — caught here, so a
                # missing frame leaves that slot as zeros (best-effort load).
                frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
                X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
            except AttributeError as e:
                print (im_file)
                print (e)
    return X
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
    """Train the spatio-temporal autoencoder on sliding-window video clips.

    Args:
        BATCH_SIZE: number of clips per training batch.
        ENC_WEIGHTS: path to encoder weights, or the string "None" to skip.
        DEC_WEIGHTS: path to decoder weights, or the string "None" to skip.
    """
    print ("Loading data...")
    frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_128.hkl'))

    # Build video progressions
    videos_list = []
    start_frame_index = 1
    end_frame_index = VIDEO_LENGTH + 1
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        # A window is a valid clip only when all frames in it carry the same
        # source label (i.e. come from one video). Valid windows advance by
        # one frame, so training clips overlap.
        if (len(set(frame_list)) == 1):
            videos_list.append(range(start_frame_index, end_frame_index))
            start_frame_index = start_frame_index + 1
            end_frame_index = end_frame_index + 1
        else:
            # Skip past the boundary between two source videos.
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + VIDEO_LENGTH

    videos_list = np.asarray(videos_list, dtype=np.int32)
    n_videos = videos_list.shape[0]

    if SHUFFLE:
        # Shuffle images to aid generalization
        videos_list = np.random.permutation(videos_list)

    # Build the Spatio-temporal Autoencoder
    print ("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)

    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
    autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)

    NB_ITERATIONS = int(n_videos/BATCH_SIZE)

    # Setup TensorBoard Callback
    TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
    LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS.set_model(autoencoder)

    print ("Beginning Training...")
    # Begin Training
    for epoch in range(NB_EPOCHS):
        print("\n\nEpoch ", epoch)
        loss = []

        # Set learning rate every epoch
        LRS.on_epoch_begin(epoch=epoch)
        lr = K.get_value(autoencoder.optimizer.lr)
        print ("Learning rate: " + str(lr))

        for index in range(NB_ITERATIONS):
            # Train Autoencoder
            X = load_X(videos_list, index, DATA_DIR)
            # First half of each clip is the input; second half the target.
            X_train = X[:, 0 : int(VIDEO_LENGTH/2)]
            y_train = X[:, int(VIDEO_LENGTH/2) :]
            loss.append(autoencoder.train_on_batch(X_train, y_train))

            # In-place text progress bar (40 '=' when complete).
            arrow = int(index / (NB_ITERATIONS / 40))
            stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + " " +
                         "loss: " + str(loss[len(loss)-1]) +
                         "\t [" + "{0}>".format("="*(arrow)))
            stdout.flush()

            if SAVE_GENERATED_IMAGES:
                # Save generated images to file
                predicted_images = autoencoder.predict(X_train, verbose=0)
                orig_image, truth_image, pred_image = combine_images(X_train, y_train, predicted_images)
                # Undo the [-1, 1] normalization before writing PNGs.
                pred_image = pred_image * 127.5 + 127.5
                orig_image = orig_image * 127.5 + 127.5
                truth_image = truth_image * 127.5 + 127.5
                # Input/target images do not change across epochs, so write
                # them only once (first epoch); predictions every iteration.
                if epoch == 0 :
                    cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
                    cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_image)
                cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_pred.png"), pred_image)

        # then after each epoch/iteration
        avg_loss = sum(loss)/len(loss)
        logs = {'loss': avg_loss}
        TC.on_epoch_end(epoch, logs)

        # Log the losses
        with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
            log_file.write("{\"epoch\":%d, \"d_loss\":%f};\n" % (epoch, avg_loss))

        print("\nAvg loss: " + str(avg_loss))

        # Save model weights per epoch to file
        encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
        decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)

    # End TensorBoard Callback
    TC.on_train_end('_')
def test(ENC_WEIGHTS, DEC_WEIGHTS):
    """Evaluate the autoencoder on the test set and dump result images.

    Writes per-batch original/truth/predicted PNGs and intermediate
    activation dumps to TEST_RESULTS_DIR, then prints the average loss.

    Args:
        ENC_WEIGHTS: path to encoder weights, or the string "None" to skip.
        DEC_WEIGHTS: path to decoder weights, or the string "None" to skip.
    """
    # Create models
    print ("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)
    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
    autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)

    # Print layer indices; used to pick the tap point in
    # build_intermediate_model below.
    for i in range(len(decoder.layers)):
        print (decoder.layers[i], str(i))
    # exit(0)

    def build_intermediate_model(encoder, decoder):
        # Tap the decoder at layer 19 to expose its intermediate output.
        # convlstm-13, conv3d-25
        intermediate_decoder_1 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[19].output)
        # intermediate_decoder_2 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[12].output)

        imodel_1 = Sequential()
        imodel_1.add(encoder)
        imodel_1.add(intermediate_decoder_1)

        # imodel_2 = Sequential()
        # imodel_2.add(encoder)
        # imodel_2.add(intermediate_decoder_2)

        return imodel_1

    imodel_1 = build_intermediate_model(encoder, decoder)
    imodel_1.compile(loss='mean_squared_error', optimizer=OPTIM)
    # imodel_2.compile(loss='mean_squared_error', optimizer=OPTIM)
    # imodel = build_intermediate_model(encoder, decoder)

    # Build video progressions
    frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_128.hkl'))
    videos_list = []
    start_frame_index = 1
    end_frame_index = VIDEO_LENGTH + 1
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        if (len(set(frame_list)) == 1):
            videos_list.append(range(start_frame_index, end_frame_index))
            # Unlike train(), advance by a full clip so test windows do not
            # overlap.
            start_frame_index = start_frame_index + VIDEO_LENGTH
            end_frame_index = end_frame_index + VIDEO_LENGTH
        else:
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + VIDEO_LENGTH

    videos_list = np.asarray(videos_list, dtype=np.int32)
    n_videos = videos_list.shape[0]

    # Test model by making predictions
    loss = []
    # NOTE(review): BATCH_SIZE here is the module-level global, not a
    # parameter of test() (train() takes it as an argument).
    NB_ITERATIONS = int(n_videos / BATCH_SIZE)
    for index in range(NB_ITERATIONS):
        # Test Autoencoder
        X = load_X(videos_list, index, TEST_DATA_DIR)
        X_test = X[:, 0: int(VIDEO_LENGTH / 2)]
        y_test = X[:, int(VIDEO_LENGTH / 2):]
        loss.append(autoencoder.test_on_batch(X_test, y_test))
        y_pred = autoencoder.predict_on_batch(X_test)
        a_pred_1 = imodel_1.predict_on_batch(X_test)
        # a_pred_2 = imodel_2.predict_on_batch(X_test)

        arrow = int(index / (NB_ITERATIONS / 40))
        stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
                     "loss: " + str(loss[len(loss) - 1]) +
                     "\t [" + "{0}>".format("=" * (arrow)))
        stdout.flush()

        orig_image, truth_image, pred_image = combine_images(X_test, y_test, y_pred)
        # Undo the [-1, 1] normalization before writing PNGs.
        pred_image = pred_image * 127.5 + 127.5
        orig_image = orig_image * 127.5 + 127.5
        truth_image = truth_image * 127.5 + 127.5
        cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_orig.png"), orig_image)
        cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_truth.png"), truth_image)
        cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_pred.png"), pred_image)

        #------------------------------------------
        # Save intermediate activations for offline inspection.
        # NOTE(review): the hard-coded reshape assumes batch size 10,
        # 10 output frames and 64x64x1 activations -- confirm against the
        # model configuration before changing BATCH_SIZE.
        a_pred_1 = np.reshape(a_pred_1, newshape=(10, 10, 64, 64, 1))
        np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) +'.npy'), a_pred_1)
        orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_1)
        # pred_image = (pred_image*100) * 127.5 + 127.5
        # y_pred = y_pred * 127.5 + 127.5
        # np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) + '.npy'), y_pred)
        # cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_1.png"), pred_image)
        # a_pred_2 = np.reshape(a_pred_2, newshape=(10, 10, 16, 16, 1))
        # with open('attention_weights.txt', mode='w') as file:
        #     file.write(str(a_pred_2[0, 4]))
        # orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_2)
        # pred_image = (pred_image*100) * 127.5 + 127.5
        # cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_2.png"), pred_image)

    avg_loss = sum(loss) / len(loss)
    print("\nAvg loss: " + str(avg_loss))
def get_args():
    """Build and run the command line parser for this script.

    Recognized options: --mode ("train" or "test"), --enc_weights,
    --dec_weights (both default to the string "None"), --batch_size
    (defaults to the module-level BATCH_SIZE) and --nice.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str)
    parser.add_argument("--enc_weights", type=str, default="None")
    parser.add_argument("--dec_weights", type=str, default="None")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
    parser.add_argument("--nice", dest="nice", action="store_true")
    parser.set_defaults(nice=False)
    return parser.parse_args()
if __name__ == "__main__":
    # Dispatch on the requested mode; "train" and "test" are mutually
    # exclusive values of a single string option.
    args = get_args()
    if args.mode == "train":
        train(BATCH_SIZE=args.batch_size,
              ENC_WEIGHTS=args.enc_weights,
              DEC_WEIGHTS=args.dec_weights)
    elif args.mode == "test":
        test(ENC_WEIGHTS=args.enc_weights,
             DEC_WEIGHTS=args.dec_weights)
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
# Channel id 0 is reserved for the multiplexing control channel; the
# implicitly-opened data channel uses id 1.
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1

# Multiplexing control block opcodes (3 most significant bits of the
# first byte of a control block).
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4

# Channel ids are encoded in at most 29 bits (4-byte encoding).
_MAX_CHANNEL_ID = 2 ** 29 - 1

_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024

# Handshake encodings carried in AddChannelRequest/Response.
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1

# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
    common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}

# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000

_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010

_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
    """Exception in handling multiplexing extension.

    Raised on internal inconsistencies (states that should be unreachable)
    rather than on peer-visible protocol errors.
    """

    pass
# Temporary: remove once all multiplexing flows are implemented.
class MuxNotImplementedException(Exception):
    """Raised when a flow enters unimplemented code path."""

    pass
class LogicalConnectionClosedException(Exception):
    """Raised when logical connection is gracefully closed.

    _LogicalConnection.read() raises this after the closing handshake for
    the logical channel has been received.
    """

    pass
class PhysicalConnectionError(Exception):
    """Raised when there is a physical connection error.

    Carries the DropChannel reason code and an optional human-readable
    message describing the failure.
    """

    def __init__(self, drop_code, message=''):
        detail = 'code=%d, message=%r' % (drop_code, message)
        super(PhysicalConnectionError, self).__init__(detail)
        self.drop_code = drop_code
        self.message = message
class LogicalChannelError(Exception):
    """Raised when there is a logical channel error.

    Carries the affected channel id, the DropChannel reason code and an
    optional human-readable message.
    """

    def __init__(self, channel_id, drop_code, message=''):
        detail = 'channel_id=%d, code=%d, message=%r' % (
            channel_id, drop_code, message)
        super(LogicalChannelError, self).__init__(detail)
        self.channel_id = channel_id
        self.drop_code = drop_code
        self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
def _encode_number(number):
    # Numbers reuse the WebSocket payload-length encoding, without the
    # mask bit.
    return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
                                 encoding=0, rejected=False):
    """Serialize an AddChannelResponse multiplexing control block.

    Raises:
        ValueError: if encoding is neither 0 (identity) nor 1 (delta).
    """
    if encoding != 0 and encoding != 1:
        raise ValueError('Invalid encoding %d' % encoding)

    # Opcode in the top 3 bits, reject flag in bit 4, encoding in the
    # low 2 bits.
    first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
                  (rejected << 4) | encoding)
    return (chr(first_byte) +
            _encode_channel_id(channel_id) +
            _encode_number(len(encoded_handshake)) +
            encoded_handshake)
def _create_drop_channel(channel_id, code=None, message=''):
    """Serialize a DropChannel multiplexing control block.

    Raises:
        ValueError: if a message is given without a reason code.
    """
    if code is None and len(message) > 0:
        raise ValueError('Code must be specified if message is specified')

    block = chr(_MUX_OPCODE_DROP_CHANNEL << 5) + _encode_channel_id(channel_id)
    if code is None:
        # No reason: a zero-length reason field.
        return block + _encode_number(0)

    reason = struct.pack('!H', code) + message
    return block + _encode_number(len(reason)) + reason
def _create_flow_control(channel_id, replenished_quota):
    """Serialize a FlowControl multiplexing control block."""
    header = chr(_MUX_OPCODE_FLOW_CONTROL << 5)
    return (header +
            _encode_channel_id(channel_id) +
            _encode_number(replenished_quota))
def _create_new_channel_slot(slots, send_quota):
    """Serialize a NewChannelSlot multiplexing control block.

    Raises:
        ValueError: if slots or send_quota is negative.
    """
    if slots < 0 or send_quota < 0:
        raise ValueError('slots and send_quota must be non-negative.')

    header = chr(_MUX_OPCODE_NEW_CHANNEL_SLOT << 5)
    return header + _encode_number(slots) + _encode_number(send_quota)
def _create_fallback_new_channel_slot():
    """Serialize a NewChannelSlot block with the fallback (F) flag set.

    A fallback slot carries zero slots and zero quota.
    """
    first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1  # Set the F flag
    return chr(first_byte) + _encode_number(0) + _encode_number(0)
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
    """A class that parses multiplexed frame payload."""

    def __init__(self, payload):
        self._data = payload
        # Cursor into self._data; advanced by every read_* method.
        self._read_position = 0
        self._logger = util.get_class_logger(self)

    def read_channel_id(self):
        """Reads channel id.

        Raises:
            ValueError: when the payload doesn't contain
                valid channel id.
        """

        remaining_length = len(self._data) - self._read_position
        pos = self._read_position
        if remaining_length == 0:
            raise ValueError('Invalid channel id format')

        channel_id = ord(self._data[pos])
        channel_id_length = 1
        # The number of leading 1 bits in the first byte selects the
        # encoding width: 0xxxxxxx = 1 byte, 10... = 2, 110... = 3,
        # 1110... = 4.
        if channel_id & 0xe0 == 0xe0:
            if remaining_length < 4:
                raise ValueError('Invalid channel id format')
            channel_id = struct.unpack('!L',
                                       self._data[pos:pos+4])[0] & 0x1fffffff
            channel_id_length = 4
        elif channel_id & 0xc0 == 0xc0:
            if remaining_length < 3:
                raise ValueError('Invalid channel id format')
            channel_id = (((channel_id & 0x1f) << 16) +
                          struct.unpack('!H', self._data[pos+1:pos+3])[0])
            channel_id_length = 3
        elif channel_id & 0x80 == 0x80:
            if remaining_length < 2:
                raise ValueError('Invalid channel id format')
            channel_id = struct.unpack('!H',
                                       self._data[pos:pos+2])[0] & 0x3fff
            channel_id_length = 2
        self._read_position += channel_id_length

        return channel_id

    def read_inner_frame(self):
        """Reads an inner frame.

        Raises:
            PhysicalConnectionError: when the inner frame is invalid.
        """

        if len(self._data) == self._read_position:
            raise PhysicalConnectionError(
                _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)

        # One-byte inner frame header: FIN, RSV1-3 and opcode. No length
        # field -- the payload extends to the end of the encapsulating
        # message.
        bits = ord(self._data[self._read_position])
        self._read_position += 1
        fin = (bits & 0x80) == 0x80
        rsv1 = (bits & 0x40) == 0x40
        rsv2 = (bits & 0x20) == 0x20
        rsv3 = (bits & 0x10) == 0x10
        opcode = bits & 0xf
        payload = self.remaining_data()
        # Consume rest of the message which is payload data of the original
        # frame.
        self._read_position = len(self._data)
        return fin, rsv1, rsv2, rsv3, opcode, payload

    def _read_number(self):
        # Numbers use the WebSocket payload-length encoding: one byte for
        # values <= 125, a 126 marker + 2 bytes, or a 127 marker + 8 bytes.
        # Minimal-length encoding is enforced.
        if self._read_position + 1 > len(self._data):
            raise ValueError(
                'Cannot read the first byte of number field')

        number = ord(self._data[self._read_position])
        if number & 0x80 == 0x80:
            raise ValueError(
                'The most significant bit of the first byte of number should '
                'be unset')
        self._read_position += 1
        pos = self._read_position
        if number == 127:
            if pos + 8 > len(self._data):
                raise ValueError('Invalid number field')
            self._read_position += 8
            number = struct.unpack('!Q', self._data[pos:pos+8])[0]
            if number > 0x7FFFFFFFFFFFFFFF:
                raise ValueError('Encoded number(%d) >= 2^63' % number)
            if number <= 0xFFFF:
                raise ValueError(
                    '%d should not be encoded by 9 bytes encoding' % number)
            return number
        if number == 126:
            if pos + 2 > len(self._data):
                raise ValueError('Invalid number field')
            self._read_position += 2
            number = struct.unpack('!H', self._data[pos:pos+2])[0]
            if number <= 125:
                raise ValueError(
                    '%d should not be encoded by 3 bytes encoding' % number)
        return number

    def _read_size_and_contents(self):
        """Reads data that consists of followings:
            - the size of the contents encoded the same way as payload length
              of the WebSocket Protocol with 1 bit padding at the head.
            - the contents.
        """

        try:
            size = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))
        pos = self._read_position
        if pos + size > len(self._data):
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Cannot read %d bytes data' % size)

        self._read_position += size
        return self._data[pos:pos+size]

    def _read_add_channel_request(self, first_byte, control_block):
        # Bits 2-4 of the first byte are reserved in AddChannelRequest.
        reserved = (first_byte >> 2) & 0x7
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        # Invalid encoding will be handled by MuxHandler.
        encoding = first_byte & 0x3
        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        control_block.encoding = encoding
        encoded_handshake = self._read_size_and_contents()
        control_block.encoded_handshake = encoded_handshake
        return control_block

    def _read_add_channel_response(self, first_byte, control_block):
        reserved = (first_byte >> 2) & 0x3
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        control_block.accepted = (first_byte >> 4) & 1
        control_block.encoding = first_byte & 0x3
        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        control_block.encoded_handshake = self._read_size_and_contents()
        return control_block

    def _read_flow_control(self, first_byte, control_block):
        reserved = first_byte & 0x1f
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        try:
            control_block.channel_id = self.read_channel_id()
            control_block.send_quota = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))

        return control_block

    def _read_drop_channel(self, first_byte, control_block):
        reserved = first_byte & 0x1f
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        # The reason field is either empty or a 2-byte code followed by an
        # optional message; a 1-byte reason is malformed.
        reason = self._read_size_and_contents()
        if len(reason) == 0:
            control_block.drop_code = None
            control_block.drop_message = ''
        elif len(reason) >= 2:
            control_block.drop_code = struct.unpack('!H', reason[:2])[0]
            control_block.drop_message = reason[2:]
        else:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that conains only 1-byte reason')
        return control_block

    def _read_new_channel_slot(self, first_byte, control_block):
        reserved = first_byte & 0x1e
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')
        # Lowest bit of the first byte is the fallback (F) flag.
        control_block.fallback = first_byte & 1
        try:
            control_block.slots = self._read_number()
            control_block.send_quota = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))
        return control_block

    def read_control_blocks(self):
        """Reads control block(s).

        Raises:
           PhysicalConnectionError: when the payload contains invalid control
               block(s).
           StopIteration: when no control blocks left.
        """

        while self._read_position < len(self._data):
            # The 3 most significant bits of the first byte carry the opcode;
            # the remaining 5 bits are opcode-specific.
            first_byte = ord(self._data[self._read_position])
            self._read_position += 1
            opcode = (first_byte >> 5) & 0x7
            control_block = _ControlBlock(opcode=opcode)
            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
                yield self._read_add_channel_request(first_byte, control_block)
            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                yield self._read_add_channel_response(
                    first_byte, control_block)
            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
                yield self._read_flow_control(first_byte, control_block)
            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
                yield self._read_drop_channel(first_byte, control_block)
            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
                yield self._read_new_channel_slot(first_byte, control_block)
            else:
                raise PhysicalConnectionError(
                    _DROP_CODE_UNKNOWN_MUX_OPCODE,
                    'Invalid opcode %d' % opcode)

        assert self._read_position == len(self._data)
        raise StopIteration

    def remaining_data(self):
        """Returns remaining data."""

        return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
    """Mimics mod_python mp_conn."""

    # For details, see the comment of set_read_state().
    STATE_ACTIVE = 1
    STATE_GRACEFULLY_CLOSED = 2
    STATE_TERMINATED = 3

    def __init__(self, mux_handler, channel_id):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
            channel_id: channel id of this connection.
        """

        self._mux_handler = mux_handler
        self._channel_id = channel_id
        # Buffer of frame payload dispatched to this channel but not yet
        # consumed by read().
        self._incoming_data = ''

        # - Protects _waiting_write_completion
        # - Signals the thread waiting for completion of write by mux handler
        self._write_condition = threading.Condition()
        self._waiting_write_completion = False

        # Protects _incoming_data and _read_state; signals readers when data
        # arrives or the state changes.
        self._read_condition = threading.Condition()
        self._read_state = self.STATE_ACTIVE

    def get_local_addr(self):
        """Getter to mimic mp_conn.local_addr."""

        return self._mux_handler.physical_connection.get_local_addr()
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Getter to mimic mp_conn.remote_addr."""

        return self._mux_handler.physical_connection.get_remote_addr()
    remote_addr = property(get_remote_addr)

    def get_memorized_lines(self):
        """Gets memorized lines. Not supported."""

        raise MuxUnexpectedException('_LogicalConnection does not support '
                                     'get_memorized_lines')

    def write(self, data):
        """Writes data. mux_handler sends data asynchronously. The caller will
        be suspended until write done.

        Args:
            data: data to be written.

        Raises:
            MuxUnexpectedException: when called before finishing the previous
                write.
        """

        try:
            self._write_condition.acquire()
            if self._waiting_write_completion:
                raise MuxUnexpectedException(
                    'Logical connection %d is already waiting the completion '
                    'of write' % self._channel_id)

            self._waiting_write_completion = True
            self._mux_handler.send_data(self._channel_id, data)
            # Block until on_write_data_done (or on_writer_done) notifies.
            self._write_condition.wait()
            # TODO(tyoshino): Raise an exception if woke up by on_writer_done.
        finally:
            self._write_condition.release()

    def write_control_data(self, data):
        """Writes data via the control channel. Don't wait finishing write
        because this method can be called by mux dispatcher.

        Args:
            data: data to be written.
        """

        self._mux_handler.send_control_data(data)

    def on_write_data_done(self):
        """Called when sending data is completed."""

        try:
            self._write_condition.acquire()
            if not self._waiting_write_completion:
                raise MuxUnexpectedException(
                    'Invalid call of on_write_data_done for logical '
                    'connection %d' % self._channel_id)
            self._waiting_write_completion = False
            self._write_condition.notify()
        finally:
            self._write_condition.release()

    def on_writer_done(self):
        """Called by the mux handler when the writer thread has finished."""

        try:
            self._write_condition.acquire()
            # Unblock any writer even though its write never completed.
            self._waiting_write_completion = False
            self._write_condition.notify()
        finally:
            self._write_condition.release()

    def append_frame_data(self, frame_data):
        """Appends incoming frame data. Called when mux_handler dispatches
        frame data to the corresponding application.

        Args:
            frame_data: incoming frame data.
        """

        self._read_condition.acquire()
        self._incoming_data += frame_data
        self._read_condition.notify()
        self._read_condition.release()

    def read(self, length):
        """Reads data. Blocks until enough data has arrived via physical
        connection.

        Args:
            length: length of data to be read.
        Raises:
            LogicalConnectionClosedException: when closing handshake for this
                logical channel has been received.
            ConnectionTerminatedException: when the physical connection has
                closed, or an error is caused on the reader thread.
        """

        self._read_condition.acquire()
        # Wait until enough data buffers up or the channel leaves the
        # active state.
        while (self._read_state == self.STATE_ACTIVE and
               len(self._incoming_data) < length):
            self._read_condition.wait()

        try:
            if self._read_state == self.STATE_GRACEFULLY_CLOSED:
                raise LogicalConnectionClosedException(
                    'Logical channel %d has closed.' % self._channel_id)
            elif self._read_state == self.STATE_TERMINATED:
                raise ConnectionTerminatedException(
                    'Receiving %d byte failed. Logical channel (%d) closed' %
                    (length, self._channel_id))

            value = self._incoming_data[:length]
            self._incoming_data = self._incoming_data[length:]
        finally:
            self._read_condition.release()

        return value

    def set_read_state(self, new_state):
        """Sets the state of this connection. Called when an event for this
        connection has occurred.

        Args:
            new_state: state to be set. new_state must be one of followings:
            - STATE_GRACEFULLY_CLOSED: when closing handshake for this
                connection has been received.
            - STATE_TERMINATED: when the physical connection has closed or
                DropChannel of this connection has received.
        """

        self._read_condition.acquire()
        self._read_state = new_state
        self._read_condition.notify()
        self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
    """A class that holds the context of inner message fragmentation and
    builds a message from fragmented inner frame(s).

    Implemented as a small state machine: _frame_handler always points at
    the method that should process the next incoming frame. Control and
    data message reassembly are tracked separately because a control
    message may be interleaved inside a fragmented data message.
    """

    def __init__(self):
        self._control_opcode = None
        self._pending_control_fragments = []
        self._message_opcode = None
        self._pending_message_fragments = []
        # Current state of the fragmentation state machine.
        self._frame_handler = self._handle_first

    def _handle_first(self, frame):
        # First frame of a message must not be a continuation frame.
        if frame.opcode == common.OPCODE_CONTINUATION:
            raise InvalidFrameException('Sending invalid continuation opcode')

        if common.is_control_opcode(frame.opcode):
            return self._process_first_fragmented_control(frame)
        else:
            return self._process_first_fragmented_message(frame)

    def _process_first_fragmented_control(self, frame):
        self._control_opcode = frame.opcode
        self._pending_control_fragments.append(frame.payload)
        if not frame.fin:
            self._frame_handler = self._handle_fragmented_control
            return None
        return self._reassemble_fragmented_control()

    def _process_first_fragmented_message(self, frame):
        self._message_opcode = frame.opcode
        self._pending_message_fragments.append(frame.payload)
        if not frame.fin:
            self._frame_handler = self._handle_fragmented_message
            return None
        return self._reassemble_fragmented_message()

    def _handle_fragmented_control(self, frame):
        # Once a fragmented control message started, only continuation
        # frames may follow until it finishes.
        if frame.opcode != common.OPCODE_CONTINUATION:
            raise InvalidFrameException(
                'Sending invalid opcode %d while sending fragmented control '
                'message' % frame.opcode)
        self._pending_control_fragments.append(frame.payload)
        if not frame.fin:
            return None
        return self._reassemble_fragmented_control()

    def _reassemble_fragmented_control(self):
        opcode = self._control_opcode
        payload = ''.join(self._pending_control_fragments)
        self._control_opcode = None
        self._pending_control_fragments = []
        # If a data message was in flight when this control message was
        # interleaved, resume reassembling it; otherwise reset.
        if self._message_opcode is not None:
            self._frame_handler = self._handle_fragmented_message
        else:
            self._frame_handler = self._handle_first
        return _InnerMessage(opcode, payload)

    def _handle_fragmented_message(self, frame):
        # Sender can interleave a control message while sending fragmented
        # messages.
        if common.is_control_opcode(frame.opcode):
            if self._control_opcode is not None:
                raise MuxUnexpectedException(
                    'Should not reach here(Bug in builder)')
            return self._process_first_fragmented_control(frame)

        if frame.opcode != common.OPCODE_CONTINUATION:
            raise InvalidFrameException(
                'Sending invalid opcode %d while sending fragmented message' %
                frame.opcode)
        self._pending_message_fragments.append(frame.payload)
        if not frame.fin:
            return None
        return self._reassemble_fragmented_message()

    def _reassemble_fragmented_message(self):
        opcode = self._message_opcode
        payload = ''.join(self._pending_message_fragments)
        self._message_opcode = None
        self._pending_message_fragments = []
        self._frame_handler = self._handle_first
        return _InnerMessage(opcode, payload)

    def build(self, frame):
        """Build an inner message. Returns an _InnerMessage instance when
        the given frame is the last fragmented frame. Returns None otherwise.

        Args:
            frame: an inner frame.

        Raises:
            InvalidFrameException: when received invalid opcode. (e.g.
                receiving non continuation data opcode but the fin flag of
                the previous inner frame was not set.)
        """

        return self._frame_handler(frame)
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
' frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
# An inner frame will be fragmented if there is no enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self.request._channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
In addition to call Stream._receive_frame, this method adds the amount
of payload to receiving quota and sends FlowControl to the client.
We need to do it here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
# Replenish extra one octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
    """A structure that holds data to be sent via physical connection and
    origin of the data.
    """

    def __init__(self, channel_id, data):
        # Logical channel id the data originated from (the control channel
        # id for multiplexing control blocks).
        self.channel_id = channel_id
        # Serialized bytes to be written to the physical connection.
        self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
thread reads data from the same socket at a time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
# Don't wait the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
'Failed to close the physical connection: %r' % e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
    """A thread that is responsible for reading data from physical connection.
    """

    def __init__(self, mux_handler):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
        """
        threading.Thread.__init__(self)
        self._logger = util.get_class_logger(self)
        self._mux_handler = mux_handler
        self.setDaemon(True)

    def run(self):
        """Reads messages off the physical stream and dispatches them until
        the connection terminates or an unrecoverable error occurs.
        """
        while True:
            try:
                physical_stream = self._mux_handler.physical_stream
                message = physical_stream.receive_message()
                if message is None:
                    # Connection closed; stop reading.
                    break
                # Below happens only when a data message is received.
                opcode = physical_stream.get_last_received_opcode()
                if opcode != common.OPCODE_BINARY:
                    self._mux_handler.fail_physical_connection(
                        _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
                        'Received a text message on physical connection')
                    break

            except ConnectionTerminatedException, e:
                self._logger.debug('%s', e)
                break

            try:
                self._mux_handler.dispatch_message(message)
            except PhysicalConnectionError, e:
                self._mux_handler.fail_physical_connection(
                    e.drop_code, e.message)
                break
            except LogicalChannelError, e:
                # A logical channel failure is not fatal to the physical
                # connection; keep reading.
                self._mux_handler.fail_logical_channel(
                    e.channel_id, e.drop_code, e.message)
            except Exception, e:
                # NOTE(review): unexpected errors are only logged and the
                # reader exits without failing the physical connection --
                # confirm this is intended.
                self._logger.debug(traceback.format_exc())
                break

        self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
    """A thread that is responsible for running the corresponding application
    handler.
    """

    def __init__(self, mux_handler, request):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
            request: _LogicalRequest instance.
        """
        threading.Thread.__init__(self)
        self._logger = util.get_class_logger(self)
        self._mux_handler = mux_handler
        self._request = request
        self.setDaemon(True)

    def run(self):
        """Runs the application handler for the logical channel and always
        notifies the mux handler when the worker finishes.
        """
        self._logger.debug('Logical channel worker started. (id=%d)' %
                           self._request.channel_id)
        try:
            # Non-critical exceptions will be handled by dispatcher.
            self._mux_handler.dispatcher.transfer_data(self._request)
        except LogicalChannelError, e:
            self._mux_handler.fail_logical_channel(
                e.channel_id, e.drop_code, e.message)
        finally:
            self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
    """Opening handshake processor for multiplexing."""

    # Placeholder value set as the Sec-WebSocket-Key header of logical
    # requests (see __init__ below).
    _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='

    def __init__(self, request, dispatcher, send_quota, receive_quota):
        """Constructs an instance.
        Args:
            request: _LogicalRequest instance.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
            send_quota: Initial send quota.
            receive_quota: Initial receive quota.
        """
        hybi.Handshaker.__init__(self, request, dispatcher)
        self._send_quota = send_quota
        self._receive_quota = receive_quota

        # Append headers which should not be included in handshake field of
        # AddChannelRequest.
        # TODO(bashi): Make sure whether we should raise exception when
        #     these headers are included already.
        request.headers_in[common.UPGRADE_HEADER] = (
            common.WEBSOCKET_UPGRADE_TYPE)
        request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
            str(common.VERSION_HYBI_LATEST))
        request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
            self._DUMMY_WEBSOCKET_KEY)

    def _create_stream(self, stream_options):
        """Override hybi.Handshaker._create_stream."""
        self._logger.debug('Creating logical stream for %d' %
                           self._request.channel_id)
        return _LogicalStream(
            self._request, stream_options, self._send_quota,
            self._receive_quota)

    def _create_handshake_response(self, accept):
        """Override hybi._create_handshake_response."""
        # Note: |accept| is unused here since the Sec-WebSocket-Accept
        # header is excluded from the encapsulated response (see below).
        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # Upgrade and Sec-WebSocket-Accept should be excluded.
        response.append('%s: %s\r\n' % (
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        if self._request.ws_protocol is not None:
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))
        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Override hybi.Handshaker._send_handshake."""
        # Don't send handshake response for the default channel
        if self._request.channel_id == _DEFAULT_CHANNEL_ID:
            return

        handshake_response = self._create_handshake_response(accept)
        frame_data = _create_add_channel_response(
            self._request.channel_id,
            handshake_response)
        self._logger.debug('Sending handshake response for %d: %r' %
                           (self._request.channel_id, frame_data))
        self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
    """A structure that holds information about logical channel.
    """

    def __init__(self, request, worker):
        # _LogicalRequest for this channel.
        self.request = request
        # _Worker thread running the application handler for this channel.
        self.worker = worker
        # Code/message to report in DropChannel when the channel terminates.
        self.drop_code = _DROP_CODE_NORMAL_CLOSURE
        self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
delta: headers should be overridden.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
class _MuxHandler(object):
    """Multiplexing handler. When a handler starts, it launches three
    threads; the reader thread, the writer thread, and a worker thread.

    The reader thread reads data from the physical stream, i.e., the
    ws_stream object of the underlying websocket connection. The reader
    thread interprets multiplexed frames and dispatches them to logical
    channels. Methods of this class are mostly called by the reader thread.

    The writer thread sends multiplexed frames which are created by
    logical channels via the physical connection.

    The worker thread launched at the starting point handles the
    "Implicitly Opened Connection". If multiplexing handler receives
    an AddChannelRequest and accepts it, the handler will launch a new worker
    thread and dispatch the request to it.
    """

    def __init__(self, request, dispatcher):
        """Constructs an instance.

        Args:
            request: mod_python request of the physical connection.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
        """
        self.original_request = request
        self.dispatcher = dispatcher
        self.physical_connection = request.connection
        self.physical_stream = request.ws_stream
        self._logger = util.get_class_logger(self)
        # Maps channel id -> _LogicalChannelData for active channels.
        self._logical_channels = {}
        self._logical_channels_condition = threading.Condition()
        # Holds client's initial quota
        self._channel_slots = collections.deque()
        self._handshake_base = None
        self._worker_done_notify_received = False
        self._reader = None
        self._writer = None

    def start(self):
        """Starts the handler.

        Raises:
            MuxUnexpectedException: when the handler already started, or when
                opening handshake of the default channel fails.
        """
        if self._reader or self._writer:
            raise MuxUnexpectedException('MuxHandler already started')

        self._reader = _PhysicalConnectionReader(self)
        self._writer = _PhysicalConnectionWriter(self)
        self._reader.start()
        self._writer.start()

        # Create "Implicitly Opened Connection".
        logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
        headers = copy.copy(self.original_request.headers_in)
        # Add extensions for logical channel.
        headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
            common.format_extensions(
                self.original_request.mux_processor.extensions()))
        self._handshake_base = _HandshakeDeltaBase(headers)
        logical_request = _LogicalRequest(
            _DEFAULT_CHANNEL_ID,
            self.original_request.method,
            self.original_request.uri,
            self.original_request.protocol,
            self._handshake_base.create_headers(),
            logical_connection)
        # Client's send quota for the implicitly opened connection is zero,
        # but we will send FlowControl later so set the initial quota to
        # _INITIAL_QUOTA_FOR_CLIENT.
        self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
        send_quota = self.original_request.mux_processor.quota()
        if not self._do_handshake_for_logical_request(
            logical_request, send_quota=send_quota):
            raise MuxUnexpectedException(
                'Failed handshake on the default channel id')
        self._add_logical_channel(logical_request)

        # Send FlowControl for the implicitly opened connection.
        frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
                                          _INITIAL_QUOTA_FOR_CLIENT)
        logical_request.connection.write_control_data(frame_data)

    def add_channel_slots(self, slots, send_quota):
        """Adds channel slots.

        Args:
            slots: number of slots to be added.
            send_quota: initial send quota for slots.
        """
        self._channel_slots.extend([send_quota] * slots)
        # Send NewChannelSlot to client.
        frame_data = _create_new_channel_slot(slots, send_quota)
        self.send_control_data(frame_data)

    def wait_until_done(self, timeout=None):
        """Waits until all workers are done. Returns False when timeout has
        occurred. Returns True on success.

        Args:
            timeout: timeout in sec.
        """
        self._logical_channels_condition.acquire()
        try:
            while len(self._logical_channels) > 0:
                self._logger.debug('Waiting workers(%d)...' %
                                   len(self._logical_channels))
                self._worker_done_notify_received = False
                self._logical_channels_condition.wait(timeout)
                if not self._worker_done_notify_received:
                    # Woken by timeout rather than notify_worker_done().
                    self._logger.debug('Waiting worker(s) timed out')
                    return False
        finally:
            self._logical_channels_condition.release()

        # Flush pending outgoing data
        self._writer.stop()
        self._writer.join()

        return True

    def notify_write_data_done(self, channel_id):
        """Called by the writer thread when a write operation has done.

        Args:
            channel_id: objective channel id.
        """
        try:
            self._logical_channels_condition.acquire()
            if channel_id in self._logical_channels:
                channel_data = self._logical_channels[channel_id]
                channel_data.request.connection.on_write_data_done()
            else:
                self._logger.debug('Seems that logical channel for %d has gone'
                                   % channel_id)
        finally:
            self._logical_channels_condition.release()

    def send_control_data(self, data):
        """Sends data via the control channel.

        Args:
            data: data to be sent.
        """
        self._writer.put_outgoing_data(_OutgoingData(
            channel_id=_CONTROL_CHANNEL_ID, data=data))

    def send_data(self, channel_id, data):
        """Sends data via given logical channel. This method is called by
        worker threads.

        Args:
            channel_id: objective channel id.
            data: data to be sent.
        """
        self._writer.put_outgoing_data(_OutgoingData(
            channel_id=channel_id, data=data))

    def _send_drop_channel(self, channel_id, code=None, message=''):
        # Sends a DropChannel control block for the given channel.
        frame_data = _create_drop_channel(channel_id, code, message)
        self._logger.debug(
            'Sending drop channel for channel id %d' % channel_id)
        self.send_control_data(frame_data)

    def _send_error_add_channel_response(self, channel_id, status=None):
        # Sends a rejecting AddChannelResponse whose encoded handshake is an
        # HTTP error status line.
        if status is None:
            status = common.HTTP_STATUS_BAD_REQUEST

        if status in _HTTP_BAD_RESPONSE_MESSAGES:
            message = _HTTP_BAD_RESPONSE_MESSAGES[status]
        else:
            self._logger.debug('Response message for %d is not found' % status)
            message = '???'

        response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
        frame_data = _create_add_channel_response(channel_id,
                                                  encoded_handshake=response,
                                                  encoding=0, rejected=True)
        self.send_control_data(frame_data)

    def _create_logical_request(self, block):
        # Builds a _LogicalRequest from an AddChannelRequest control block.
        if block.channel_id == _CONTROL_CHANNEL_ID:
            # TODO(bashi): Raise PhysicalConnectionError with code 2006
            # instead of MuxUnexpectedException.
            raise MuxUnexpectedException(
                'Received the control channel id (0) as objective channel '
                'id for AddChannel')

        if block.encoding > _HANDSHAKE_ENCODING_DELTA:
            raise PhysicalConnectionError(
                _DROP_CODE_UNKNOWN_REQUEST_ENCODING)

        method, path, version, headers = _parse_request_text(
            block.encoded_handshake)
        if block.encoding == _HANDSHAKE_ENCODING_DELTA:
            # Apply the delta on top of the current handshake base.
            headers = self._handshake_base.create_headers(headers)

        connection = _LogicalConnection(self, block.channel_id)
        request = _LogicalRequest(block.channel_id, method, path, version,
                                  headers, connection)
        return request

    def _do_handshake_for_logical_request(self, request, send_quota=0):
        # Runs the opening handshake for a logical channel. Returns True on
        # success; sends an error AddChannelResponse and returns False on
        # handshake failure.
        try:
            receive_quota = self._channel_slots.popleft()
        except IndexError:
            # No channel slot available: the client violated slot limits.
            raise LogicalChannelError(
                request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)

        handshaker = _MuxHandshaker(request, self.dispatcher,
                                    send_quota, receive_quota)
        try:
            handshaker.do_handshake()
        except handshake.VersionException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(
                request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return False
        except handshake.HandshakeException, e:
            # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
            # instead?
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id,
                                                  status=e.status)
            return False
        except handshake.AbortedByUserException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id)
            return False

        return True

    def _add_logical_channel(self, logical_request):
        # Registers the channel and starts its worker thread.
        try:
            self._logical_channels_condition.acquire()
            if logical_request.channel_id in self._logical_channels:
                self._logger.debug('Channel id %d already exists' %
                                   logical_request.channel_id)
                raise PhysicalConnectionError(
                    _DROP_CODE_CHANNEL_ALREADY_EXISTS,
                    'Channel id %d already exists' %
                    logical_request.channel_id)
            worker = _Worker(self, logical_request)
            channel_data = _LogicalChannelData(logical_request, worker)
            self._logical_channels[logical_request.channel_id] = channel_data
            worker.start()
        finally:
            self._logical_channels_condition.release()

    def _process_add_channel_request(self, block):
        # Handles an AddChannelRequest control block.
        try:
            logical_request = self._create_logical_request(block)
        except ValueError, e:
            self._logger.debug('Failed to create logical request: %r' % e)
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return
        if self._do_handshake_for_logical_request(logical_request):
            if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
                # Update handshake base.
                # TODO(bashi): Make sure this is the right place to update
                # handshake base.
                self._handshake_base = _HandshakeDeltaBase(
                    logical_request.headers_in)
            self._add_logical_channel(logical_request)
        else:
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)

    def _process_flow_control(self, block):
        # Handles a FlowControl control block: replenishes the send quota of
        # the objective channel. Unknown channels are silently ignored.
        try:
            self._logical_channels_condition.acquire()
            if not block.channel_id in self._logical_channels:
                return
            channel_data = self._logical_channels[block.channel_id]
            channel_data.request.ws_stream.replenish_send_quota(
                block.send_quota)
        finally:
            self._logical_channels_condition.release()

    def _process_drop_channel(self, block):
        # Handles a DropChannel control block: stops reading and sending on
        # the objective channel. Unknown channels are silently ignored.
        self._logger.debug(
            'DropChannel received for %d: code=%r, reason=%r' %
            (block.channel_id, block.drop_code, block.drop_message))
        try:
            self._logical_channels_condition.acquire()
            if not block.channel_id in self._logical_channels:
                return
            channel_data = self._logical_channels[block.channel_id]
            channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED

            # Close the logical channel
            channel_data.request.connection.set_read_state(
                _LogicalConnection.STATE_TERMINATED)
            channel_data.request.ws_stream.stop_sending()
        finally:
            self._logical_channels_condition.release()

    def _process_control_blocks(self, parser):
        # Dispatches each control block on the control channel to its
        # handler. AddChannelResponse and NewChannelSlot received here fail
        # the physical connection.
        for control_block in parser.read_control_blocks():
            opcode = control_block.opcode
            self._logger.debug('control block received, opcode: %d' % opcode)
            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
                self._process_add_channel_request(control_block)
            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                raise PhysicalConnectionError(
                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                    'Received AddChannelResponse')
            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
                self._process_flow_control(control_block)
            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
                self._process_drop_channel(control_block)
            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
                raise PhysicalConnectionError(
                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                    'Received NewChannelSlot')
            else:
                raise MuxUnexpectedException(
                    'Unexpected opcode %r' % opcode)

    def _process_logical_frame(self, channel_id, parser):
        # Reads one inner frame, enforces the receive quota and forwards the
        # re-encoded frame to the logical connection.
        self._logger.debug('Received a frame. channel id=%d' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                # We must ignore the message for an inactive channel.
                return
            channel_data = self._logical_channels[channel_id]
            fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
            consuming_byte = len(payload)
            # The first frame of a message consumes one extra quota octet.
            if opcode != common.OPCODE_CONTINUATION:
                consuming_byte += 1
            if not channel_data.request.ws_stream.consume_receive_quota(
                consuming_byte):
                # The client violates quota. Close logical channel.
                raise LogicalChannelError(
                    channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
            header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
                                   mask=False)
            frame_data = header + payload
            channel_data.request.connection.append_frame_data(frame_data)
        finally:
            self._logical_channels_condition.release()

    def dispatch_message(self, message):
        """Dispatches message. The reader thread calls this method.

        Args:
            message: a message that contains encapsulated frame.
        Raises:
            PhysicalConnectionError: if the message contains physical
                connection level errors.
            LogicalChannelError: if the message contains logical channel
                level errors.
        """
        parser = _MuxFramePayloadParser(message)
        try:
            channel_id = parser.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
        if channel_id == _CONTROL_CHANNEL_ID:
            self._process_control_blocks(parser)
        else:
            self._process_logical_frame(channel_id, parser)

    def notify_worker_done(self, channel_id):
        """Called when a worker has finished.

        Args:
            channel_id: channel id corresponded with the worker.
        """
        self._logger.debug('Worker for channel id %d terminated' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                raise MuxUnexpectedException(
                    'Channel id %d not found' % channel_id)
            channel_data = self._logical_channels.pop(channel_id)
        finally:
            # Wake wait_until_done() even on the exception path.
            self._worker_done_notify_received = True
            self._logical_channels_condition.notify()
            self._logical_channels_condition.release()

        if not channel_data.request.server_terminated:
            self._send_drop_channel(
                channel_id, code=channel_data.drop_code,
                message=channel_data.drop_message)

    def notify_reader_done(self):
        """This method is called by the reader thread when the reader has
        finished.
        """
        # NOTE(review): "Termiating" below is a typo for "Terminating"; left
        # unchanged here because it is a runtime log string.
        self._logger.debug(
            'Termiating all logical connections waiting for incoming data '
            '...')
        self._logical_channels_condition.acquire()
        for channel_data in self._logical_channels.values():
            try:
                channel_data.request.connection.set_read_state(
                    _LogicalConnection.STATE_TERMINATED)
            except Exception:
                self._logger.debug(traceback.format_exc())
        self._logical_channels_condition.release()

    def notify_writer_done(self):
        """This method is called by the writer thread when the writer has
        finished.
        """
        self._logger.debug(
            'Termiating all logical connections waiting for write '
            'completion ...')
        self._logical_channels_condition.acquire()
        for channel_data in self._logical_channels.values():
            try:
                channel_data.request.connection.on_writer_done()
            except Exception:
                self._logger.debug(traceback.format_exc())
        self._logical_channels_condition.release()

    def fail_physical_connection(self, code, message):
        """Fail the physical connection.

        Args:
            code: drop reason code.
            message: drop message.
        """
        self._logger.debug('Failing the physical connection...')
        self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
        self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)

    def fail_logical_channel(self, channel_id, code, message):
        """Fail a logical channel.

        Args:
            channel_id: channel id.
            code: drop reason code.
            message: drop message.
        """
        self._logger.debug('Failing logical channel %d...' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if channel_id in self._logical_channels:
                channel_data = self._logical_channels[channel_id]
                # Close the logical channel. notify_worker_done() will be
                # called later and it will send DropChannel.
                channel_data.drop_code = code
                channel_data.drop_message = message
                channel_data.request.connection.set_read_state(
                    _LogicalConnection.STATE_TERMINATED)
                channel_data.request.ws_stream.stop_sending()
            else:
                self._send_drop_channel(channel_id, code, message)
        finally:
            self._logical_channels_condition.release()
def use_mux(request):
    """Returns True when the request carries an active mux processor."""
    if not hasattr(request, 'mux_processor'):
        return False
    return request.mux_processor.is_active()
def start(request, dispatcher):
    """Runs multiplexing on the given physical connection.

    Creates a _MuxHandler, starts it, advertises the initial channel slots
    to the client, and blocks until every worker has finished.
    """
    handler = _MuxHandler(request, dispatcher)
    handler.start()

    handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
                              _INITIAL_QUOTA_FOR_CLIENT)

    handler.wait_until_done()
# vi:sts=4 sw=4 et
| mpl-2.0 |
rosudrag/eve-wspace | evewspace/core/views.py | 15 | 1502 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from Map.models import Map
from django.template.response import TemplateResponse
# Create your views here.
@login_required()
def home_view(request):
    """Render the home page ('home.html') for a logged-in user.

    NOTE(review): an earlier docstring promised to redirect users with a
    default map to that map, but no such logic exists here -- the view
    only renders the template. Confirm whether the redirect was ever
    intended before relying on it.
    """
    return TemplateResponse(request, 'home.html')
@login_required
def config_view(request):
    """
    Render the configuration page ('settings.html') for a logged-in user.
    """
    return TemplateResponse(request, 'settings.html')
| gpl-3.0 |
shakamunyi/nova | nova/api/openstack/compute/plugins/v3/extended_server_attributes.py | 23 | 2652 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Server Attributes API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
# Extension alias, used in the URL namespace and the policy rule name.
ALIAS = "os-extended-server-attributes"
# Policy check for 'compute:v3:os-extended-server-attributes'. A "soft"
# authorizer presumably returns a boolean instead of raising when the
# policy denies access (callers below just skip extending the response).
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class ExtendedServerAttributesController(wsgi.Controller):
    """Adds OS-EXT-SRV-ATTR:* fields to 'show' and 'detail' responses."""

    def _extend_server(self, context, server, instance):
        # Response key -> backing field on the instance record.
        attr_map = {
            "OS-EXT-SRV-ATTR:hypervisor_hostname": 'node',
            "OS-EXT-SRV-ATTR:host": 'host',
            "OS-EXT-SRV-ATTR:instance_name": 'name',
        }
        for key, attr in attr_map.items():
            server[key] = instance[attr]

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        server = resp_obj.obj['server']
        # server['id'] is guaranteed to be in the cache due to
        # the core API adding it in its 'show' method.
        self._extend_server(context, server,
                            req.get_db_instance(server['id']))

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        for server in list(resp_obj.obj['servers']):
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'detail' method.
            self._extend_server(context, server,
                                req.get_db_instance(server['id']))
class ExtendedServerAttributes(extensions.V3APIExtensionBase):
    """Extended Server Attributes support."""

    name = "ExtendedServerAttributes"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        # Attach the controller as an extension of the core 'servers' resource.
        return [extensions.ControllerExtension(
            self, 'servers', ExtendedServerAttributesController())]

    def get_resources(self):
        # This extension introduces no new top-level resources.
        return []
| apache-2.0 |
# Public API of the vedit package.
__all__ = [
    # Paths to binaries we depend on.
    'FFMPEG',
    'FFPROBE',
    # Various "constants" used in configuration.
    'OVERLAY',
    'CROP',
    'PAD',
    'PAN',
    # BUG FIX: the comma was inside the string ('DISPLAY_STYLES,'), so the
    # exported name was wrong and `from vedit import *` would fail on it.
    'DISPLAY_STYLES',
    'DOWN',
    'LEFT',
    'RIGHT',
    'UP',
    'OVERLAY_DIRECTIONS',
    'ALTERNATE',
    'PAN_DIRECTIONS',
    # Classes.
    'Display',
    'Video',
    'Clip',
    'Window',
    'Watermark',
    # Utility functions.
    'distribute_clips',
    'gen_background_video',
]
from .vedit import FFMPEG
from .vedit import FFPROBE
from .vedit import OVERLAY
from .vedit import CROP
from .vedit import PAD
from .vedit import PAN
from .vedit import DISPLAY_STYLES
from .vedit import DOWN
from .vedit import LEFT
from .vedit import RIGHT
from .vedit import UP
from .vedit import OVERLAY_DIRECTIONS
from .vedit import ALTERNATE
from .vedit import PAN_DIRECTIONS
from .vedit import Display
from .vedit import Video
from .vedit import Clip
from .vedit import Window
from .vedit import Watermark
from .vedit import distribute_clips
from .vedit import gen_background_video
| mit |
gVallverdu/myScripts | CRYSTAL/crystalio.py | 1 | 14817 | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
crystalio
---------
Read a CRYSTAL output file and export all structures, SCF energies and
convergence data.
"""
__author__ = "Germain Vallverdu"
__email__ = "germain.vallverdu@univ-pau.fr"
__licence__ = "GPL"
import re
import numpy as np
from pymatgen import Structure, Lattice
class CrystalOutfile:
"""
A convenient parser for CRYSTAL output files.
Args:
filename: Filename of CRYSTAL output file.
encoding: encoding of the output file (utf-8)
    Attributes:

        .. attribute:: structures

            All structures read from the output, in the order they appear
            in the file: the initial geometry first, then one structure per
            geometry printed during the optimization.
    """
def __init__(self, filename, encoding="utf-8"):
    """Store the file location and parse it immediately.

    All public attributes are populated by _parse() during construction.
    """
    self.filename = filename
    self.encoding = encoding
    self._parse()
@property
def initial_structure(self):
    """ First structure read in the calculation (``structures[0]``). """
    return self.structures[0]
@property
def final_structure(self):
    """ Last structure read in the calculation (``structures[-1]``). """
    return self.structures[-1]
def get_structure(self, n):
""" return the nth structure read in the calculation """
if n > len(self.structures) or n < 1:
raise ValueError("Bad n value. n=%d. I read %d structures." % (n, len(self.structures)))
return self.structures[n - 1]
@property
def final_energy(self):
    """ Last SCF total energy read in the output (``energies[-1]``). """
    return self.energies[-1]
def _parse(self):
    """Read the whole CRYSTAL output file and populate the attributes.

    Fills: title, system, group, slab, nanotube, volumes, energies,
    forces, convergence_data, structures, scf_converge and
    geometry_converge.

    Raises:
        ValueError: if the 'GEOMETRY FOR WAVE FUNCTION' block is missing
            (add TESTGEOM to the d12 input) or the file ends unexpectedly.
    """
    # regexes for the numeric fields printed by CRYSTAL
    float_patt = re.compile(r"[+-]?\d+\.\d+[EFD]?[+-]?\d+")  # -9.3892E+02
    start_patt = re.compile(r"^\s*EEEEEEEEEE STARTING DATE \d+")
    coord_patt = re.compile(r"^\s+(\d+)\s+(?P<aunit>[TF])\s+(?P<Z>\d+)\s+"
                            r"(?P<specie>\w+)\s+(?P<x>[+-]?\d+\.\d+E[+-]?\d+)"
                            r"\s+(?P<y>[+-]?\d+\.\d+E[+-]?\d+)\s+"
                            r"(?P<z>[+-]?\d+\.\d+E[+-]?\d+)")
    # nanotube coordinate lines carry an extra trailing radius column
    coord_nanotube_patt = re.compile(r"^\s+(\d+)\s+(?P<aunit>[TF])\s+(?P<Z>\d+)\s+"
                                     r"(?P<specie>\w+)\s+(?P<x>[+-]?\d+\.\d+E[+-]?\d+)"
                                     r"\s+(?P<y>[+-]?\d+\.\d+E[+-]?\d+)\s+"
                                     r"(?P<z>[+-]?\d+\.\d+E[+-]?\d+)\s+"
                                     r"(?P<radius>\d+\.\d+)")
    forces_patt = re.compile(r"^\s+(?P<iat>\d+)\s+(?P<Z>\d+)\s+"
                             r"(?P<x>[+-]?\d+\.\d+E[+-]?\d+)\s+"
                             r"(?P<y>[+-]?\d+\.\d+E[+-]?\d+)\s+"
                             r"(?P<z>[+-]?\d+\.\d+E[+-]?\d+)")
    # geometry optimization convergence indicators
    max_grad_patt = re.compile(r"^\sMAX GRADIENT\s+(?P<max_grad>\d+\.\d+)"
                               r"\s+THRESHOLD\s+(?P<max_grad_thr>\d+\.\d+)")
    rms_grad_patt = re.compile(r"^\sRMS GRADIENT\s+(?P<rms_grad>\d+\.\d+)"
                               r"\s+THRESHOLD\s+(?P<rms_grad_thr>\d+\.\d+)")
    max_displac_patt = re.compile(r"^\sMAX DISPLAC\.\s+(?P<max_displac>\d+\.\d+)"
                                  r"\s+THRESHOLD\s+(?P<max_displac_thr>\d+\.\d+)")
    rms_displac_patt = re.compile(r"^\sRMS DISPLAC\.\s+(?P<rms_displac>\d+\.\d+)"
                                  r"\s+THRESHOLD\s+(?P<rms_displac_thr>\d+\.\d+)")
    norm_grad_patt = re.compile(r"^\s+GRADIENT NORM\s+(?P<norm_grad>\d+\.\d+)"
                                r"\s+GRADIENT THRESHOLD\s+(?P<norm_grad_thr>\d+\.\d+)")

    # defaults before anything is read
    self.title = ""
    self.system = ""
    self.group = ""
    self.slab = False
    self.nanotube = False
    self.volumes = list()
    self.energies = list()
    self.forces = list()
    self.convergence_data = list()
    self.geometry_converge = False
    self.scf_converge = False
    external_geometry = False

    with open(self.filename, "r", encoding=self.encoding) as f:

        # look for starting message
        for line in f:
            if start_patt.match(line):
                self.title = f.readline().strip()
                break

        # ------------------------------------------------------------------
        # first, read the initial geometry & identify the type of structure
        # ------------------------------------------------------------------
        for line in f:
            if re.match(r"^\sGEOMETRY INPUT FROM EXTERNAL FILE", line):
                external_geometry = True
                line = f.readline()
                if "SLAB" in line:
                    self.slab = True
                if "NANOTUBE" in line:
                    self.nanotube = True
                print("WARNING: Geometry from an external file.")
                break
            if re.match(r"^\sSLAB CALCULATION", line):
                self.slab = True
                system_patt = re.compile(r"^\sSYSTEM AND LATTICE")
                group_patt = re.compile(r" PLANE GROUP N.")
                break
            if re.match(r"^\sCRYSTAL CALCULATION", line):
                system_patt = re.compile(r"^\sCRYSTAL FAMILY")
                group_patt = re.compile(r"^\sSPACE GROUP")
                break

        # look for initial geometry: GEOMETRY FOR WAVEFUNCTION
        # read group and crystallographic system
        # check if a SLAB or NANOTUBE is built by GEOMETRY EDITING
        geom_for_wf = False
        for line in f:
            if not external_geometry and system_patt.search(line):
                self.system = line.split(":")[1].strip()
            if not external_geometry and group_patt.search(line):
                self.group = line.split(":")[1].strip()
            if " SLAB GENERATED " in line:
                self.slab = True
                # group and system no more relevant
                self.group = ""
                self.system = ""
            if "CONSTRUCTION OF A NANOTUBE FROM A SLAB" in line:
                self.nanotube = True
                self.slab = False
                # group and system no more relevant
                self.group = ""
                self.system = ""
            if re.match(r"^\sGEOMETRY FOR WAVE FUNCTION", line):
                geom_for_wf = True
                break
            if line == "":
                # end of file, geometry for wavefunction not found
                break

        if not geom_for_wf:
            # STOP case, add TESTGEOM to d12
            raise ValueError("GEOMETRY FOR WAVEFUNCTION NOT FOUND.\n"
                             "Please, add TESTGEOM in the d12 input file.")

        # read until calculation start
        # read starting geometry and look for PRIMITIVE or CRYSTALLOGRAPHIC
        read_geom = False
        while "CRYSTAL - SCF - TYPE OF CALCULATION" not in line:
            line = f.readline()
            if line == "":
                raise ValueError("End of file.")

            # search PRIMITIVE CELL
            if re.match(r"^\sPRIMITIVE CELL", line):
                read_geom = True
                geom_patt = re.compile(r"^\sPRIMITIVE CELL")

            # search CRYSTALLOGRAPHIC CELL if exist
            if re.match(r"^\sCRYSTALLOGRAPHIC CELL", line):
                read_geom = True
                geom_patt = re.compile(r"^\sCRYSTALLOGRAPHIC CELL")

            if read_geom:
                # cell volume is only meaningful for a 3D periodic system
                if not self.slab and not self.nanotube:
                    volume = float(line.split("=")[1].split()[0].strip(")"))
                    self.volumes.append(volume)
                f.readline()
                # lattice parameters
                line = f.readline()
                params = [float(val) for val in re.findall(r"\d+\.\d+", line)]
                lattice = Lattice.from_lengths_and_angles(params[0:3], params[3:])
                # step on for 4 lines
                [f.readline() for _ in range(4)]

                # read coordinates
                species = list()  # atom names
                uniq = list()  # True if atom belong to the asymmetric unit
                radius = list()  # distance from the axes of the nanotube
                coords = list()
                while line != "\n":
                    read = False
                    line = f.readline()
                    if self.nanotube and coord_nanotube_patt.match(line):
                        data = coord_nanotube_patt.match(line).groupdict()
                        read = True
                    elif coord_patt.match(line):
                        data = coord_patt.match(line).groupdict()
                        read = True
                    if read:
                        specie = data["specie"]
                        # normalize two-letter symbols, e.g. 'SI' -> 'Si'
                        specie = specie if len(specie) == 1 else specie[0] + specie[1].lower()
                        species.append(specie)
                        coord = [float(data[k]) for k in "xyz"]
                        uniq.append(True if data["aunit"] == "T" else False)
                        # CRYSTAL prints cartesian values along non-periodic
                        # directions; convert them to fractional coordinates
                        if self.slab:
                            coord[2] /= lattice.c
                        elif self.nanotube:
                            coord[1] /= lattice.b
                            coord[2] /= lattice.c
                            radius.append(float(data["radius"]))
                        coords.append(coord)

                self.structures = [Structure(lattice, species, coords,
                                             site_properties={"aunit": uniq})]
                read_geom = False

        # ------------------------------------------------------------------
        # from that point, SCF, or structure optimization start !
        # continue up to the end of file
        # ------------------------------------------------------------------
        n_geom = 0  # number of optimization cycles seen (not exposed)
        cvg_data = dict()
        while line != "":
            line = f.readline()

            if " TOTAL ENERGY" in line:
                self.energies.append(float(float_patt.findall(line)[0]))
                self.scf_converge = True

            if "CARTESIAN FORCES IN HARTREE/BOHR" in line:
                # WARNING: Forces are not printed at each geom step
                line = f.readline()
                forces = list()
                for _ in range(self.initial_structure.num_sites):
                    data = forces_patt.match(f.readline()).groupdict()
                    forces.append([float(data[c]) for c in "xyz"])
                self.forces.append(np.array(forces))

            # accumulate convergence indicators of the current cycle
            if max_grad_patt.match(line):
                cvg_data.update(max_grad_patt.match(line).groupdict())
            if rms_grad_patt.match(line):
                cvg_data.update(rms_grad_patt.match(line).groupdict())
            if max_displac_patt.match(line):
                cvg_data.update(max_displac_patt.match(line).groupdict())
            if rms_displac_patt.match(line):
                cvg_data.update(rms_displac_patt.match(line).groupdict())
            if norm_grad_patt.match(line):
                cvg_data.update(norm_grad_patt.match(line).groupdict())

            if line == "":
                # end of file ?
                break

            if "COORDINATE AND CELL OPTIMIZATION" in line:
                cvg_data = {k: float(v) for k, v in cvg_data.items()}
                # end of optimization cycle
                self.convergence_data.append(cvg_data)
                n_geom += 1
                cvg_data = dict()

            if "FINAL OPTIMIZED GEOMETRY" in line:
                self.geometry_converge = True
                n_geom += 1

            # search structure data
            if geom_patt.match(line):
                # PRIMITVE or CRYSTALLOGRAPHIC depending on what is present
                read_geom = True

            if read_geom:
                # same layout as the initial geometry block above
                if not self.slab and not self.nanotube:
                    volume = float(line.split("=")[1].split()[0].strip(")"))
                    self.volumes.append(volume)
                f.readline()
                # lattice parameters
                line = f.readline()
                params = [float(val) for val in re.findall(r"\d+\.\d+", line)]
                lattice = Lattice.from_lengths_and_angles(params[0:3], params[3:])
                # step on for 4 lines
                [f.readline() for _ in range(4)]

                # read coordinates
                species = list()  # atom names
                uniq = list()  # True if atom belong to the asymmetric unit
                radius = list()  # distance from the axes of the nanotube
                coords = list()
                while line != "\n":
                    read = False
                    line = f.readline()
                    if self.nanotube and coord_nanotube_patt.match(line):
                        data = coord_nanotube_patt.match(line).groupdict()
                        read = True
                    elif coord_patt.match(line):
                        data = coord_patt.match(line).groupdict()
                        read = True
                    if read:
                        specie = data["specie"]
                        specie = specie if len(specie) == 1 else specie[0] + specie[1].lower()
                        species.append(specie)
                        coord = [float(data[k]) for k in "xyz"]
                        uniq.append(True if data["aunit"] == "T" else False)
                        if self.slab:
                            coord[2] /= lattice.c
                        elif self.nanotube:
                            coord[1] /= lattice.b
                            coord[2] /= lattice.c
                            radius.append(float(data["radius"]))
                        coords.append(coord)

                self.structures.append(Structure(lattice, species, coords,
                                                 site_properties={"aunit": uniq}))
                read_geom = False
# Library module: no command line interface is provided.
if __name__ == "__main__":
    pass
| gpl-2.0 |
homme/ansible | lib/ansible/cli/__init__.py | 10 | 22872 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import operator
import optparse
import os
import sys
import time
import yaml
import re
import getpass
import subprocess
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
from ansible.utils.display import Display
from ansible.utils.path import is_executable
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    #FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog

    def format_help(self, formatter=None, epilog=None):
        """Sort the registered options by their option string, then render help.

        BUG FIX: the original always passed formatter=None to the parent,
        silently discarding a caller-supplied formatter; pass it through.
        """
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        return optparse.OptionParser.format_help(self, formatter=formatter)
class CLI(object):
    ''' code behind bin/ansible* programs '''

    # Sub-commands accepted by this CLI; subclasses override this list.
    VALID_ACTIONS = ['No Actions']

    # Documentation markup converted to terminal text by tty_ify():
    # I(..) italic, B(..) bold, M(..) module, U(..) URL, C(..) constant.
    _ITALIC = re.compile(r"I\(([^)]+)\)")
    _BOLD = re.compile(r"B\(([^)]+)\)")
    _MODULE = re.compile(r"M\(([^)]+)\)")
    _URL = re.compile(r"U\(([^)]+)\)")
    _CONST = re.compile(r"C\(([^)]+)\)")

    PAGER = 'less'
    LESS_OPTS = 'FRSX'  # -F (quit-if-one-screen) -R (allow raw ansi control chars)
                        # -S (chop long lines) -X (disable termcap init and de-init)
def __init__(self, args, display=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
if display is None:
self.display = Display()
else:
self.display = display
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0,len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
def parse(self):
raise Exception("Need to implement!")
def run(self):
    """Shared pre-run hook: report which config file is in effect.

    Only prints when the user asked for verbose output (-v or more).
    """
    if self.options.verbosity > 0:
        if C.CONFIG_FILE:
            self.display.display("Using %s as config file" % C.CONFIG_FILE)
        else:
            self.display.display("No config file found; using defaults")
@staticmethod
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
''' prompt for vault password and/or password change '''
vault_pass = None
new_vault_pass = None
try:
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
except EOFError:
pass
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(self):
    ''' prompt for connection and become passwords if needed

    Returns a (sshpass, becomepass) tuple; entries are None when the
    corresponding prompt was not requested via self.options.
    '''

    op = self.options
    sshpass = None
    becomepass = None
    become_prompt = ''

    try:
        if op.ask_pass:
            sshpass = getpass.getpass(prompt="SSH password: ")
            become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
            if sshpass:
                sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
        else:
            become_prompt = "%s password: " % op.become_method.upper()

        if op.become_ask_pass:
            becomepass = getpass.getpass(prompt=become_prompt)
            # empty become password means "reuse the SSH password"
            if op.ask_pass and becomepass == '':
                becomepass = sshpass
            if becomepass:
                becomepass = to_bytes(becomepass)
    except EOFError:
        # closed stdin: treat as "no password entered"
        pass

    return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
    ''' check for conflicting options

    Calls self.parser.error() (which exits) when mutually exclusive
    options were given together.
    '''

    op = self.options

    if vault_opts:
        # Check for vault related conflicts
        if (op.ask_vault_pass and op.vault_password_file):
            self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")

    if runas_opts:
        # Check for privilege escalation conflicts
        # NOTE: 'and' binds tighter than 'or', so this reads as three
        # pairwise conjunctions OR'ed together: (su & sudo) or
        # (su & become) or (sudo & become).
        if (op.su or op.su_user or op.ask_su_pass) and \
           (op.sudo or op.sudo_user or op.ask_sudo_pass) or \
           (op.su or op.su_user or op.ask_su_pass) and \
           (op.become or op.become_user or op.become_ask_pass) or \
           (op.sudo or op.sudo_user or op.ask_sudo_pass) and \
           (op.become or op.become_user or op.become_ask_pass):

            self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                              "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
                              "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
                              " are exclusive of each other")

    if fork_opts:
        if op.forks < 1:
            self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def expand_tilde(option, opt, value, parser):
    # optparse callback: store the option value with '~' expanded to the
    # user's home directory (used by all path-taking options below).
    setattr(parser.values, option.dest, os.path.expanduser(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
                async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False):
    ''' create an options parser for most ansible scripts

    Each boolean flag enables a themed group of command line options
    (inventory, vault, privilege escalation, connection, ...) so the
    various bin/ansible* entry points can share a single builder.
    '''

    #FIXME: implemente epilog parsing
    #OptionParser.format_epilog = lambda self, formatter: self.epilog

    # base opts
    parser = SortedOptParser(usage, version=CLI.version("%prog"))
    parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
                      help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")

    if inventory_opts:
        parser.add_option('-i', '--inventory-file', dest='inventory',
                          help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
                          default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str)
        parser.add_option('--list-hosts', dest='listhosts', action='store_true',
                          help='outputs a list of matching hosts; does not execute anything else')
        parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
                          help='further limit selected hosts to an additional pattern')

    if module_opts:
        parser.add_option('-M', '--module-path', dest='module_path', default=None,
                          help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
                          action="callback", callback=CLI.expand_tilde, type=str)

    if runtask_opts:
        parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
                          help="set additional variables as key=value or YAML/JSON", default=[])

    if fork_opts:
        parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
                          help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)

    if vault_opts:
        parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
                          help='ask for vault password')
        parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
                          dest='vault_password_file', help="vault password file", action="callback",
                          callback=CLI.expand_tilde, type=str)
        parser.add_option('--new-vault-password-file',
                          dest='new_vault_password_file', help="new vault password file for rekey", action="callback",
                          callback=CLI.expand_tilde, type=str)
        parser.add_option('--output', default=None, dest='output_file',
                          help='output file name for encrypt or decrypt; use - for stdout')

    if subset_opts:
        parser.add_option('-t', '--tags', dest='tags', default='all',
                          help="only run plays and tasks tagged with these values")
        parser.add_option('--skip-tags', dest='skip_tags',
                          help="only run plays and tasks whose tags do not match these values")

    if output_opts:
        parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
                          help='condense output')
        parser.add_option('-t', '--tree', dest='tree', default=None,
                          help='log output to this directory')

    if runas_opts:
        # priv user defaults to root later on to enable detecting when this option was given here
        parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
                          help='ask for sudo password (deprecated, use become)')
        parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
                          help='ask for su password (deprecated, use become)')
        parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
                          help="run operations with sudo (nopasswd) (deprecated, use become)")
        parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
                          help='desired sudo user (default=root) (deprecated, use become)')
        parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
                          help='run operations with su (deprecated, use become)')
        parser.add_option('-R', '--su-user', default=None,
                          help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)

        # consolidated privilege escalation (become)
        parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
                          help="run operations with become (nopasswd implied)")
        parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
                          help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
        parser.add_option('--become-user', default=None, dest='become_user', type='string',
                          help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
        parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
                          help='ask for privilege escalation password')

    if connect_opts:
        parser.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
                          help='ask for connection password')
        parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
                          help='use this file to authenticate the connection')
        parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
                          help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
        parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
                          help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
        parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
                          help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)

    if async_opts:
        parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
                          help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
                          help='run asynchronously, failing after X seconds (default=N/A)')

    if check_opts:
        parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
                          help="don't make any changes; instead, try to predict some of the changes that may occur")
        parser.add_option('--syntax-check', dest='syntax', action='store_true',
                          help="perform a syntax check on the playbook, but do not execute it")
        parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
                          help="when changing (small) files and templates, show the differences in those files; works great with --check")

    if meta_opts:
        parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
                          help="run handlers even if a task fails")
        parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
                          help="clear the fact cache")

    return parser
@staticmethod
def version(prog):
    ''' return ansible version '''
    # Assemble the pieces and join once at the end.
    parts = ["{0} {1}".format(prog, __version__)]
    gitinfo = CLI._gitinfo()
    if gitinfo:
        parts.append(" {0}".format(gitinfo))
    parts.append("\n config file = %s" % C.CONFIG_FILE)
    parts.append("\n configured module search path = %s" % C.DEFAULT_MODULE_PATH)
    return "".join(parts)
@staticmethod
def version_info(gitinfo=False):
    ''' return full ansible version info as a dict

    Keys: string, full, major, minor, revision. Version fragments are
    ints when numeric, otherwise left as strings (e.g. '0b1').
    '''
    if gitinfo:
        # expensive call, use with care
        # BUG FIX: the original called bare version('') which is not a
        # module-level name and raised NameError; call the staticmethod
        # through the class instead.
        ansible_version_string = CLI.version('')
    else:
        ansible_version_string = __version__
    ansible_version = ansible_version_string.split()[0]
    ansible_versions = ansible_version.split('.')
    for counter in range(len(ansible_versions)):
        if ansible_versions[counter] == "":
            ansible_versions[counter] = 0
        try:
            ansible_versions[counter] = int(ansible_versions[counter])
        except ValueError:
            # non-numeric fragment (pre-release tag): keep it as a string
            pass
    if len(ansible_versions) < 3:
        # pad to (major, minor, revision)
        for counter in range(len(ansible_versions), 3):
            ansible_versions.append(0)
    return {'string': ansible_version_string.strip(),
            'full': ansible_version,
            'major': ansible_versions[0],
            'minor': ansible_versions[1],
            'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
    ''' find reasonable way to display text '''
    # this is a much simpler form of what is in pydoc.py
    if not sys.stdout.isatty():
        # not a terminal (piped/redirected): print without paging
        self.display.display(text)
    elif 'PAGER' in os.environ:
        if sys.platform == 'win32':
            # no reliable pager convention on win32: print directly
            self.display.display(text)
        else:
            self.pager_pipe(text, os.environ['PAGER'])
    elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
        # no $PAGER set: fall back to less when it is on the PATH
        self.pager_pipe(text, 'less')
    else:
        self.display.display(text)
@staticmethod
def pager_pipe(text, cmd):
    ''' pipe text through a pager '''
    if 'LESS' not in os.environ:
        # give less sensible defaults when the user has none (see LESS_OPTS)
        os.environ['LESS'] = CLI.LESS_OPTS
    try:
        cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
        cmd.communicate(input=text.encode(sys.stdout.encoding))
    except IOError:
        # pager exited early (e.g. user quit): nothing to do
        pass
    except KeyboardInterrupt:
        pass
@classmethod
def tty_ify(self, text):
t = self._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = self._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = self._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = self._URL.sub(r"\1", t) # U(word) => word
t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def read_vault_password_file(vault_password_file):
    """
    Read a vault password from a file or if executable, execute the script and
    retrieve password from STDOUT
    """
    this_path = os.path.realpath(os.path.expanduser(vault_password_file))
    if not os.path.exists(this_path):
        raise AnsibleError("The vault password file %s was not found" % this_path)

    if is_executable(this_path):
        try:
            # STDERR not captured to make it easier for users to prompt for input in their scripts
            p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
        except OSError as e:
            # BUG FIX: was ' '.join(this_path), which space-joined the
            # individual *characters* of the path in the error message.
            raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (this_path, e))
        stdout, stderr = p.communicate()
        # communicate() returns bytes: strip bytes, not str (py3 safe,
        # py2 compatible since bytes is str there)
        vault_pass = stdout.strip(b'\r\n')
    else:
        try:
            # context manager so the file is closed even on a read error
            with open(this_path, "rb") as f:
                vault_pass = f.read().strip()
        except (OSError, IOError) as e:
            raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))

    return vault_pass
| gpl-3.0 |
ogenstad/ansible | lib/ansible/plugins/action/ironware_config.py | 29 | 4215 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ironware import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for ironware_config.

    Renders a local ``src`` template before module execution and writes a
    per-host backup file when ``backup`` is requested.
    """

    def run(self, tmp=None, task_vars=None):
        """Render the src template (if any), run the module, handle backups,
        and strip private ``__key__`` entries from the result."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # py3 exceptions have no .message attribute; stringify instead.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters.  Iterate over a copy of the keys because
        # we delete while walking (RuntimeError on py3 otherwise).
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the role path if running inside a role, else the playbook basedir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write ``contents`` to <working_path>/backup/<host>_config.<timestamp>,
        removing any previous backups for the same host.  Returns the filename."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Context manager ensures the backup file is flushed and closed.
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve the ``src`` argument to a template file, render it with the
        task's templar, and store the rendered text back in args['src'].

        :raises ValueError: if the source file cannot be found or loaded
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # Bugfix: test the actual src value, not the literal string 'src'.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # Raise (not return) so run() reports the failure; the previous
            # returned dict was silently discarded by the caller.
            raise ValueError('unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # Bugfix: attribute name is "_block" (the trailing colon in the
            # original string meant this branch could never be taken).
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
SerenityS/Solid_Kernel-Stock-KK | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module-level state shared between the tracepoint callbacks below and the
# trace_begin/trace_end hooks invoked by perf.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (set from the command line in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from src to dst, converting nanoseconds to
    milliseconds (may be negative if dst precedes src)."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    """Print one line for a transmitted packet: device, length, enqueue
    timestamp, time spent in the Qdisc, and time until the skb was freed."""
    # Honor the "dev=" command line filter, if one was given.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
# Format strings used by print_receive to draw the rx processing timeline.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    """Print the timeline of one receive hunk: the hard irqs that raised the
    NET_RX softirq, then every napi_poll / netif_receive_skb event with the
    offsets (in msec) from the first irq entry."""
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # One line per hard irq, plus any netif_rx events it recorded.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    # Events gathered while the NET_RX softirq was running.
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            # 'comm' is attached when skb_copy_datagram_iovec matched the skb;
            # 'handle' when kfree_skb/consume_skb consumed it.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                                   event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                                  event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script's command line options (tx, rx, dev=<name>, debug)
    into the module-level flags before any event is processed."""
    global show_tx, show_rx, dev, debug

    for option in sys.argv[1:]:
        if option == 'tx':
            show_tx = 1
        elif option == 'rx':
            show_rx = 1
        elif option[0:4] == 'dev=':
            dev = option[4:]
        elif option == 'debug':
            debug = 1

    # Default to showing both directions when neither was requested.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Called by perf after the trace: sort all buffered events by time,
    dispatch each to its handler, then print the rx/tx reports."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with the "debug" option, report buffer occupancy and overflow counters
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer NET_RX softirq-entry tracepoints; other vectors are ignored."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer NET_RX softirq-exit tracepoints; other vectors are ignored.
    (The entry event's symbol table is reused to decode vec.)"""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer NET_RX softirq-raise tracepoints; other vectors are ignored.
    (The entry event's symbol table is reused to decode vec.)"""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    """Buffer hardware interrupt entry tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    """Buffer hardware interrupt exit tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    """Buffer napi poll tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    """Buffer netif_receive_skb tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    """Buffer netif_rx tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    """Buffer dev_queue_xmit (Qdisc enqueue) tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    """Buffer net_dev_xmit (driver transmit) tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """Buffer kfree_skb tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    """Buffer consume_skb tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    """Buffer skb_copy_datagram_iovec tracepoints."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Push a new irq record onto the per-cpu stack of in-flight irqs."""
    name, context, cpu, time, pid, comm, irq, irq_name = event_info
    record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic.setdefault(cpu, []).append(record)
def handle_irq_handler_exit(event_info):
    """Complete the irq record pushed by handle_irq_handler_entry.

    Records that never accumulated an 'event_list' (i.e. irqs that did not
    raise a NET_RX softirq) are dropped here.
    """
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    # Guard against an exit with no matching entry (e.g. the trace began
    # inside the handler); popping an empty list would raise IndexError.
    # Matches the guard used by handle_irq_softirq_raise/handle_netif_rx.
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t': time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record:
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Tag the irq on top of this cpu's stack with a softirq-raise event."""
    name, context, cpu, time, pid, comm, vec = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    events = irq_record.get('event_list', [])
    events.append({'time': time, 'event': 'sirq_raise'})
    irq_record['event_list'] = events
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Start collecting receive events for a NET_RX softirq on this cpu."""
    name, context, cpu, time, pid, comm, vec = event_info
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}
def handle_irq_softirq_exit(event_info):
    """Close out a NET_RX softirq: merge this cpu's irq stack and its
    collected receive events into one hunk on receive_hunk_list."""
    name, context, cpu, time, pid, comm, vec = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic:
        irq_list = irq_dic.pop(cpu)
    if cpu in net_rx_dic:
        softirq_rec = net_rx_dic.pop(cpu)
        sirq_ent_t = softirq_rec['sirq_ent_t']
        event_list = softirq_rec['event_list']
    # Nothing to report unless both an irq and softirq context were seen.
    if irq_list == [] or event_list == 0:
        return
    # merge information related to a NET_RX softirq
    receive_hunk_list.append({'sirq_ent_t': sirq_ent_t,
                              'sirq_ext_t': time,
                              'irq_list': irq_list,
                              'event_list': event_list})
def handle_napi_poll(event_info):
    """Append a napi_poll event to the softirq in flight on this cpu."""
    name, context, cpu, time, pid, comm, napi, dev_name = event_info
    if cpu in net_rx_dic:
        net_rx_dic[cpu]['event_list'].append(
            {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the irq on top of this cpu's stack."""
    name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    events = irq_record.get('event_list', [])
    events.append({'time': time, 'event': 'netif_rx',
                   'skbaddr': skbaddr, 'skblen': skblen,
                   'dev_name': dev_name})
    irq_record['event_list'] = events
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb in the current softirq and in the bounded
    rx_skb_list used to match later skb_copy_datagram_iovec events."""
    global of_count_rx_skb_list

    name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name = event_info
    if cpu not in net_rx_dic:
        return
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    rx_skb_list.insert(0, rec_data)
    # Evict the oldest entry when the matching buffer exceeds its budget.
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc in the bounded tx_queue_list."""
    global of_count_tx_queue_list

    name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    # Evict the oldest entry when the buffer exceeds its budget.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful transmit (rc == 0, NETDEV_TX_OK) move the matching
    packet from tx_queue_list to the bounded tx_xmit_list."""
    global of_count_tx_xmit_list

    name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name = event_info
    if rc != 0:  # anything other than NETDEV_TX_OK is ignored
        return
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            skb['xmit_t'] = time
            tx_xmit_list.insert(0, skb)
            del tx_queue_list[i]
            # Evict the oldest entry when the buffer exceeds its budget.
            if len(tx_xmit_list) > buffer_budget:
                tx_xmit_list.pop()
                of_count_tx_xmit_list += 1
            return
def handle_kfree_skb(event_info):
    """Resolve a freed skb against the tracking buffers: a queued tx packet
    is dropped, a transmitted one is completed onto tx_free_list, and a
    received one is annotated with the freeing context."""
    name, context, cpu, time, pid, comm, skbaddr, protocol, location = event_info
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """Mark a transmitted skb as consumed and move it to tx_free_list."""
    name, context, cpu, time, pid, comm, skbaddr = event_info
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Attach the consuming process (pid/comm) to a matching received skb."""
    name, context, cpu, time, pid, comm, skbaddr, skblen = event_info
    for i, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
gibiansky/tensorflow | tensorflow/contrib/training/python/training/sampling_ops_test.py | 6 | 14802 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.platform import tf_logging as logging
class StratifiedSampleTest(tf.test.TestCase):
  """Tests for tf.contrib.training.stratified_sample."""

  def testGraphBuildAssertionFailures(self):
    """Invalid arguments must raise at graph-construction time."""
    val = [tf.zeros([1, 3]), tf.ones([1, 5])]
    label = tf.constant([1], shape=[1])  # must have batch dimension
    probs = [.2] * 5
    init_probs = [.1, .3, .1, .3, .2]
    batch_size = 16

    # Label must have only batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.zeros([]), probs, batch_size, init_probs, enqueue_many=True)
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.zeros([1, 1]), probs, batch_size, init_probs,
          enqueue_many=True)

    # Label must not be one-hot.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.constant([0, 1, 0, 0, 0]), probs, batch_size, init_probs)

    # Data must be list, not singleton tensor.
    with self.assertRaises(TypeError):
      tf.contrib.training.stratified_sample(
          tf.zeros([1, 3]), label, probs, batch_size, init_probs)

    # Data must have batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.constant(1), probs, batch_size, init_probs, enqueue_many=True)

    # Batch dimensions on data and labels should be equal.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          [tf.zeros([2, 1])], label, probs, batch_size, init_probs,
          enqueue_many=True)

    # Probabilities must be numpy array, python list, or tensor.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, 1, batch_size, init_probs)

    # Probabilities shape must be fully defined.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, tf.placeholder(
              tf.float32, shape=[None]), batch_size, init_probs)

    # In the rejection sampling case, make sure that probability lengths are
    # the same.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)

    # In the rejection sampling case, make sure that zero initial probability
    # classes also have zero target probability.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])

  def testRuntimeAssertionFailures(self):
    """Invalid label/probability values must raise when the graph is run."""
    valid_probs = [.2] * 5
    valid_labels = [1, 2, 3]
    vals = [tf.zeros([3, 1])]

    illegal_labels = [
        [0, -1, 1],  # classes must be nonnegative
        [5, 1, 1],  # classes must be less than number of classes
        [2, 3],  # data and label batch size must be the same
    ]
    illegal_probs = [
        [.1] * 5,  # probabilities must sum to one
        [-.5, .5, .5, .4, .1],  # probabilities must be non-negative
    ]

    # Set up graph with illegal label vector.
    label_ph = tf.placeholder(tf.int32, shape=[None])
    probs_ph = tf.placeholder(tf.float32, shape=[5])  # shape must be defined
    val_tf, lbl_tf, prob_tf = sampling_ops._verify_input(  # pylint: disable=protected-access
        vals, label_ph, [probs_ph])

    for illegal_label in illegal_labels:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run([val_tf, lbl_tf],
                   feed_dict={label_ph: illegal_label,
                              probs_ph: valid_probs})

    for illegal_prob in illegal_probs:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run([prob_tf],
                   feed_dict={label_ph: valid_labels,
                              probs_ph: illegal_prob})

  def testCanBeCalledMultipleTimes(self):
    """Building the sampler twice in one graph must not collide."""
    batch_size = 20
    val_input_batch = [tf.zeros([2, 3, 4])]
    lbl_input_batch = tf.ones([], dtype=tf.int32)
    probs = np.array([0, 1, 0, 0, 0])
    batches = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    batches += tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    summary_op = tf.contrib.deprecated.merge_summary(
        tf.get_collection(tf.GraphKeys.SUMMARIES))
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)
      sess.run(batches + (summary_op,))
      coord.request_stop()
      coord.join(threads)

  def testRejectionBatchingBehavior(self):
    """enqueue_many input batches can be smaller than the output batch."""
    batch_size = 20
    input_batch_size = 11
    val_input_batch = [tf.zeros([input_batch_size, 2, 3, 4])]
    lbl_input_batch = tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.ones([input_batch_size], dtype=tf.int32) * 1,
        lambda: tf.ones([input_batch_size], dtype=tf.int32) * 3)
    probs = np.array([0, .2, 0, .8, 0])
    data_batch, labels = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size,
        init_probs=[0, .3, 0, .7, 0], enqueue_many=True)
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)
      sess.run([data_batch, labels])
      coord.request_stop()
      coord.join(threads)

  def testBatchDimensionNotRequired(self):
    classes = 5
    # Probs must be a tensor, since we pass it directly to _verify_input.
    probs = tf.constant([1.0 / classes] * classes)

    # Make sure that these vals/labels pairs don't throw any runtime exceptions.
    legal_input_pairs = [
        (np.zeros([2, 3]), [x % classes for x in range(2)]),  # batch dim 2
        (np.zeros([4, 15]), [x % classes for x in range(4)]),  # batch dim 4
        (np.zeros([10, 1]), [x % classes for x in range(10)]),  # batch dim 10
    ]

    # Set up graph with placeholders.
    vals_ph = tf.placeholder(tf.float32)  # completely undefined shape
    labels_ph = tf.placeholder(tf.int32)  # completely undefined shape
    val_tf, labels_tf, _ = sampling_ops._verify_input(  # pylint: disable=protected-access
        [vals_ph], labels_ph, [probs])

    # Run graph to make sure there are no shape-related runtime errors.
    for vals, labels in legal_input_pairs:
      with self.test_session() as sess:
        sess.run([val_tf, labels_tf],
                 feed_dict={vals_ph: vals,
                            labels_ph: labels})

  def testRejectionDataListInput(self):
    """A list of data tensors must come back as a list of the same length."""
    batch_size = 20
    val_input_batch = [tf.zeros([2, 3, 4]), tf.ones([2, 4]), tf.ones(2) * 3]
    lbl_input_batch = tf.ones([], dtype=tf.int32)
    probs = np.array([0, 1, 0, 0, 0])
    val_list, lbls = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size,
        init_probs=[0, 1, 0, 0, 0])

    # Check output shapes.
    self.assertTrue(isinstance(val_list, list))
    self.assertEqual(len(val_list), len(val_input_batch))
    self.assertTrue(isinstance(lbls, tf.Tensor))

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)
      out = sess.run(val_list + [lbls])
      coord.request_stop()
      coord.join(threads)

    # Check output shapes.
    self.assertEqual(len(out), len(val_input_batch) + 1)

  def normalBehaviorHelper(self, sampler):
    """Drive `sampler` with a two-class stream and check that the sampled
    label distribution statistically matches the target probabilities."""
    # Set up graph.
    tf.set_random_seed(1234)
    lbl1 = 0
    lbl2 = 3
    # This cond allows the necessary class queues to be populated.
    label = tf.cond(
        tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
        lambda: tf.constant(lbl2))
    val = [np.array([1, 4]) * label]
    probs = np.array([.8, 0, 0, .2, 0])
    batch_size = 16

    data_batch, labels = sampler(val, label, probs, batch_size)

    # Run session and keep track of how frequently the labels and values appear.
    data_l = []
    label_l = []
    with self.test_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      tf.global_variables_initializer().run()

      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      for _ in range(20):
        [data], lbls = sess.run([data_batch, labels])
        data_l.append(data)
        label_l.append(lbls)

      coord.request_stop()
      coord.join(threads)

    # First check that the data matches the labels.
    for lbl, data in zip(label_l, data_l):
      for i in range(batch_size):
        self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))

    # Check that the labels are approximately correct.
    expected_label = probs[0] * lbl1 + probs[3] * lbl2
    lbl_list = range(len(probs))
    lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
    lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l))  # CLT
    actual_lbl = np.mean(label_l)
    # Tolerance is 3 standard deviations of the mean. According to the central
    # limit theorem, this should cover 99.7% of cases. Note that since the seed
    # is fixed, for a given implementation, this test will pass or fail 100% of
    # the time. This use of assertNear is to cover cases where someone changes
    # an implementation detail, which would cause the random behavior to differ.
    self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)

  def testRejectionNormalBehavior(self):
    initial_p = [.7, 0, 0, .3, 0]

    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return tf.contrib.training.stratified_sample(
          val,
          lbls,
          probs,
          batch,
          init_probs=initial_p,
          enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)

  def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
    # init_probs=None makes the sampler estimate the input distribution online.
    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return tf.contrib.training.stratified_sample(
          val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(tf.test.TestCase):
  """Tests for tf.contrib.training.rejection_sample."""

  def testGraphConstructionFailures(self):
    """Invalid arguments must raise at graph-construction time."""
    accept_prob_fn = lambda _: tf.constant(1.0)
    batch_size = 32

    # Data must have batch dimension if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      tf.contrib.training.rejection_sample(
          [tf.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)

    # Batch dimensions should be equal if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      tf.contrib.training.rejection_sample(
          [tf.zeros([5, 1]), tf.zeros([4, 1])], accept_prob_fn, batch_size,
          enqueue_many=True)

  def testRuntimeFailures(self):
    """Acceptance probabilities outside [0, 1] must fail at run time when
    runtime_checks is enabled."""
    prob_ph = tf.placeholder(tf.float32, [])
    accept_prob_fn = lambda _: prob_ph
    batch_size = 32

    # Set up graph.
    tf.set_random_seed(1234)
    tf.contrib.training.rejection_sample(
        [tf.zeros([])], accept_prob_fn, batch_size, runtime_checks=True,
        name='rejection_sample')
    prob_tensor = tf.get_default_graph().get_tensor_by_name(
        'rejection_sample/prob_with_checks:0')

    # Run session that should fail.
    with self.test_session() as sess:
      for illegal_prob in [-0.1, 1.1]:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})

  def testNormalBehavior(self):
    """Inputs with acceptance probability 0 must never appear in a batch."""
    tensor_list = [tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.constant(1.0),
        lambda: tf.constant(2.0))]
    # accept 2.0 with probability 1 and 1.0 with probability 0
    accept_prob_fn = lambda x: x[0] - 1.0
    batch_size = 10

    # Set up graph.
    sample = tf.contrib.training.rejection_sample(
        tensor_list, accept_prob_fn, batch_size)

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      for _ in range(5):
        sample_np = sess.run(sample)[0]
        self.assertListEqual([2.0] * batch_size, list(sample_np))

      coord.request_stop()
      coord.join(threads)
class ConditionalBatchTest(tf.test.TestCase):
  """Tests for the private sampling_ops._conditional_batch helper."""

  def testConditionallyEnqueueAndBatch(self):
    """Only tensors whose keep_input evaluates True may reach the batch."""
    tf.set_random_seed(1234)
    tensor = tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.constant(1.0),
        lambda: tf.constant(2.0))
    keep_input = tf.equal(tensor, 2.0)
    batch_size = 4

    # Set up the test graph.
    [batch] = sampling_ops._conditional_batch([tensor], keep_input, batch_size)  # pylint: disable=protected-access

    # Check conditional operation.
    with self.test_session():
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      batch_np = batch.eval()

      coord.request_stop()
      coord.join(threads)

    # Check that all elements in batch come from tensors with acceptance prob
    # 1, so that none come from acceptance prob 0.
    self.assertListEqual(list(batch_np), [2.0] * batch_size)

  def testConditionallyEnqueueAndBatchTypes(self):
    tensor = tf.constant(1.0)
    keep_input = tf.constant(True)
    batch_size = 4

    # Check that output types are the same for 1 and 2-length input lists.
    output1 = sampling_ops._conditional_batch([tensor], keep_input, batch_size)  # pylint: disable=protected-access
    output2 = sampling_ops._conditional_batch(  # pylint: disable=protected-access
        [tensor, tensor], keep_input, batch_size)
    self.assertEqual(type(output1), type(output2))
# Run all test cases in this module when executed directly.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
nichit93/Implementation-of-TRED-in-ns-3 | examples/tutorial/first.py | 102 | 2128 | # /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
import ns.applications
import ns.core
import ns.internet
import ns.network
import ns.point_to_point
# Enable INFO-level logging for the UDP echo applications so their
# send/receive events are printed while the simulation runs.
ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO)
ns.core.LogComponentEnable("UdpEchoServerApplication", ns.core.LOG_LEVEL_INFO)

# Create two nodes to be connected by a point-to-point link.
nodes = ns.network.NodeContainer()
nodes.Create(2)

# Configure the link: 5 Mbps bandwidth, 2 ms propagation delay.
pointToPoint = ns.point_to_point.PointToPointHelper()
pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps"))
pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms"))

# Install point-to-point net devices on both nodes.
devices = pointToPoint.Install(nodes)

# Install the Internet protocol stack on the nodes.
stack = ns.internet.InternetStackHelper()
stack.Install(nodes)

# Assign IPv4 addresses from the 10.1.1.0/24 subnet to the devices.
address = ns.internet.Ipv4AddressHelper()
address.SetBase(ns.network.Ipv4Address("10.1.1.0"),
                ns.network.Ipv4Mask("255.255.255.0"))

interfaces = address.Assign(devices)

# Echo server on node 1, UDP port 9, active from t=1s to t=10s.
echoServer = ns.applications.UdpEchoServerHelper(9)

serverApps = echoServer.Install(nodes.Get(1))
serverApps.Start(ns.core.Seconds(1.0))
serverApps.Stop(ns.core.Seconds(10.0))

# Echo client on node 0 sending one 1024-byte packet to the server's
# address; it starts at t=2s, after the server is up.
echoClient = ns.applications.UdpEchoClientHelper(interfaces.GetAddress(1), 9)
echoClient.SetAttribute("MaxPackets", ns.core.UintegerValue(1))
echoClient.SetAttribute("Interval", ns.core.TimeValue(ns.core.Seconds(1.0)))
echoClient.SetAttribute("PacketSize", ns.core.UintegerValue(1024))

clientApps = echoClient.Install(nodes.Get(0))
clientApps.Start(ns.core.Seconds(2.0))
clientApps.Stop(ns.core.Seconds(10.0))

# Run the simulation to completion, then release all resources.
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
| gpl-2.0 |
CloudServer/nova | nova/tests/functional/v3/test_instance_usage_audit_log.py | 33 | 2244 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
# Global oslo.config handle; import the osapi_compute_extension option so the
# test class below can copy and extend it.
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.extensions')
class InstanceUsageAuditLogJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the os-instance-usage-audit-log extension."""

    ADMIN_API = True
    extension_name = "os-instance-usage-audit-log"
    # TODO(gmann): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        """Extend the inherited flags with the legacy v2 extension class."""
        f = super(InstanceUsageAuditLogJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append('nova.api.openstack.compute.'
                                            'contrib.instance_usage_audit_log.'
                                            'Instance_usage_audit_log')
        return f

    def test_show_instance_usage_audit_log(self):
        """GET a single audit log entry; the timestamp must be URL-quoted."""
        response = self._do_get('os-instance_usage_audit_log/%s' %
                                urllib.quote('2012-07-05 10:00:00'))
        subs = self._get_regexes()
        # Host ids are hex strings in the sample responses.
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('inst-usage-audit-log-show-get-resp',
                              subs, response, 200)

    def test_index_instance_usage_audit_log(self):
        """GET the full audit log index."""
        response = self._do_get('os-instance_usage_audit_log')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('inst-usage-audit-log-index-get-resp',
                              subs, response, 200)
| apache-2.0 |
hmen89/odoo | addons/account_analytic_plans/wizard/__init__.py | 445 | 1117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_plan_create_model
import account_crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
3manuek/deeppy | deeppy/dataset/dataset.py | 17 | 2241 | import os
import shutil
import logging
from .util import checksum, download, archive_extract, is_archive, touch
# Module-level logger named after this module.
log = logging.getLogger(__name__)
class Dataset(object):
    """Abstract base class for downloadable datasets.

    Subclasses set `name` and `data_dir` and drive the download/unpack/
    install steps. Each completed step is marked with an empty checkpoint
    file inside `data_dir`, so an interrupted run can resume (or restart a
    half-finished download from scratch).
    """

    name = None       # dataset identifier (set by subclass)
    data_dir = None   # directory holding the dataset files (set by subclass)
    # Checkpoint marker filenames; their presence means the step completed.
    _download_checkpoint = '__download_complete'
    _unpack_checkpoint = '__unpack_complete'
    _install_checkpoint = '__install_complete'

    def __init__(self):
        # Abstract base: instantiate a concrete subclass instead.
        raise NotImplementedError()

    def _download(self, urls, sha1s=None):
        ''' Download dataset files given by the urls. If sha1s is given, the
        downloaded files are checked for correctness. '''
        checkpoint = os.path.join(self.data_dir, self._download_checkpoint)
        if os.path.exists(checkpoint):
            return
        if os.path.exists(self.data_dir):
            # A previous download did not finish; wipe and start over.
            log.info('Incomplete %s exists - restarting download.',
                     self.data_dir)
            shutil.rmtree(self.data_dir)
            os.mkdir(self.data_dir)
        else:
            os.makedirs(self.data_dir)
        for i, url in enumerate(urls):
            log.info('Downloading %s', url)
            filepath = download(url, self.data_dir)
            if sha1s is not None:
                if sha1s[i] != checksum(filepath):
                    raise RuntimeError('SHA-1 checksum mismatch for %s.' % url)
        touch(checkpoint)

    def _unpack(self, separate_dirs=False):
        ''' Unpack all archive files in data_dir. If separate_dirs is true,
        each archive is extracted into a subdirectory named after the archive;
        archives are deleted after all extractions succeed. '''
        checkpoint = os.path.join(self.data_dir, self._unpack_checkpoint)
        if os.path.exists(checkpoint):
            return
        to_be_removed = []
        for filename in os.listdir(self.data_dir):
            filepath = os.path.join(self.data_dir, filename)
            if is_archive(filepath):
                log.info('Unpacking %s', filepath)
                target_dir = os.path.abspath(self.data_dir)
                if separate_dirs:
                    dirname, _ = os.path.splitext(filename)
                    target_dir = os.path.join(target_dir, dirname)
                archive_extract(filepath, target_dir)
                to_be_removed.append(filepath)
        # Remove the archives only after every extraction succeeded.
        for filepath in to_be_removed:
            os.remove(filepath)
        touch(checkpoint)

    def _install(self):
        # Hook for subclasses: post-unpack processing. Default is a no-op.
        pass
| mit |
fredturnerr/SereteliNew | node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/eclipse.py | 437 | 11894 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
# Set to True by CalculateGeneratorInputInfo when the
# 'adjust_static_libraries' generator flag is given.
generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}

for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'

for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''

# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
    '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and the detected OS flavor.

  On Windows, additionally pulls shared configuration from the MSVS
  generator so Eclipse output matches the VS emulation environment.
  """
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Eclipse generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): the next two names are assigned but never used or
    # exported from this function -- presumably they were meant to be
    # module globals; confirm against other gyp generators.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  adjust = flags.get('adjust_static_libraries', False)
  if adjust:
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """
  gyp_includes_set = set()
  compiler_includes_list = []

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          # Preserve cflag order but drop duplicates.
          if include_dir and include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      # 'in' replaces dict.has_key(), which does not exist on Python 3.
      if 'include_dirs' in config:
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              # Resolve relative dirs against the directory of the gyp file.
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)

            # set.add is idempotent; no membership test needed.
            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  all_includes_list = sorted(gyp_includes_set)

  # Append compiler-flag dirs (in original order) gyp didn't already list.
  for compiler_include in compiler_includes_list:
    if compiler_include not in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, target_dicts, data):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that. Otherwise, see if a compiler was specified via the
    CC_target environment variable.
  """
  # First, see if the compiler is configured in make's settings.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_dict = data[build_file].get('make_global_settings', {})
  # NOTE(review): despite the variable name, this is iterated as (key, value)
  # pairs -- presumably 'make_global_settings' is a list of 2-item sequences;
  # the {} default simply yields nothing. Confirm against gyp input format.
  for key, value in make_global_settings_dict:
    if key in ['CC', 'CXX']:
      return value

  # Check to see if the compiler was specified as an environment variable.
  for key in ['CC_target', 'CC', 'CXX']:
    compiler = os.environ.get(key)
    if compiler:
      return compiler

  # Fall back to gcc when nothing is configured.
  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explict defines declared in gyp files along with all of
    the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # Split "NAME=VALUE" once; a bare "NAME" defaults to value "1".
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines  # Default defines already processed in the loop above.
  cc_target = GetCompilerPath(target_list, target_dicts, data)
  if cc_target:
    # Ask the compiler to dump its built-in macros: "cc -E -dM -".
    command = shlex.split(cc_target)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    # Each non-empty line looks like "#define NAME VALUE" (VALUE optional).
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Write the includes section of a CDT settings export file."""
  out.write('  <section name="org.eclipse.cdt.internal.ui.wizards.'
            'settingswizards.IncludePaths">\n')
  out.write('    <language name="holder for library settings"></language>\n')
  # One <language> element per language, each listing every include path.
  entry = '      <includepath workspace_path="false">%s</includepath>\n'
  for language in eclipse_langs:
    out.write('    <language name="%s">\n' % language)
    for path in include_dirs:
      out.write(entry % path)
    out.write('    </language>\n')
  out.write('  </section>\n')
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros (preprocessor defines) section of a CDT settings
  export file.

  Args:
    out: writable file-like object receiving the XML.
    eclipse_langs: language names, one <language> element each.
    defines: dict mapping macro name -> value; emitted XML-escaped in
        sorted-by-name order.
  """
  out.write('  <section name="org.eclipse.cdt.internal.ui.wizards.' \
            'settingswizards.Macros">\n')
  out.write('    <language name="holder for library settings"></language>\n')
  for lang in eclipse_langs:
    out.write('    <language name="%s">\n' % lang)
    # sorted(defines) iterates key names on both Python 2 and 3;
    # dict.iterkeys() (used previously) does not exist on Python 3.
    for key in sorted(defines):
      out.write('      <macro><name>%s</name><value>%s</value></macro>\n' %
                (escape(key), escape(defines[key])))
    out.write('    </language>\n')
  out.write('  </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write eclipse-cdt-settings.xml for a single build configuration."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
                           config_name)

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
  # SHARED_INTERMEDIATE_DIR. Include both possible locations.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
  gyp.common.EnsureDirExists(out_name)
  out = open(out_name, 'w')

  out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
  out.write('<cdtprojectproperties>\n')

  eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
                   'GNU C++', 'GNU C', 'Assembly']
  include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                          shared_intermediate_dirs, config_name,
                                          params)
  WriteIncludePaths(out, eclipse_langs, include_dirs)
  defines = GetAllDefines(target_list, target_dicts, data, config_name, params)
  WriteMacros(out, eclipse_langs, defines)

  out.write('</cdtprojectproperties>\n')
  out.close()
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project.

  Emits one file per configuration, or only for the configuration named by
  the 'config' generator flag when given.
  """
  if params['options'].generator_output:
    # The parenthesized call form is valid on Python 2 and 3; the previous
    # 'raise NotImplementedError, "..."' statement is a SyntaxError on 3.
    raise NotImplementedError("--generator_output not implemented for eclipse")

  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    for config_name in config_names:
      GenerateOutputForConfig(target_list, target_dicts, data, params,
                              config_name)
| mit |
golumn/xhtml2pdf | xhtml2pdf/tags.py | 10 | 20069 | # -*- coding: utf-8 -*-
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch, mm
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import Spacer, HRFlowable, PageBreak, Flowable
from reportlab.platypus.frames import Frame
from reportlab.platypus.paraparser import tt2ps, ABag
from xhtml2pdf import xhtml2pdf_reportlab
from xhtml2pdf.util import getColor, getSize, getAlign, dpi96
from xhtml2pdf.xhtml2pdf_reportlab import PmlImage, PmlPageTemplate
import copy
import logging
import re
import warnings
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Shared logger for the xhtml2pdf package.
log = logging.getLogger("xhtml2pdf")
def deprecation(message):
    """Emit a DeprecationWarning naming the deprecated tag."""
    text = "<" + message + "> is deprecated!"
    # stacklevel=2 points the warning at the caller, not at this helper.
    warnings.warn(text, DeprecationWarning, stacklevel=2)
class pisaTag:
    """
    Base class for all tag handlers.

    Keeps the DOM node, its tag name and the parsed attributes; the
    start/end hooks are no-ops for subclasses to override.
    """

    def __init__(self, node, attr):
        self.node = node
        self.attr = attr
        self.tag = node.tagName

    def start(self, c):
        pass

    def end(self, c):
        pass
class pisaTagBODY(pisaTag):
    """
    We can also assume that there is a BODY tag because html5lib
    adds it for us. Here we take the base font size for later calculations
    in the FONT tag.
    """

    def start(self, c):
        # Relative sizes in <font size=...> are computed against this value.
        c.baseFontSize = c.frag.fontSize
        # print "base font size", c.baseFontSize
class pisaTagTITLE(pisaTag):
    """<title>: store the collected text as document meta data, not output."""

    def end(self, c):
        c.meta["title"] = c.text
        # Drop the title text so it does not appear in the document body.
        c.clearFrag()
class pisaTagSTYLE(pisaTag):
    """<style>: contents are CSS, not document text -- flush the current
    paragraph on entry and discard accumulated fragments on exit."""

    def start(self, c):
        c.addPara()

    def end(self, c):
        c.clearFrag()
class pisaTagMETA(pisaTag):
    """<meta>: copy author/subject/keywords values into the PDF meta data."""

    def start(self, c):
        name = self.attr.name.lower()
        if name in ("author", "subject", "keywords"):
            c.meta[name] = self.attr.content
class pisaTagSUP(pisaTag):
    """<sup>: mark the current fragment as superscript."""

    def start(self, c):
        c.frag.super = 1


class pisaTagSUB(pisaTag):
    """<sub>: mark the current fragment as subscript."""

    def start(self, c):
        c.frag.sub = 1
class pisaTagA(pisaTag):
    """<a>: anchors (name=) and hyperlinks (href=)."""

    # Accepted links are document-internal ("#...") or carry an explicit
    # scheme ("http:", "mailto:", ...).
    rxLink = re.compile("^(#|[a-z]+\:).*")

    def start(self, c):
        attr = self.attr
        # XXX Also support attr.id ?
        if attr.name:
            # Important! Make sure that cbDefn is not inherited by other
            # fragments because of a bug in Reportlab!
            afrag = c.frag.clone()
            # These 3 lines are needed to fix an error with non internal fonts
            afrag.fontName = "Helvetica"
            afrag.bold = 0
            afrag.italic = 0
            afrag.cbDefn = ABag(
                kind="anchor",
                name=attr.name,
                label="anchor")
            c.fragAnchor.append(afrag)
            c.anchorName.append(attr.name)
        if attr.href and self.rxLink.match(attr.href):
            c.frag.link = attr.href

    def end(self, c):
        pass
class pisaTagFONT(pisaTag):
    """<font color= face= size=>: apply inline font overrides."""

    # Source: http://www.w3.org/TR/CSS21/fonts.html#propdef-font-size
    def start(self, c):
        if self.attr["color"] is not None:
            c.frag.textColor = getColor(self.attr["color"])
        if self.attr["face"] is not None:
            c.frag.fontName = c.getFontName(self.attr["face"])
        if self.attr["size"] is not None:
            # Size may be relative to current/base font size; clamp to >= 1.
            size = getSize(self.attr["size"], c.frag.fontSize, c.baseFontSize)
            c.frag.fontSize = max(size, 1.0)

    def end(self, c):
        pass
class pisaTagP(pisaTag):
    """<p>: paragraph-level tag; applies the align= attribute."""

    def start(self, c):
        # save the type of tag; it's used in PmlBaseDoc.afterFlowable()
        # to check if we need to add an outline-entry
        # c.frag.tag = self.tag
        if self.attr.align is not None:
            # print self.attr.align, getAlign(self.attr.align)
            c.frag.alignment = getAlign(self.attr.align)
# Block-level tags that share the paragraph behaviour of <p>.
class pisaTagDIV(pisaTagP): pass
class pisaTagH1(pisaTagP): pass
class pisaTagH2(pisaTagP): pass
class pisaTagH3(pisaTagP): pass
class pisaTagH4(pisaTagP): pass
class pisaTagH5(pisaTagP): pass
class pisaTagH6(pisaTagP): pass
def listDecimal(c):
    # Ordered-list label: advance the shared counter and render "1.", "2.", ...
    # NOTE(review): uses the Python 2 `unicode` builtin.
    c.listCounter += 1
    return unicode("%d." % c.listCounter)
# Bullet glyph shared by all unordered list styles.
_bullet = u"\u2022"

# Maps CSS list-style-type values to either a literal bullet string or a
# callable producing the next item label. Numbering schemes without a PDF
# equivalent here fall back to plain decimal numbering.
_list_style_type = {
    "none": u"",
    "disc": _bullet,
    "circle": _bullet,  # XXX PDF has no equivalent
    "square": _bullet,  # XXX PDF has no equivalent
    "decimal": listDecimal,
    "decimal-leading-zero": listDecimal,
    "lower-roman": listDecimal,
    "upper-roman": listDecimal,
    "hebrew": listDecimal,
    "georgian": listDecimal,
    "armenian": listDecimal,
    "cjk-ideographic": listDecimal,
    "hiragana": listDecimal,
    "katakana": listDecimal,
    "hiragana-iroha": listDecimal,
    "katakana-iroha": listDecimal,
    "lower-latin": listDecimal,
    "lower-alpha": listDecimal,
    "upper-latin": listDecimal,
    "upper-alpha": listDecimal,
    "lower-greek": listDecimal,
}
class pisaTagUL(pisaTagP):
    """<ul>: save and reset the list counter around the list body."""

    def start(self, c):
        self.counter, c.listCounter = c.listCounter, 0

    def end(self, c):
        c.addPara()
        # XXX Simulate margin for the moment
        c.addStory(Spacer(width=1, height=c.fragBlock.spaceAfter))
        # Restore the counter of any enclosing list.
        c.listCounter = self.counter


class pisaTagOL(pisaTagUL):
    """<ol>: same handling as <ul>; numbering comes from listStyleType."""
    pass
class pisaTagLI(pisaTag):
    """<li>: prepare the bullet (glyph, number or image) for the list item."""

    def start(self, c):
        # Pick the bullet for the configured list-style-type (default disc).
        lst = _list_style_type.get(c.frag.listStyleType or "disc", _bullet)
        #log.debug("frag %r", c.copyFrag(
        #    text=lst,
        #    bulletFontName=c.getFontName("helvetica"),
        #    fontName=c.getFontName("helvetica")))
        # c.addFrag("")

        #frag = ParaFrag()
        #frag.fontName = frag.bulletFontName = c.getFontName("helvetica")
        #frag.fontSize = c.frag.fontSize

        #c.frag.fontName = c.getFontName("helvetica")
        frag = copy.copy(c.frag)
        #print "###", c.frag.fontName
        #frag.fontName = "au_00" # c.getFontName("helvetica")
        #frag.bulletFontName = "au_00" # c.getFontName("helvetica")

        self.offset = 0
        if frag.listStyleImage is not None:
            # An image is used as the bullet instead of text.
            frag.text = u""
            f = frag.listStyleImage
            if f and (not f.notFound()):
                img = PmlImage(
                    f.getData(),
                    width=None,
                    height=None)
                img.drawHeight *= dpi96
                img.drawWidth *= dpi96
                img.pisaZoom = frag.zoom
                img.drawWidth *= img.pisaZoom
                img.drawHeight *= img.pisaZoom
                frag.image = img
                # If the image is taller than the text, remember the excess
                # so end() can add it as extra spacing.
                self.offset = max(0, img.drawHeight - c.frag.fontSize)
        else:
            if type(lst) == type(u""):
                frag.text = lst
            else:
                # XXX This should be the recent font, but it throws errors in Reportlab!
                frag.text = lst(c)

        # XXX This should usually be done in the context!!!
        frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
        c.frag.bulletText = [frag]

    def end(self, c):
        # Account for a bullet image taller than the line.
        c.fragBlock.spaceBefore += self.offset
        #c.fragBlock.bulletText = self.bulletText
        #print 999, self.bulletText
        # c.addPara()
class pisaTagBR(pisaTag):
    """<br>: force a line break inside the current paragraph."""

    def start(self, c):
        # print "BR", c.text[-40:]
        c.frag.lineBreak = 1
        c.addFrag()
        c.fragStrip = True
        # The break applies to this fragment only.
        del c.frag.lineBreak
        c.force = True
class pisaTagIMG(pisaTag):
    """<img>: insert an image, floated (align left/right) or inline."""

    def start(self, c):
        attr = self.attr
        if attr.src and (not attr.src.notFound()):

            try:
                align = attr.align or c.frag.vAlign or "baseline"
                # print "align", align, attr.align, c.frag.vAlign
                width = c.frag.width
                height = c.frag.height

                if attr.width:
                    width = attr.width * dpi96
                if attr.height:
                    height = attr.height * dpi96

                img = PmlImage(
                    attr.src.getData(),
                    width=None,
                    height=None)

                img.pisaZoom = c.frag.zoom

                img.drawHeight *= dpi96
                img.drawWidth *= dpi96

                if (width is None) and (height is not None):
                    # Only height given: scale width to keep the aspect ratio.
                    factor = getSize(height) / img.drawHeight
                    img.drawWidth *= factor
                    img.drawHeight = getSize(height)
                elif (height is None) and (width is not None):
                    # Only width given: scale height to keep the aspect ratio.
                    factor = getSize(width) / img.drawWidth
                    img.drawHeight *= factor
                    img.drawWidth = getSize(width)
                elif (width is not None) and (height is not None):
                    img.drawWidth = getSize(width)
                    img.drawHeight = getSize(height)

                img.drawWidth *= img.pisaZoom
                img.drawHeight *= img.pisaZoom

                img.spaceBefore = c.frag.spaceBefore
                img.spaceAfter = c.frag.spaceAfter

                # print "image", id(img), img.drawWidth, img.drawHeight

                '''
                TODO:

                - Apply styles
                - vspace etc.
                - Borders
                - Test inside tables
                '''

                c.force = True
                if align in ["left", "right"]:
                    # Floating image: handed to the paragraph builder.
                    c.image = img
                    c.imageData = dict(
                        align=align
                    )

                else:
                    # Important! Make sure that cbDefn is not inherited by other
                    # fragments because of a bug in Reportlab!
                    # afrag = c.frag.clone()

                    # Normalize HTML vertical-align keywords to reportlab's.
                    valign = align
                    if valign in ["texttop"]:
                        valign = "top"
                    elif valign in ["absmiddle"]:
                        valign = "middle"
                    elif valign in ["absbottom", "baseline"]:
                        valign = "bottom"

                    afrag = c.frag.clone()
                    afrag.text = ""
                    afrag.fontName = "Helvetica"  # Fix for a nasty bug!!!
                    afrag.cbDefn = ABag(
                        kind="img",
                        image=img,  # .getImage(), # XXX Inline?
                        valign=valign,
                        fontName="Helvetica",
                        fontSize=img.drawHeight,
                        width=img.drawWidth,
                        height=img.drawHeight)

                    # print "add frag", id(afrag), img.drawWidth, img.drawHeight

                    c.fragList.append(afrag)
                    c.fontSize = img.drawHeight

            except Exception:  # TODO: Kill catch-all
                log.warn(c.warning("Error in handling image"), exc_info=1)
        else:
            log.warn(c.warning("Need a valid file name!"))
class pisaTagHR(pisaTag):
    """<hr>: close the current paragraph and emit a horizontal rule."""

    def start(self, c):
        c.addPara()
        c.addStory(HRFlowable(
            color=self.attr.color,
            thickness=self.attr.size,
            # Default to full width when no width attribute is given.
            width=self.attr.get('width', "100%") or "100%",
            spaceBefore=c.frag.spaceBefore,
            spaceAfter=c.frag.spaceAfter
        ))
# --- Forms
# NOTE(review): the interactive-form tag handlers below are disabled by the
# `if 0:` guard -- dead code kept for reference only.
if 0:

    class pisaTagINPUT(pisaTag):

        def _render(self, c, attr):
            width = 10
            height = 10
            if attr.type == "text":
                width = 100
                height = 12
            c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
                type=attr.type,
                default=attr.value,
                width=width,
                height=height,
            ))

        def end(self, c):
            c.addPara()
            attr = self.attr
            if attr.name:
                self._render(c, attr)
            c.addPara()

    class pisaTagTEXTAREA(pisaTagINPUT):

        def _render(self, c, attr):
            c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
                default="",
                width=100,
                height=100))

    class pisaTagSELECT(pisaTagINPUT):

        def start(self, c):
            c.select_options = ["One", "Two", "Three"]

        def _render(self, c, attr):
            c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name,
                type="select",
                default=c.select_options[0],
                options=c.select_options,
                width=100,
                height=40))
            c.select_options = None

    class pisaTagOPTION(pisaTag):

        pass
# ============================================
class pisaTagPDFNEXTPAGE(pisaTag):
    """
    <pdf:nextpage name="" />
    """

    def start(self, c):
        # deprecation("pdf:nextpage")
        c.addPara()
        if self.attr.name:
            # Switch to the named page template before breaking the page.
            c.addStory(NextPageTemplate(self.attr.name))
        c.addStory(PageBreak())
class pisaTagPDFNEXTTEMPLATE(pisaTag):
    """
    <pdf:nexttemplate name="" />
    """

    def start(self, c):
        # deprecation("pdf:frame")
        # Subsequent pages use the named template; no page break here.
        c.addStory(NextPageTemplate(self.attr["name"]))
class pisaTagPDFNEXTFRAME(pisaTag):
    """
    <pdf:nextframe name="" />
    """

    def start(self, c):
        c.addPara()
        # Continue the story in the next frame of the page template.
        c.addStory(FrameBreak())
class pisaTagPDFSPACER(pisaTag):
    """
    <pdf:spacer height="" />
    """

    def start(self, c):
        c.addPara()
        # Width is irrelevant for vertical spacing; 1pt placeholder.
        c.addStory(Spacer(1, self.attr.height))
class pisaTagPDFPAGENUMBER(pisaTag):
    """
    <pdf:pagenumber example="" />
    """

    def start(self, c):
        # Flag the fragment so the real page number is substituted at draw
        # time; `example` only reserves layout space.
        c.frag.pageNumber = True
        c.addFrag(self.attr.example)
        c.frag.pageNumber = False
class pisaTagPDFTOC(pisaTag):
    """
    <pdf:toc />
    """

    def end(self, c):
        # A table of contents needs a second build pass to resolve entries.
        c.multiBuild = True
        c.addTOC()
class pisaTagPDFFRAME(pisaTag):
    """
    <pdf:frame name="" static box="" />
    """

    def start(self, c):
        deprecation("pdf:frame")
        attrs = self.attr

        name = attrs["name"]
        if name is None:
            # Auto-generate a unique frame name.
            name = "frame%d" % c.UID()

        x, y, w, h = attrs.box
        self.frame = Frame(
            x, y, w, h,
            id=name,
            leftPadding=0,
            rightPadding=0,
            bottomPadding=0,
            topPadding=0,
            showBoundary=attrs.border)

        self.static = False
        if self.attr.static:
            # Static frames collect their own story until end().
            self.static = True
            c.addPara()
            self.story = c.swapStory()
        else:
            c.frameList.append(self.frame)

    def end(self, c):
        if self.static:
            c.addPara()
            self.frame.pisaStaticStory = c.story
            c.frameStaticList.append(self.frame)
            # Restore the story that was active before this static frame.
            c.swapStory(self.story)
class pisaTagPDFTEMPLATE(pisaTag):
    """
    <pdf:template name="" static box="" >
        <pdf:frame...>
    </pdf:template>

    Collects the <pdf:frame> definitions declared inside it and registers a
    PmlPageTemplate under the given name when the tag closes.
    """

    def start(self, c):
        deprecation("pdf:template")
        attrs = self.attr
        # print attrs
        name = attrs["name"]
        c.frameList = []
        c.frameStaticList = []
        # `name in dict` replaces dict.has_key(), which was removed in
        # Python 3; behavior is identical on Python 2.
        if name in c.templateList:
            log.warn(c.warning("template '%s' has already been defined", name))

        '''
        self.oldpagesize = A4 # self._pagesize
        self._pagesize = PML_PAGESIZES[attrs.format]
        if attrs.orientation is not None:
            if attrs.orientation == "landscape":
                self._pagesize = landscape(self._pagesize)
            elif attrs.orientation == "portrait":
                self._pagesize = portrait(self._pagesize)
        '''

        # self._drawing = PmlPageDrawing(self._pagesize)

    def end(self, c):
        attrs = self.attr
        name = attrs["name"]
        if len(c.frameList) <= 0:
            log.warn(c.warning("missing frame definitions for template"))

        pt = PmlPageTemplate(
            id=name,
            frames=c.frameList,
            pagesize=A4,
        )
        pt.pisaStaticList = c.frameStaticList
        pt.pisaBackgroundList = c.pisaBackgroundList
        pt.pisaBackground = self.attr.background

        # self._pagesize)
        # pt.pml_statics = self._statics
        # pt.pml_draw = self._draw
        # pt.pml_drawing = self._drawing
        # pt.pml_background = attrs.background
        # pt.pml_bgstory = self._bgstory

        c.templateList[name] = pt
        c.template = None
        c.frameList = []
        c.frameStaticList = []
class pisaTagPDFFONT(pisaTag):
    """
    <pdf:fontembed name="" src="" />
    """

    def start(self, c):
        deprecation("pdf:font")
        c.loadFont(self.attr.name, self.attr.src, self.attr.encoding)
class pisaTagPDFBARCODE(pisaTag):
    """
    <pdf:barcode type="" value="" ... />: render an inline barcode.
    """

    # Map user-facing type names to reportlab barcode code names.
    _codeName = {
        "I2OF5": "I2of5",
        "ITF": "I2of5",
        "CODE39": "Standard39",
        "EXTENDEDCODE39": "Extended39",
        "CODE93": "Standard93",
        "EXTENDEDCODE93": "Extended93",
        "MSI": "MSI",
        "CODABAR": "Codabar",
        "NW7": "Codabar",
        "CODE11": "Code11",
        "FIM": "FIM",
        "POSTNET": "POSTNET",
        "USPS4S": "USPS_4State",
        "CODE128": "Code128",
        "EAN13": "EAN13",
        "EAN8": "EAN8",
    }

    class _barcodeWrapper(Flowable):
        """Wrapper for barcode widget
        """
        def __init__(self, codeName="Code128", value="", **kw):
            self.widget = createBarcodeDrawing(codeName, value=value, **kw)

        def draw(self, canvas, xoffset=0, **kw):
            # NOTE: `canvas' is mutable, so canvas.restoreState() is a MUST.
            canvas.saveState()
            canvas.translate(xoffset, 0)
            self.widget.canv = canvas
            self.widget.draw()
            canvas.restoreState()

        def wrap(self, aW, aH):
            return self.widget.wrap(aW, aH)

    def start(self, c):
        attr = self.attr
        codeName = attr.type or "Code128"
        codeName = pisaTagPDFBARCODE._codeName[codeName.upper().replace("-", "")]
        humanReadable = bool(attr.humanreadable)
        barWidth = attr.barwidth or 0.01 * inch
        barHeight = attr.barheight or 0.5 * inch
        fontName = c.getFontName("OCRB10,OCR-B,OCR B,OCRB")  # or "Helvetica"
        fontSize = 2.75 * mm

        # Assure minimal size.
        if codeName in ("EAN13", "EAN8"):
            barWidth = max(barWidth, 0.264 * mm)
            fontSize = max(fontSize, 2.75 * mm)
        else:  # Code39 etc.
            barWidth = max(barWidth, 0.0075 * inch)
            #barHeight = max(barHeight, 25.93*mm)

        barcode = pisaTagPDFBARCODE._barcodeWrapper(
            codeName=codeName,
            value=attr.value,
            barWidth=barWidth,
            barHeight=barHeight,
            humanReadable=humanReadable,
            fontName=fontName,
            fontSize=fontSize,
        )

        width, height = barcode.wrap(c.frag.width, c.frag.height)
        #barcode.spaceBefore = c.frag.spaceBefore
        #barcode.spaceAfter = c.frag.spaceAfter

        c.force = True

        # Normalize HTML vertical-align keywords to reportlab's.
        valign = attr.align or c.frag.vAlign or "baseline"
        if valign in ["texttop"]:
            valign = "top"
        elif valign in ["absmiddle"]:
            valign = "middle"
        elif valign in ["absbottom", "baseline"]:
            valign = "bottom"

        # Attach the barcode as a callback fragment, like inline images.
        afrag = c.frag.clone()
        afrag.text = ""
        afrag.fontName = fontName
        afrag.cbDefn = ABag(
            kind="barcode",
            barcode=barcode,
            width=width,
            height=height,
            valign=valign,
        )
        c.fragList.append(afrag)
| apache-2.0 |
Grirrane/odoo | addons/stock/wizard/orderpoint_procurement.py | 8 | 2776 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Order Point Method:
# - Order if the virtual stock of today is bellow the min of the defined order point
#
import threading
from openerp.osv import fields,osv
from openerp.api import Environment
class procurement_compute(osv.osv_memory):
    _name = 'procurement.orderpoint.compute'
    _description = 'Compute Minimum Stock Rules'

    def _procure_calculation_orderpoint(self, cr, uid, ids, context=None):
        """Run the orderpoint (minimum stock rule) procurement computation.

        Meant to be executed in a background thread (see procure_calculation),
        so it opens its own database cursor instead of using `cr`, which may
        already be closed by the time the thread runs.

        @param self: The object pointer.
        @param cr: A database cursor (not used directly, see above)
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        with Environment.manage():
            proc_obj = self.pool.get('procurement.order')
            # As this function is in a new thread, open a new cursor because
            # the old one may be closed.
            new_cr = self.pool.cursor()
            try:
                user_obj = self.pool.get('res.users')
                company_id = user_obj.browse(new_cr, uid, uid, context=context).company_id.id
                proc_obj._procure_orderpoint_confirm(new_cr, uid, use_new_cursor=new_cr.dbname, company_id=company_id, context=context)
            finally:
                # Bugfix: always close the thread-local cursor, even when the
                # computation raises; otherwise the DB connection leaks.
                new_cr.close()
        return {}

    def procure_calculation(self, cr, uid, ids, context=None):
        """Launch the orderpoint computation in a background thread and close
        the wizard window immediately.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(target=self._procure_calculation_orderpoint, args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
hanicker/odoo | addons/hr_expense/report/__init__.py | 380 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_expense_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ivanlmj/Android-DNSSL | Flask/lib/python2.7/site-packages/pip/vcs/git.py | 473 | 7898 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
    """pip VCS backend implementing support for Git repositories."""
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
    bundle_file = 'git-clone.txt'
    guide = ('# This was a Git repo; to make it a repo again run:\n'
             'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')

    def __init__(self, url=None, *args, **kwargs):
        # Works around an apparent Git bug
        # (see http://article.gmane.org/gmane.comp.version-control.git/146500):
        # normalize backslashes in 'file' URLs while keeping the
        # 'git+file' scheme prefix intact.
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
                # (A plain urlunsplit of the full scheme was previously
                # computed here and immediately overwritten; removed.)
                after_plus = scheme.find('+') + 1
                url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
        super(Git, self).__init__(url, *args, **kwargs)

    def parse_vcs_bundle_file(self, content):
        """Extract (url, rev) from a pip bundle guide file.

        Returns (None, None) when both pieces could not be found.
        """
        url = rev = None
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
            if url_match:
                url = url_match.group(1).strip()
            rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
            if rev_match:
                rev = rev_match.group(1).strip()
            if url and rev:
                return url, rev
        return None, None

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            if not location.endswith('/'):
                location = location + '/'
            call_subprocess(
                [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)

    def check_rev_options(self, rev, dest, rev_options):
        """Check the revision options before checkout to compensate that tags
        and branches may need origin/ as a prefix.
        Returns the SHA1 of the branch or tag if found.
        """
        revisions = self.get_refs(dest)
        origin_rev = 'origin/%s' % rev
        if origin_rev in revisions:
            # remote branch
            return [revisions[origin_rev]]
        elif rev in revisions:
            # a local tag or branch name
            return [revisions[rev]]
        else:
            logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
            return rev_options

    def switch(self, dest, url, rev_options):
        """Point the existing checkout in `dest` at a new remote url/revision."""
        call_subprocess(
            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
        call_subprocess(
            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
        self.update_submodules(dest)

    def update(self, dest, rev_options):
        """Fetch from origin and hard-reset `dest` to the wanted revision."""
        # First fetch changes from the default remote
        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        if rev_options:
            rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
        call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
        #: update submodules
        self.update_submodules(dest)

    def obtain(self, dest):
        """Clone the repository into `dest`, checking out the requested rev."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to %s)' % rev
        else:
            rev_options = ['origin/master']
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
            call_subprocess([self.cmd, 'clone', '-q', url, dest])
            #: repo may contain submodules
            self.update_submodules(dest)
            if rev:
                rev_options = self.check_rev_options(rev, dest, rev_options)
                # Only do a checkout if rev_options differs from HEAD
                if not self.get_revision(dest).startswith(rev_options[0]):
                    call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

    def get_url(self, location):
        """Return the URL of the 'origin' remote of the checkout at `location`."""
        url = call_subprocess(
            [self.cmd, 'config', 'remote.origin.url'],
            show_stdout=False, cwd=location)
        return url.strip()

    def get_revision(self, location):
        """Return the SHA1 of HEAD for the checkout at `location`."""
        current_rev = call_subprocess(
            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
        return current_rev.strip()

    def get_refs(self, location):
        """Return map of named refs (branches or tags) to commit hashes."""
        output = call_subprocess([self.cmd, 'show-ref'],
                                 show_stdout=False, cwd=location)
        rv = {}
        for line in output.strip().splitlines():
            commit, ref = line.split(' ', 1)
            ref = ref.strip()
            ref_name = None
            if ref.startswith('refs/remotes/'):
                ref_name = ref[len('refs/remotes/'):]
            elif ref.startswith('refs/heads/'):
                ref_name = ref[len('refs/heads/'):]
            elif ref.startswith('refs/tags/'):
                ref_name = ref[len('refs/tags/'):]
            if ref_name is not None:
                rv[ref_name] = commit.strip()
        return rv

    def get_src_requirement(self, dist, location, find_tags):
        """Build a 'git+URL@rev#egg=name' requirement string for `dist`."""
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        refs = self.get_refs(location)
        # refs maps names to commit hashes; we need the inverse
        # if multiple names map to a single commit, this arbitrarily picks one
        names_by_commit = dict((commit, ref) for ref, commit in refs.items())
        if current_rev in names_by_commit:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
        else:
            full_egg_name = '%s-dev' % egg_project_name
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)

    def get_url_rev(self):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. Github). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if '://' not in self.url:
            assert 'file:' not in self.url
            self.url = self.url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev()
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev()
        return url, rev

    def update_submodules(self, location):
        """Initialize/update submodules recursively if .gitmodules exists."""
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
                        cwd=location)


vcs.register(Git)
| gpl-2.0 |
emacsway/django-oembed | oembed/models.py | 1 | 1251 | import datetime
from django.db import models
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
# Identifier constants for the oEmbed endpoint response format
# (stored in ProviderRule.format).
JSON = 1
XML = 2
# Django `choices` tuple for ProviderRule.format.
FORMAT_CHOICES = (
    (JSON, "JSON"),
    (XML, "XML"),
)
class ProviderRule(models.Model):
    """Maps URLs (matched by ``regex``) to an oEmbed provider ``endpoint``."""
    name = models.CharField(_("name"), max_length=128, null=True, blank=True)
    regex = models.CharField(_("regex"), max_length=2000)
    endpoint = models.CharField(_("endpoint"), max_length=2000)
    # Response format the endpoint returns; see FORMAT_CHOICES (JSON or XML).
    format = models.IntegerField(_("format"), choices=FORMAT_CHOICES)
    def __unicode__(self):
        # Fall back to the endpoint when no human-readable name was given.
        return self.name or self.endpoint
class StoredOEmbed(models.Model):
    """Cached oEmbed response for a matched URL at a given maximum size."""
    match = models.TextField(_("match"))
    max_width = models.IntegerField(_("max width"))
    max_height = models.IntegerField(_("max height"))
    # Rendered embed markup returned by the provider.
    html = models.TextField(_("html"))
    # Raw JSON payload of the provider response; see get_json().
    json = models.TextField(_("json"))
    date_added = models.DateTimeField(
        _("date added"), default=datetime.datetime.now)
    class Meta:
        ordering = ('-max_width',) # larger ones take precedence
    def __unicode__(self):
        return self.match
    def get_json(self, name):
        """ Convenience for JSON properties; e.g. get_json('thumbnail_url') """
        return simplejson.loads(self.json).get(name, None)
| bsd-3-clause |
lupien/pyHegel | pyHegel/instruments/agilent_smu.py | 1 | 68624 | # -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2019-2019 Christian Lupien <christian.lupien@usherbrooke.ca> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
import numpy as np
import time
import types
from ..instruments_base import visaInstrument, visaInstrumentAsync,\
BaseDevice, scpiDevice, MemoryDevice, ReadvalDev,\
ChoiceMultiple, Choice_bool_OnOff, _repr_or_string,\
quoted_string, quoted_list, quoted_dict, ChoiceLimits,\
ChoiceStrings, ChoiceDevDep, ChoiceDev, ChoiceDevSwitch, ChoiceIndex,\
decode_float64, decode_float64_avg, decode_float64_meanstd,\
decode_uint16_bin, _decode_block_base, decode_float64_2col,\
decode_complex128, sleep, locked_calling, visa_wrap, _encode_block,\
dict_improved, _general_check, _tostr_helper, ChoiceBase, ProxyMethod,\
OrderedDict
from ..instruments_registry import register_instrument, register_usb_name, register_idn_alias
#######################################################
## Agilent E5270B mainframe with E5281B precision medium power SMU modules
#######################################################
# decorator to cache values for 100 ms
def cache_result(func):
    """Decorator caching an instrument method's result for 100 ms.

    The cache lives in ``self._wrapped_cached_results``, keyed by the wrapped
    function.  A cached value is reused only when it is less than 0.1 s old
    and the positional/keyword arguments match the previous call exactly.
    The undecorated function stays reachable through the ``_internal_func``
    attribute (used for explicit cache invalidation).
    """
    def wrapped_func(self, *args, **kwargs):
        stamp, value, seen_args, seen_kwargs = \
            self._wrapped_cached_results.get(func, (None, None, None, None))
        now = time.time()
        expired = stamp is None or now - stamp > 0.1
        if expired or (args, kwargs) != (seen_args, seen_kwargs):
            value = func(self, *args, **kwargs)
            self._wrapped_cached_results[func] = (now, value, args, kwargs)
        return value
    wrapped_func._internal_func = func
    return wrapped_func
class ProxyMethod_cached(ProxyMethod):
    """ProxyMethod that also carries over the ``_internal_func`` attribute
    installed by the cache_result decorator, when present on the wrapped
    bound method."""
    def __init__(self, bound_method, *args, **kwargs):
        super(ProxyMethod_cached, self).__init__(bound_method, *args, **kwargs)
        sentinel = object()
        internal = getattr(bound_method, '_internal_func', sentinel)
        if internal is not sentinel:
            self._internal_func = internal
class MemoryDevice_update(MemoryDevice):
    """MemoryDevice that also pushes its value to the instrument after a set,
    optionally keeping one value per channel."""
    def __init__(self, update_func, *args, **kwargs):
        """ update_func is called after a set to update the instrument.
        nch, when set, is the number of channels of data to save internally.
        With this, it enables the use of ch as an option for set/get.
        """
        self._update_func = func_or_proxy(update_func)
        nch = self._nch = kwargs.pop('nch', None)
        super(MemoryDevice_update, self).__init__(*args, **kwargs)
        if nch is not None:
            # Replicate the initial cached value across all channels.
            val = self.getcache(local=True)
            self._internal_vals = [val]*nch
    def _ch_helper(self, ch=None):
        # Resolve the channel and build the extra positional args passed on
        # to update_func (empty when the device is channel-less).
        args = ()
        if self._nch is not None:
            ch = self.instr._ch_helper(ch)
            args += (ch, )
        elif ch is not None:
            raise ValueError(self.perror('You cannnot specify a channel for this device.'))
        return ch, args
    def _getdev(self, ch=None):
        ch, args = self._ch_helper(ch)
        if ch is None:
            # Bugfix: the parent's _getdev takes no extra positional argument.
            # The previous code called it as _getdev(self), passing self twice,
            # which raised a TypeError for channel-less devices.
            return super(MemoryDevice_update, self)._getdev()
        return self._internal_vals[ch-1]
    def _setdev(self, val, ch=None):
        ch, args = self._ch_helper(None) # Channel already changed in check
        if ch is not None:
            self._internal_vals[ch-1] = val
        super(MemoryDevice_update, self)._setdev(val)
        # Propagate the new value to the physical instrument.
        if self._update_func is not None:
            self._update_func(*args)
    def _checkdev(self, val, ch=None):
        # Validate/switch the channel first, then let the parent check val.
        ch, args = self._ch_helper(ch)
        super(MemoryDevice_update, self)._checkdev(val)
def func_or_proxy(func):
    """Wrap a bound method in a ProxyMethod_cached; return anything else as-is.

    Unbound methods (im_self is None) and plain callables pass through
    unchanged.
    """
    if isinstance(func, types.MethodType) and func.im_self is not None:
        return ProxyMethod_cached(func)
    return func
class CommonDevice(BaseDevice):
    # This is hard coded to use
    # self.instr._ch_helper
    # self._reset_wrapped_cache
    # self.choices.tostr or self.type
    def __init__(self, subfunc, getfunc, setstr, *args, **kwargs):
        """
        Need subfunc (called as subfunc(); with no parameters) and cache reset after set.
        getfunc is called as getfunc(subfunc(), ch) (ch is present only if ch_mode option is not None)
        ch_mode can be False(default) or True
        setstr is the string to set with the {val} and {ch} arguments properly placed.
        or it can be a function that will be called as setstr(self, val, ch) (again ch only present if required)
        You need to set either a type or a choice.
        """
        self._ch_mode = kwargs.pop('ch_mode', False)
        self.type = kwargs.pop('type', None)
        # Wrap bound-method callbacks (see func_or_proxy / ProxyMethod_cached).
        self._getfunc = func_or_proxy(getfunc)
        self._subfunc = func_or_proxy(subfunc)
        super(CommonDevice, self).__init__(*args, **kwargs)
        if not isinstance(setstr, basestring):
            setstr = func_or_proxy(setstr)
        self._setdev_p = setstr
        self._getdev_p = True
        if self.choices is not None and isinstance(self.choices, ChoiceBase):
            # A ChoiceBase also serves as the value converter for set strings.
            self.type = self.choices
    def _ch_helper(self, ch=None):
        # Resolve the channel (updating the instrument's current channel) and
        # return it with the extra positional args used for callbacks.
        args = ()
        if self._ch_mode:
            ch = self.instr._ch_helper(ch)
            args += (ch, )
        elif ch is not None:
            raise ValueError(self.perror('You cannnot specify a channel for this device.'))
        return ch, args
    def _getdev(self, ch=None):
        ch, args = self._ch_helper(ch)
        # The subfunc result is passed as first argument to getfunc.
        subval = self._subfunc()
        args = (subval,) + args
        return self._getfunc(*args)
    def _setdev(self, val, ch=None):
        ch, args = self._ch_helper(None) # Channel already changed in check
        if isinstance(self._setdev_p, basestring):
            # String template: format {val} (and {ch}) then write it directly.
            kwargs = dict(val=_tostr_helper(val, self.type))
            if ch is not None:
                kwargs['ch'] = '%i'%ch
            outstr = self._setdev_p.format(**kwargs)
            self.instr.write(outstr)
        else:
            # Callable setter: invoked as setstr(self, val[, ch]).
            args = (self, val) + args
            self._setdev_p(*args)
        # Instrument state changed; invalidate the cached subfunc result.
        self.instr._reset_wrapped_cache(self._subfunc)
    def _checkdev(self, val, ch=None):
        ch, args = self._ch_helper(ch)
        super(CommonDevice, self)._checkdev(val)
#@register_instrument('Agilent Technologies', 'E5270B', 'B.01.13')
@register_instrument('Agilent Technologies', 'E5270B', alias='E5270B SMU')
class agilent_SMU(visaInstrumentAsync):
"""
This is to control the E5281B precision medium power SMU modules within
an E5270B mainframe.
"""
def __init__(self, *args, **kwargs):
self._wrapped_cached_results = {}
super(agilent_SMU, self).__init__(*args, **kwargs)
def init(self, full=False):
self.write('FMT21')
self.calibration_auto_en.set(False)
#self.sendValueToOther('Auto Calibration Enable', False)
# Calibration is performed every 30 min after all outputs are off.
self.remote_display_en.set(True)
#super(agilent_SMU, self).init(full=full) # don't use this, it sets *esr which does not exist for SMU
# self.clear() # SMU does not have *cls
self.write('*sre 0') # disable trigger (we enable it only when needed)
self._async_trigger_helper_string = None
def _async_trigger_helper(self):
async_string = self._async_trigger_helper_string
if async_string is None:
return
self._async_trig_current_data = None
self.write(async_string)
def _async_cleanup_after(self):
self.write('*sre 0') # disable trigger on data ready to prevent unread status byte from showing up
super(agilent_SMU, self)._async_cleanup_after()
def _async_detect(self, max_time=.5): # 0.5 s max by default
async_string = self._async_trigger_helper_string
if async_string is None:
return True
ret = super(agilent_SMU, self)._async_detect(max_time)
if not ret:
# This cycle is not finished
return ret
# we got a trigger telling data is available. so read it, before we turn off triggering in cleanup
data = self.read()
self._async_trig_current_data = data
return ret
@locked_calling
def _async_trig(self):
async_string = self._async_trigger_helper_string
if async_string != '*cal?':
if self.measurement_spot_en.get():
async_string = None
else:
async_string = 'XE'
self.write('BC') # empty buffer
# Trigger on Set Ready. This generates an event which will need to be cleaned up.
# *opc? is used to make sure we waited long enough to see the event if it was to occur.
# Note that the event is not always detected by NI autopoll so this is why
# we wait and then empty the buffer of all/any status.
# (see details in comment section below to class code.)
self.ask('*sre 16;*opc?')
# absorb all status bytes created.
#i=0
while self.read_status_byte()&0x40:
# i += 1
pass
# print 'skipped %i'%i
self._async_trigger_helper_string = async_string
super(agilent_SMU, self)._async_trig()
def _get_esr(self):
# does not have esr register
return 0
def get_error(self):
errors = self.ask('ERR?')
errn = [int(s) for s in errors.split(',')]
errm = ['%i: %s'%(e, self.ask('EMG? %i'%e)) for e in errn]
return ', '.join(errm)
@locked_calling
def _current_config(self, dev_obj=None, options={}):
opts = []
conf_gen = self.conf_general()
opts += ['conf_general=%s'%conf_gen]
opts += ['conf_ch=%s'%self.conf_ch()]
opts += ['conf_integration=%s'%self.conf_integration()]
if not conf_gen['measurement_spot_en']:
opts += ['set_mode=%s'%self.set_mode()]
return opts+self._conf_helper(options)
def _reset_wrapped_cache(self, func):
self._wrapped_cached_results[func._internal_func] = (None, None, None, None)
@locked_calling
def reset(self):
self.write('*rst')
self.init()
@locked_calling
def perform_calibration(self):
prev_str = self._async_trigger_helper_string
try:
self._async_trigger_helper_string = '*cal?'
self.run_and_wait()
res = int(self._async_trig_current_data)
finally:
self._async_trigger_helper_string = prev_str
del self._async_trig_current_data
if res != 0:
raise RuntimeError(self.perror('Calibration failed (at least one module failed). Returned value is %i'%res))
def _fetch_opt_helper(self, chs=None, auto='all'):
mode = 'spot'
if not self.measurement_spot_en.get():
mode = self.set_mode().mode
full_chs = [[c, 'ch'] for c in self.set_mode()['channels']]
return full_chs, auto, mode
auto = auto.lower()
if auto not in ['all', 'i', 'v']:
raise ValueError(self.perror("Invalid auto setting"))
if chs is None:
chs = [i+1 for i,v in enumerate(self._get_enabled_state()) if v]
if len(chs) == 0:
raise RuntimeError(self.perror('All channels are off so cannot fetch.'))
if not isinstance(chs, (list, tuple, np.ndarray)):
chs = [chs]
full_chs = []
for ch in chs:
if isinstance(ch, basestring):
meas = ch[0].lower()
c = int(ch[1:])
if meas not in ['v', 'i']:
raise ValueError(self.perror("Invalid measurement requested, should be 'i' or 'v'"))
if c not in self._valid_ch:
raise ValueError(self.perror('Invalid channel requested'))
full_chs.append([c, meas])
else:
if ch not in self._valid_ch:
raise ValueError(self.perror('Invalid channel requested'))
if auto in ['all', 'i']:
full_chs.append([ch, 'i'])
if auto in ['all', 'v']:
full_chs.append([ch, 'v'])
return full_chs, auto, mode
def _fetch_getformat(self, **kwarg):
chs = kwarg.get('chs', None)
auto = kwarg.get('auto', 'all')
status = kwarg.get('status', False)
xaxis = kwarg.get('xaxis', True)
full_chs, auto, mode = self._fetch_opt_helper(chs, auto)
multi = []
graph = []
for i, (c, m) in enumerate(full_chs):
base = '%s%i'%(m, c)
if status:
multi.extend([base, base+'_stat'])
graph.append(2*i)
else:
multi.append(base)
graph.append(i)
if mode == 'stair':
graph = []
if xaxis:
multi = ['force']+multi
multi = tuple(multi)
fmt = self.fetch._format
fmt.update(multi=multi, graph=graph)
return BaseDevice.getformat(self.fetch, **kwarg)
def _fetch_getdev(self, chs=None, auto='all', status=False, xaxis=True):
"""
auto/chs can are only used when measurement_spot_en is True
auto can be: 'all' (both V and I), 'I' or 'V' to get just one,
force/compliance to get the force value (source) or
the complicance value
auto is used when chs is None (all enabled channels)
or chs is a list of channel numbers.
Otherwise, chs can also use strings like 'v1' to read the voltage of channel 1
'i2' to read the current of channel 2.
status when True, adds the status of every reading to the return value.
xaxis when True and when getting stair data, will add the xaxis as a first column
"""
full_chs, auto, mode = self._fetch_opt_helper(chs, auto)
if mode != 'spot':
try:
data = self._async_trig_current_data
if mode == 'stair':
x_data = self._x_axis
except AttributeError:
raise RuntimeError(self.perror('No data is available. Probably prefer to use readval.'))
data = data.split(',')
# _parse_data returns: value, channel, status, type
ret = map(self._parse_data, data)
if status:
ret = map(lambda x: [x[0], x[2]], ret)
else:
ret = map(lambda x: x[0], ret)
ret = np.array(ret)
if status and mode == 'single':
ret.shape = (-1, 2)
elif mode == 'stair':
N = len(x_data)
ret.shape = (N, -1)
if xaxis:
ret = np.concatenate([x_data[:, None], ret], axis=1)
ret = ret.T
return ret
# TODO, do run and wait for long TI/TV?
# The longest measurement time for TI/TV seems to be for PLC mode (100/50 or 100/60) so a max of 2s.
# so just use ask? for short times and run_and_wait (note that it needs to behave properly under async.)
ret = []
ch_orig = self.current_channel.get()
for ch, meas in full_chs:
if meas == 'v':
val = self.measV.get(ch=ch)
else:
val = self.measI.get(ch=ch)
ret.append(val)
if status:
ret.append(self.meas_last_status.get())
self.current_channel.set(ch_orig)
ret = np.array(ret)
if status:
ret.shape = (-1, 2)
return ret
#self._async_trigger_helper_string = 'XE'
#self.run_and_wait()
#res = self._read_after_wait()
def _ch_helper(self, ch):
curr_ch = self.current_channel
if ch is None:
ch = curr_ch.get()
else:
curr_ch.set(ch)
return ch
def _level_comp_check_helper(self, fnc, val, comp=False):
if fnc.mode == 'disabled':
raise RuntimeError(self.perror('The output is currently disabled.'))
mode = fnc.mode
if comp:
mode = 'current' if mode == 'voltage' else 'voltage'
if mode == 'current':
_general_check(val, min=-.1, max=.1)
else:
_general_check(val, min=-100, max=100)
def _function_getdev(self, ch=None):
""" Possible values are 'voltage', 'current', 'disabled'
ch is the option to select the channel number.
compliance can be use to set the compliance.
It defaults to the current one when staying in the same state.
When switching function it is 0.1 mA for voltage and 0.1 V for current.
Also will update the force/compliance range and polarity according to:
voltage_range_mode, voltage_range, current_range_mode, current_range
compliance_polarity_auto_en
"""
ch = self._ch_helper(ch)
fnc = self._get_function_cached(ch)
return fnc.mode
def _function_setdev(self, val, ch=None, compliance=None):
ch = self._ch_helper(ch)
if val == 'disabled':
self.write('CL %i'%ch)
else:
# When channel was off, this always enables voltage mode.
self.write('CN %i'%ch)
fnc = self._get_function_cached(ch)
if compliance is not None:
fnc.compliance_val = compliance
if fnc.mode != val:
fnc.level = 0.
if compliance is None:
if val == 'voltage':
fnc.compliance_val = 0.1e-3 # A
else: # current
fnc.compliance_val = 0.1 # V
fnc.mode = val
self._set_level_comp(ch, fnc)
self._reset_wrapped_cache(self._get_function_cached)
def _function_checkdev(self, val, ch=None, compliance=None):
ch = self._ch_helper(ch)
BaseDevice._checkdev(self.function, val)
if val != 'disabled' and compliance is not None:
fnc = dict_improved(mode=val)
self._level_comp_check_helper(fnc, compliance, comp=True)
def _level_getdev(self, ch=None):
""" ch is the option to select the channel number.
if inactive, it returns 0
"""
ch = self._ch_helper(ch)
fnc = self._get_function_cached(ch)
return fnc.level
def _level_setdev(self, val, ch=None):
ch = self._ch_helper(None) # Channel already changed in check
fnc = self._get_function_cached(ch)
self._set_level_comp(ch, fnc, level=val)
self._reset_wrapped_cache(self._get_function_cached)
def _level_checkdev(self, val, ch=None):
ch = self._ch_helper(ch)
fnc = self._get_function_cached(ch)
self._level_comp_check_helper(fnc, val, comp=False)
def _compliance_getdev(self, ch=None):
""" ch is the option to select the channel number.
a current compliance of 0 is changed to 1 pA.
"""
ch = self._ch_helper(ch)
fnc = self._get_function_cached(ch)
return fnc.compliance_val
def _compliance_setdev(self, val, ch=None):
ch = self._ch_helper(None) # Channel already changed in check
fnc = self._get_function_cached(ch)
self._set_level_comp(ch, fnc, comp=val)
self._reset_wrapped_cache(self._get_function_cached)
def _compliance_checkdev(self, val, ch=None):
ch = self._ch_helper(ch)
fnc = self._get_function_cached(ch)
self._level_comp_check_helper(fnc, val, comp=True)
def _set_level_comp(self, ch, fnc=None, level=None, comp=None):
if fnc is None:
fnc = self._get_function_cached(ch)
if fnc.mode == 'disabled':
# We might get here when using range_voltage and company
# silent abort
return
if level is None:
level = fnc.level
if comp is None:
comp = fnc.compliance_val
mode = fnc.mode
comp_mode = 'current' if mode == 'voltage' else 'voltage'
sRange = self._conv_range(mode)
sCompRange = self._conv_range(comp_mode)
sCompPolarity = '0' if self.compliance_polarity_auto_en.get() else '1'
root = dict(voltage='DV', current='DI')[mode]
# Use %.7e since instruments chop the resolution instead of rounding it (we get 99.99 instead of 100 sometimes)
self.write(root+"%i,%s,%.7e,%.7e,%s,%s"%(ch, sRange, level, comp, sCompPolarity, sCompRange))
def _conv_range(self, signal='current'):
if signal == 'current':
rg = self.range_current
else:
rg = self.range_voltage
val = rg.get()
sVal = rg.choices.tostr(val)
return sVal
def get_status(self):
ret = self.ask('LOP?')
if not ret.startswith('LOP'):
raise RuntimeError(self.perror('Problem reading the status'))
ret = ret[3:]
ret = ret.split(',')
conv = {'00':'output off', '01':'force voltage', '02':'force positive current', '03':'force negative current',
'11':'compliance voltage', '12':'compliance positive current', '13':'compliance negative current'}
return [conv[s] for s in ret]
def _active_range_current_getdev(self, ch=None):
""" ch is the option to select the channel number.
This is the force or compliance range. Not the measurement one.
"""
ch = self._ch_helper(ch)
fnc = self._get_function(ch)
return fnc.active_Irange
def _active_range_voltage_getdev(self, ch=None):
""" ch is the option to select the channel number.
This is the force or compliance range. Not the measurement one.
"""
ch = self._ch_helper(ch)
fnc = self._get_function(ch)
return fnc.active_Vrange
def _measIV_helper(self, voltage, ch, range, rgdev):
ch = self._ch_helper(ch) # this set ch for the next entries
if range is None:
if self.range_meas_use_compliance_en.get():
range = 'comp'
else:
range = rgdev.get()
else:
if not (range == 'comp' or range in rgdev.choices):
raise ValueError(self.perror('Invalid range selected'))
quest = 'TV' if voltage else 'TI'
quest += '%i'%ch
if range != 'comp':
quest += ',%s'%rgdev.choices.tostr(range)
result_str = self.ask(quest)
value, channel, status, type = self._parse_data(result_str)
if type is not None and type != {True:'V', False:'I'}[voltage]:
raise RuntimeError(self.perror('Read back the wrong signal type'))
if channel is not None and channel != ch:
raise RuntimeError(self.perror('Read back the wrong channel'))
self.meas_last_status.set(status)
return value
def _measV_getdev(self, ch=None, range=None):
""" This returns the spot measurement.
ch is the option to select the channel number.
specifying range does not change the other devices so the effect is temporary.
range will only be effective for the compliance measurement.
For force side measurement it always use the force channel range.
range is range_voltage_meas/range_meas_use_compliance_en if None
to specify complicance range use: 'comp'
otherwise use the same entries as range_voltage_meas
"""
return self._measIV_helper(voltage=True, ch=ch, range=range, rgdev=self.range_voltage_meas)
def _measI_getdev(self, ch=None, range=None):
""" This returns the spot measurement.
ch is the option to select the channel number.
specifying range does not change the other devices so the effect is temporary.
range will only be effective for the compliance measurement.
For force side measurement it always use the force channel range.
range is range_current_meas/range_meas_use_compliance_en if None
to specify complicance range use: 'comp'
otherwise use the same entries as range_voltage_meas
"""
return self._measIV_helper(voltage=False, ch=ch, range=range, rgdev=self.range_current_meas)
    def _integration_set_helper(self, speed=True, mode=None, time=None):
        """ Program the A/D integration settings (AIT instrument command).

        speed: True targets the high-speed converter (AIT 0), False the
               high-resolution one (AIT 1).
        mode, time: values to program; when None the current instrument
               value (from _get_avg_time_and_autozero) is kept.
        time is clipped to 100 when mode is 'plc'.
        """
        prev_result = self._get_avg_time_and_autozero()
        if speed:
            prev_result = prev_result['high_speed']
            base = 'AIT 0,%s,%i'
        else:
            prev_result = prev_result['high_res']
            base = 'AIT 1,%s,%i'
        if mode is None:
            mode = prev_result[0]
        if time is None:
            time = prev_result[1]
        if mode == 'plc':
            time = min(time, 100) # limit to 100.
        mode = self._integ_choices.tostr(mode)
        self.write(base%(mode, time))
def conf_general(self, autozero=None, remote_display=None, auto_calib=None):
para_dict = dict(autozero=self.auto_zero_en,
remote_display=self.remote_display_en,
auto_calib=self.calibration_auto_en,
measurement_spot_en=self.measurement_spot_en )
params = locals()
if all(params.get(k) is None for k in para_dict):
return {k:dev.get() for k, dev in para_dict.items()}
for k, dev in para_dict.items():
val = params.get(k)
if val is not None:
dev.set(val)
    def conf_ch(self, ch=None, function=None, level=None, range=None, compliance=None, comp_range=None,
                polarity=None, integrator=None, Vmeas_range=None, Imeas_range=None, meas_range_comp=None,
                filter=None, series_r=None, meas_auto_type=None):
        """ when call with no parameters, returns all channels settings,
        when called with only one ch selected, only returns its settings.
        Otherwise modifies the settings that are not None

        Note: 'range' and 'comp_range' map to different devices depending on
        the channel function ('current' or 'voltage'); adjust_range below
        fills them in once the function is known for a channel.
        """
        # Keyword name -> device. 'range'/'comp_range' start as placeholders.
        para_dict = OrderedDict(function=self.function,
                                level=self.level,
                                range=None,
                                compliance=self.compliance,
                                comp_range=None,
                                polarity=self.compliance_polarity_auto_en,
                                integrator=self.integration_type,
                                Vmeas_range=self.range_voltage_meas,
                                Imeas_range=self.range_current_meas,
                                meas_range_comp=self.range_meas_use_compliance_en,
                                filter=self.output_filter_en,
                                series_r=self.series_resistor_en,
                                meas_auto_type=self.meas_auto_type)
        # Capture the keyword arguments by name for the lookups below.
        params = locals()
        def adjust_range(func):
            # Force range and compliance range swap depending on the function.
            if func == 'current':
                para_dict['range'] = self.range_current
                para_dict['comp_range'] = self.range_voltage
            else:
                para_dict['range'] = self.range_voltage
                para_dict['comp_range'] = self.range_current
        if all(params.get(k) is None for k in para_dict):
            # Read-only call: return settings for the requested channel(s).
            if ch is None:
                ch = self._valid_ch
            if not isinstance(ch, (list, tuple)):
                ch = [ch]
            result_dict = {}
            for c in ch:
                func = self.function.get(ch=c) # we set ch here
                adjust_range(func)
                result_dict[c] = {k:dev.get() for k, dev in para_dict.items()}
            return result_dict
        for k, dev in para_dict.items():
            # Re-read the function every iteration so that setting 'function'
            # (first key) updates the range/comp_range device mapping.
            func = self.function.get(ch=ch)
            adjust_range(func)
            val = params.get(k)
            if val is not None:
                dev.set(val)
def conf_integration(self, speed_mode=None, speed_time=None, resol_mode=None, resol_time=None):
para_dict = dict(speed_mode=self.integration_high_speed_mode,
speed_time=self.integration_high_speed_time,
resol_mode=self.integration_high_resolution_mode,
resol_time=self.integration_high_resolution_time)
params = locals()
if all(params.get(k) is None for k in para_dict):
return {k:dev.get() for k, dev in para_dict.items()}
for k, dev in para_dict.items():
val = params.get(k)
if val is not None:
dev.set(val)
    def set_mode(self, mode=None, channels=None, **kwargs):
        """
        To use one of these mode, set measurement_spot_en to False
        if no options are given, it returns the current setting
        mode can be 'single' or 'stair'
        channels is a list of channels to read. When not specified it uses
        the current instrument set, and if never set, all the active channels.
        when using 'stair' extra keywords are passed to conf_staircase
        Raises ValueError on a bad mode/channel selection or extra keywords
        given without mode='stair'.
        """
        res = self._get_tn_av_cm_fmt_mm()
        res_mode = res['meas_mode']
        res_channels = [i+1 for i,v in enumerate(res['enabled']) if v]
        # No arguments at all: report the current configuration.
        if mode == channels == None:
            mode = res_mode
            channels = res_channels
            ret = dict_improved([('mode', mode), ('channels', channels)])
            if mode == 'stair':
                ret['stair'] = self.conf_staircase()
            return ret
        # MM measurement-mode codes understood by this driver.
        valid_modes = dict(single=1, stair=16)
        if mode is None:
            mode = res['meas_mode']
        elif mode not in valid_modes:
            raise ValueError(self.perror('Selected an invalide mode'))
        if channels is None:
            channels = res_channels
        elif not isinstance(channels, (list, tuple, np.ndarray)):
            channels = [channels]
        if any(c not in self._valid_ch for c in channels):
            raise ValueError(self.perror('Invalid channel selection'))
        if len(channels) == 0:
            # Fall back to the channels currently enabled on the instrument.
            en_ch = self._get_enabled_state()
            channels = [i+1 for i,v in enumerate(en_ch) if v]
            if len(channels) == 0:
                raise RuntimeError(self.perror('All channels are disabled. You should enable at least one.'))
        self.write('MM %i,%s'%(valid_modes[mode], ','.join(map(str, channels))))
        N_kwargs = len(kwargs)
        if mode == 'stair' and N_kwargs > 0:
            self.conf_staircase(**kwargs)
        elif N_kwargs > 0:
            raise ValueError(self.perror('extra arguments are invalid'))
def _calc_x_axis(self, conf):
if conf.func is None:
return
sweep_mode_opt = {'linear':(False, False),
'log':(True, False),
'linear_updown':(False, True),
'log_updown':(True, True)}
isLog, isUpDown = sweep_mode_opt[conf.mode]
if isLog:
x = np.logspace(np.log10(conf.start), np.log10(conf.stop), conf.nsteps)
else:
x = np.linspace(conf.start, conf.stop, conf.nsteps)
if isUpDown:
x = np.concatenate( (x, x[::-1]) )
self._x_axis = x
def conf_staircase(self, ch=None, start=None, stop=None, nsteps=None, mode=None, end_to=None, hold=None, delay=None):
"""
call with no values to see current setup.
When setting it uses the current settings of ch for func, range and compliance.
When reading there are the values that will be used.
WARNING: you probably don't want the settings of ch after calling this function.
end_to can be 'start' or 'stop'
mode can be 'linear', 'log', 'linear_updown', 'log_updown'
updown makes it go from start to stop then from stop to start.
"""
func = None
para_val = locals()
params = ['func', 'ch', 'start', 'stop', 'nsteps', 'mode', 'end_to', 'hold', 'delay']
params_prev = ['sweep_var', 'sweep_ch', 'start', 'stop', 'steps', 'mode', 'ending_value', 'hold_time', 'delay_time']
conf = dict_improved([(p,para_val[p]) for p in params])
allnone = False
if all(v is None for v in conf.values()):
allnone = True
prev_stair = self._get_staircase_settings()
for k in prev_stair.keys():
if k == 'abort':
continue
if k in ['active_range', 'power', 'compliance']:
conf[k] = prev_stair[k]
else:
kp = params[params_prev.index(k)]
if conf[kp] is None:
conf[kp] = prev_stair[k]
if allnone:
self._calc_x_axis(conf)
return conf
del conf['func']
if any(v is None for v in conf.values()):
raise ValueError(self.perror('Some values (None) need to be specified: {conf}', conf=conf))
if conf.ch not in self._valid_ch:
raise ValueError(self.perror("Invalid ch selection."))
func = self.function.get(ch=conf.ch)
if func not in ['voltage', 'current']:
raise ValueError(self.perror("Selected channel is disabled"))
else:
if func == 'voltage':
base = 'WV'
minmax = 100.
rgdev = self.range_voltage
else:
base = 'WI'
minmax = 0.1
rgdev = self.range_current
range = rgdev.get()
sRange = rgdev.choices.tostr(range)
compliance = self.compliance.get()
#base += "%i,%i,%s,%.7e,%.7e,%i,%.7e"
base += "%i,%i,%s,%.7e,%.7e,%i"
if not (-minmax <= conf.start <= minmax):
raise ValueError(self.perror("Invalid start."))
if not (-minmax <= conf.stop <= minmax):
raise ValueError(self.perror("Invalid stop."))
if not (1 <= conf.nsteps <= 1001):
raise ValueError(self.perror("Invalid steps (must be 1-1001)."))
if not (0 <= conf.hold <= 655.35):
raise ValueError(self.perror("Invalid hold (must be 0-655.35)."))
if not (0 <= conf.hold <= 65.535):
raise ValueError(self.perror("Invalid delay (must be 0-65.535)."))
mode_ch = {'linear':1, 'log':2, 'linear_updown':3, 'log_updown':4}
if conf.mode not in mode_ch:
raise ValueError(self.perror("Invalid mode (must be one of %r)."%mode_ch.keys()))
end_to_ch = dict(start=1, stop=2)
mode = mode_ch[conf.mode]
#self.write(base%(conf.ch, mode, sRange, conf.start, conf.stop, conf.nsteps, compliance))
self.write(base%(conf.ch, mode, sRange, conf.start, conf.stop, conf.nsteps))
self.write('WT %.7e,%.7e'%(conf.hold, conf.delay))
self.write('WM 1,%i'%end_to_ch[conf.end_to])
conf.func = func
self._calc_x_axis(conf)
    def _create_devs(self):
        """ Create all the device wrappers for this instrument.

        Queries the mainframe for the installed modules, then builds the
        range/compliance/integration/measurement devices. Must end with the
        parent class _create_devs call.
        """
        self.write('BC') # make sure to empty output buffer
        valid_ch, options_dict, Nmax = self._get_unit_conf()
        self._valid_ch = valid_ch
        self._Nvalid_ch = len(valid_ch)
        self._unit_conf = options_dict
        self._N_channels = Nmax
        self.current_channel = MemoryDevice(valid_ch[0], choices=valid_ch)
        # Range tables (instrument code -> range value).
        # E5281B/E5287A also has 5:0.5, 50:5.; 5280B/E5290A also has 2000:200.
        v_range = ChoiceIndex({0:0., 5:0.5, 20:2., 50:5., 200:20., 400:40., 1000:100.})
        v_range_meas = ChoiceIndex({0:0., 5:0.5, 20:2., 50:5., 200:20., 400:40., 1000:100.,
                                    -5:-0.5, -20:-2., -50:-5., -200:-20., -400:-40., -1000:-100.})
        self._v_range_meas_choices = v_range_meas
        # E5287A+E5288A ASU has: 8:1e-12
        # E5287A has: 9:10e-12, 10: 100e-12
        # E5280B/E5281B/E5287A has: 11:1e-9, 12:10e-9
        # E5291A has: 20:200e-3
        # E5280B/E5290A has: 20:1.
        i_range = ChoiceIndex({0:0., 11:1e-9, 12:10e-9, 13:100e-9, 14:1e-6, 15:10e-6, 16:100e-6, 17:1e-3, 18:10e-3, 19:100e-3})
        i_range_meas = ChoiceIndex({0:0., 11:1e-9, 12:10e-9, 13:100e-9, 14:1e-6, 15:10e-6, 16:100e-6, 17:1e-3, 18:10e-3, 19:100e-3,
                                    -11:-1e-9, -12:-10e-9, -13:-100e-9, -14:-1e-6, -15:-10e-6, -16:-100e-6, -17:-1e-3, -18:-10e-3, -19:-100e-3})
        self._i_range_meas_choices = i_range_meas
        def MemoryDevice_ch(*args, **kwargs):
            # Helper: per-channel MemoryDevice_update bound to _set_level_comp.
            args = (self._set_level_comp,) + args
            kwargs['nch'] = Nmax
            return MemoryDevice_update(*args, **kwargs)
        self.range_voltage = MemoryDevice_ch(0., choices=v_range, doc="""
            This is for compliance/force. Not for measurement.
            It is a MemoryDevice (so cannot be read from instrument.)
            See active_range_voltage to see what is the instrument using.
            0. means auto range.
            Otherwise the range set is a minimum one. It will use higher ones if necessary.
            """)
        self.range_current = MemoryDevice_ch(0., choices=i_range, doc="""
            This is for compliance/force. Not for measurement.
            It is a MemoryDevice (so cannot be read from instrument.)
            See active_range_current to see what is the instrument using.
            0. means auto range.
            Otherwise the range set is a minimum one. It will use higher ones if necessary.
            """)
        self.compliance_polarity_auto_en = MemoryDevice_ch(True, choices=[True, False],
            doc="""
            When True, polarity of compliance is the same as force (0 force is positive).
            When False, the polarity is the one set by the compliance (Described as Manual mode
            in instrument manual, see figures 6.1, 6.2 and 6.3)
            It is a MemoryDevice (so cannot be read from instrument.)
            """)
        # Devices below read back through the cached *LRN? helpers.
        self.range_current_meas = CommonDevice(self._get_meas_ranges,
                                               lambda v, ch: v[ch][0],
                                               'RI {ch},{val}', choices=i_range_meas, ch_mode=True,
                                               doc='This does not apply on the force channel. Measurement then use the force range.')
        self.range_voltage_meas = CommonDevice(self._get_meas_ranges,
                                               lambda v, ch: v[ch][1],
                                               'RV {ch},{val}', choices=v_range_meas, ch_mode=True,
                                               doc='This does not apply on the force channel. Measurement then use the force range.')
        self.range_meas_use_compliance_en = MemoryDevice_update(None, False, choices=[True, False], nch=Nmax)
        self.remote_display_en = CommonDevice(self._get_display_settings,
                                              lambda v: v['remote_dsp_en'],
                                              'RED {val}', type=bool)
        self.calibration_auto_en = CommonDevice(self._get_tn_av_cm_fmt_mm,
                                                lambda v: v['auto_cal_en'],
                                                'CM{val}', type=bool)
        self.series_resistor_en = CommonDevice(self._get_series_resistor_en,
                                               lambda v, ch: v[ch-1],
                                               'SSR{ch},{val}', type=bool, ch_mode=True,
                                               doc=""" When enabled, add a ~1M series resitor to the output ch""")
        self.output_filter_en = CommonDevice(self._get_filters,
                                             lambda v, ch: v[ch-1],
                                             'FL{val},{ch}', type=bool, ch_mode=True)
        self.integration_type = CommonDevice(self._get_ad_converter_highres_en,
                                             lambda v, ch: v[ch-1],
                                             'AAD {ch},{val}', ch_mode=True,
                                             choices=ChoiceIndex(['speed', 'resolution']))
        self.auto_zero_en = CommonDevice(self._get_avg_time_and_autozero,
                                         lambda v: v['autozero_en'],
                                         'AZ {val}', type=bool)
        self.meas_auto_type = CommonDevice(self._get_meas_operation_mode,
                                           lambda v, ch: v[ch-1],
                                           'CMM {ch},{val}', ch_mode=True,
                                           choices=ChoiceIndex(['compliance', 'current', 'voltage', 'force']))
        self._integ_choices = ChoiceIndex(['auto', 'manual', 'plc'])
        # Integration devices delegate setting to _integration_set_helper.
        self.integration_high_speed_mode = CommonDevice(self._get_avg_time_and_autozero,
                                                        lambda v: v['high_speed'][0],
                                                        lambda self, val: self.instr._integration_set_helper(speed=True, mode=val),
                                                        choices=self._integ_choices)
        self.integration_high_resolution_mode = CommonDevice(self._get_avg_time_and_autozero,
                                                             lambda v: v['high_res'][0],
                                                             lambda self, val: self.instr._integration_set_helper(speed=False, mode=val),
                                                             choices=self._integ_choices)
        self.integration_high_speed_time = CommonDevice(self._get_avg_time_and_autozero,
                                                        lambda v: v['high_speed'][1],
                                                        lambda self, val: self.instr._integration_set_helper(speed=True, time=val),
                                                        type=int, min=1, max=1023, setget=True,
                                                        doc=""" time is internally limited to 100 for plc mode """)
        self.integration_high_resolution_time = CommonDevice(self._get_avg_time_and_autozero,
                                                             lambda v: v['high_res'][1],
                                                             lambda self, val: self.instr._integration_set_helper(speed=False, time=val),
                                                             type=int, min=1, max=127, setget=True,
                                                             doc=""" time is internally limited to 100 for plc mode """)
        self.measurement_spot_en = MemoryDevice(True, choices=[True, False],
                                                doc="""
                                                With this False, you need to use set_mode
                                                """)
        # Wrapped devices (implemented by the _getdev/_setdev methods above).
        self._devwrap('function', choices=['voltage', 'current', 'disabled'])
        self._devwrap('level', setget=True)
        self._devwrap('compliance', setget=True)
        self._devwrap('active_range_current')
        self._devwrap('active_range_voltage')
        self._devwrap('measV', autoinit=False, trig=True)
        self.meas_last_status = MemoryDevice_update(None, None, nch=Nmax)
        self._devwrap('measI', autoinit=False, trig=True)
        self._devwrap('fetch', autoinit=False, trig=True)
        self.readval = ReadvalDev(self.fetch)
        self.alias = self.readval
        # This needs to be last to complete creation
        super(type(self),self)._create_devs()
@cache_result
def _get_enabled_state(self):
"Returns the enabled state of each channels"
N = self._N_channels
ret = self.ask("*LRN? 0")
state = [False]*N
if ret == 'CL':
pass
elif ret.startswith('CN'):
for i_s in ret[2:].split(','):
state[int(i_s)-1] = True
else:
raise RuntimeError(self.perror('Unexpected format for get_enabled_state'))
return state
    @cache_result
    def _get_function_cached(self, ch):
        """ Cached wrapper around _get_function (see its documentation).
        Use when the active range information does not need to be current. """
        return self._get_function(ch)
    # Don't use cache here because active_Vrange, active_Irange can change.
    #@cache_result
    def _get_function(self, ch):
        """ Query ('*LRN? ch') and parse the source function of channel ch.

        Returns a dict_improved with keys: mode ('voltage', 'current' or
        'disabled'), level, compliance_val, active_Vrange, active_Irange and
        (when not disabled) polarity.
        Raises RuntimeError on an unexpected channel or mode in the reply.
        """
        ret = self.ask('*LRN? %i'% ch)
        d = dict_improved()
        if int(ret[2]) != ch:
            raise RuntimeError(self.perror('Unexpected channel in get_function'))
        mode = ret[:2]
        if mode == 'DV':
            # Vrange, voltage, Icomp, Icomp_pol, Irange. Icomp_pol is 0 when both voltage and Icomp have same polarity.
            # That is not the way we want to use it, so do not use it.
            # Both ranges are the active ones (never autorange or fix)
            vs = self._parse_block_helper(ret, 'DV', [int, int, float, float, int, int])[1:]
            d.mode = 'voltage'
            d.level = vs[1]
            d.active_Vrange = vs[0]/10.
            d.active_Irange = 10**(vs[4]-11-9)
            d.compliance_val = vs[2]
            d.polarity = vs[3]
        elif mode == 'DI':
            # Irange, current, Vcomp, Vcomp_pol, Vrange. Vcomp behaves similarly to Icomp (see above).
            # Both ranges are the active ones (never autorange or fix)
            vs = self._parse_block_helper(ret, 'DI', [int, int, float, float, int, int])[1:]
            d.mode = 'current'
            d.level = vs[1]
            d.active_Vrange = vs[4]/10.
            d.active_Irange = 10**(vs[0]-11-9)
            d.compliance_val = vs[2]
            d.polarity = vs[3]
        elif mode == 'CL':
            # Channel disabled: report zeroed values.
            d.mode = 'disabled'
            d.level = 0
            d.compliance_val = 0
            d.active_Vrange = 0.
            d.active_Irange = 0.
        else:
            raise RuntimeError(self.perror('Unexpected mode in get_function'))
        return d
@cache_result
def _get_filters(self):
"Returns the filter enabled state of each channels"
N = self._N_channels
ret = self.ask("*LRN? 30")
state = [False]*N
if ret == 'FL0':
pass
elif ret == 'FL1':
state = [True]*N
else:
r = ret.split(';')
if not (r[0].startswith('FL0,') and r[1].startswith('FL1,')):
raise RuntimeError(self.perror('Unexpected filter structure data'))
state = [None]*N
for i in [int(v) for v in r[0].split(',')[1:]]:
if state[i-1] is not None:
raise RuntimeError(self.perror('Unexpected filter date (repeat)'))
state[i-1] = False
for i in [int(v) for v in r[1].split(',')[1:]]:
if state[i-1] is not None:
raise RuntimeError(self.perror('Unexpected filter date (repeat)'))
state[i-1] = True
for ch in self._valid_ch:
i = ch-1
if state[i] is None:
raise RuntimeError(self.perror('Unexpected missing entry for _get_filters'))
return state
def _parse_block_helper(self, root_string, basename, types=[int], Nv_min=None):
Nv = len(types)
if not root_string.startswith(basename):
raise RuntimeError(self.perror('Unexpected entry start for %s (%s)'%(basename, root_string)))
r = root_string[len(basename):]
vs = r.split(',')
if Nv_min is None:
Nv_min = Nv
if not (Nv_min <= len(vs) <= Nv):
raise RuntimeError(self.perror('Invalid number of values for %s (%s)'%(basename, root_string)))
vals = [types[i](v) for i,v in enumerate(vs)]
return vals
    def _get_values_helper(self, lrn_type, basename, types=[int], Nv_min=None):
        """ This parses entries like (for basename='AA', types=[float, int])
              AA1,1.,3;AA2,5.,6;AA3...
            with the first integer after AA the ch num
            if basename=['AA', 'BB'] it parses
              AA1,a1;BB1,b1;AA2,a2;BB2,b2 ...
            and returns result as [[a1,b1], [a1,b2], ...]

            The returned list has one slot per mainframe channel (1..Nmax);
            channels without an installed module keep None.
            Raises RuntimeError for a wrong entry count, repeated entries or
            missing entries for an installed channel.
            NOTE: the default types=[int] is never mutated here (it is
            replaced by [int] + types below), so the shared default is safe.
        """
        Nch = self._N_channels
        Nvalid_ch = self._Nvalid_ch
        if not isinstance(basename, (list, tuple)):
            basename = [basename]
        Nbasename = len(basename)
        # Repeat basenames per installed channel; base_offsets gives the
        # position of each basename inside a channel's group of entries.
        basename = basename*Nvalid_ch
        base_offsets = list(range(Nbasename))*Nvalid_ch
        N = Nbasename*Nch
        Nvalid = Nbasename*Nvalid_ch
        state = [None]*N
        one_val = False
        if not isinstance(types, (list, tuple)):
            types = [types]
            one_val = True
        types = [int] + types # first entry is ch number
        Nv = len(types)
        if Nv_min is None:
            Nv_min = Nv
        else:
            Nv_min += 1
        ret = self.ask("*LRN? %i"%lrn_type)
        rs = ret.split(';')
        if len(rs) != Nvalid:
            raise RuntimeError(self.perror('Invalid number of entries for %i'%lrn_type))
        for r, b, off in zip(rs, basename, base_offsets):
            vals = self._parse_block_helper(r, b, types, Nv_min)
            ch = vals[0]
            # Flattened index: channel group, then basename offset within it.
            i = (ch-1)*Nbasename + off
            if state[i] is not None:
                raise RuntimeError(self.perror('Unexpected repeat entry for %i'%lrn_type))
            state[i] = vals[1] if one_val else vals[1:]
        for ch in self._valid_ch:
            i = ch-1
            b = i*Nbasename
            if None in state[b:b+Nbasename]:
                raise RuntimeError(self.perror('Unexpected missing entry for %i'%lrn_type))
        if Nbasename > 1:
            # Regroup the flat list as one [a, b, ...] sublist per channel.
            state = [[state[i*Nbasename+j] for j in range(Nbasename)] for i in range(Nch)]
        return state
def _only_valid_ch(self, list):
return [l for i,l in enumerate(list) if i+1 in self._valid_ch]
    def _apply_ch_changes(self, quant_ch, states):
        """ Apply a per-channel list of values (index 0 -> channel 1) to the
        quantity named quant_ch.
        NOTE(review): setValue/_quant_ch are not defined in this portion of
        the file -- presumably inherited; confirm this method is still used.
        """
        for i, state in enumerate(states):
            ch = i+1
            self.setValue(self._quant_ch(quant_ch, ch), state)
    @cache_result
    def _get_series_resistor_en(self):
        """ Returns the series-resistor enabled state (bool) of each channel,
        parsed from '*LRN? 53' (SSR entries). """
        states = self._get_values_helper(53, 'SSR', lambda s: bool(int(s)))
        return states
    @cache_result
    def _get_current_autorange(self):
        """ Returns the current auto-range settings of each channel, parsed
        from '*LRN? 54' (RM entries, [mode, rate] with rate optional). """
        autorg = self._get_values_helper(54, 'RM', [int, int], Nv_min=1)
        return autorg
    # This is not used or handled.
    @cache_result
    def _get_ad_converter_highres_en(self): # vs high_speed
        """ Returns the A/D converter selection ('speed' or 'resolution') of
        each channel, parsed from '*LRN? 55' (AAD entries). """
        option_type = self.integration_type.choices
        states = self._get_values_helper(55, 'AAD', option_type )
        return states
    @cache_result
    def _get_meas_operation_mode(self):
        """ Returns the measurement operation mode ('compliance', 'current',
        'voltage' or 'force') of each channel, parsed from '*LRN? 46' (CMM). """
        choices = self.meas_auto_type.choices
        modes = self._get_values_helper(46, 'CMM', choices)
        return modes
    @cache_result
    def _get_meas_ranges(self):
        """ Returns, per channel, [current_meas_range, voltage_meas_range]
        parsed from '*LRN? 32' (RI/RV pairs). Channels without an installed
        module keep None entries. """
        ranges = self._get_values_helper(32, ['RI', 'RV'], lambda x: x) # keep as string
        # Convert the raw strings through the measurement range choice maps.
        ich = lambda v: self._i_range_meas_choices(v) if v is not None else None
        vch = lambda v: self._v_range_meas_choices(v) if v is not None else None
        ranges = [[ich(i), vch(v)] for i,v in ranges]
        return ranges
    @cache_result
    def _get_avg_time_and_autozero(self):
        """ Parses '*LRN? 56' and returns a dict with the high_speed and
        high_res A/D settings ([mode, time] each) and the autozero_en bool. """
        ret = self.ask("*LRN? 56")
        rs = ret.split(';')
        if len(rs) != 3:
            raise RuntimeError(self.perror('Invalid number of elemnts for lrn 56'))
        mode_type = self._integ_choices
        high_speed = self._parse_block_helper(rs[0], 'AIT0,', [mode_type, int]) # mode(0=auto, 1=manual, 2=PLC), time
        high_res = self._parse_block_helper(rs[1], 'AIT1,', [mode_type, int]) # mode(0=auto, 1=manual, 2=PLC), time
        autozero_en = self._parse_block_helper(rs[2], 'AZ', [lambda s: bool(int(s))])
        return dict(high_speed=high_speed,
                    high_res=high_res,
                    autozero_en=autozero_en[0])
    @cache_result
    def _get_tn_av_cm_fmt_mm(self):
        """ Parses '*LRN? 31' and returns a dict with meas_mode, trigger,
        average_high_speed_adc, auto_cal_en, outfmt and a per-channel
        'enabled' boolean list. meas_mode is 'none' when no MM entry exists.
        """
        ret = self.ask("*LRN? 31")
        rs = ret.split(';')
        N = len(rs)
        if not (4 <= len(rs) <= 5):
            raise RuntimeError(self.perror('Invalid number of elements for lrn 31'))
        trigger = self._parse_block_helper(rs[0], 'TM', [int])
        average_high_speed_adc = self._parse_block_helper(rs[1], 'AV', [int, int], Nv_min=1) # number, mode
        auto_cal_en = self._parse_block_helper(rs[2], 'CM', [lambda s: bool(int(s))])
        outfmt = self._parse_block_helper(rs[3], 'FMT', [int, int]) # format, mode
        enabled = [False]*self._N_channels
        if N == 5:
            # Fifth entry (MM) is only present when a measurement mode is set.
            mm_modes = {1:'single', 2:'staircase', 3:'pulsed spot', 4:'pulsed sweep', 5:'staircase pulsed bias',
                        9:'quasi-pulsed spot', 14:'linear search', 15:'binary search', 16:'stair'}
            mm = self._parse_block_helper(rs[4], 'MM', [int]*9, Nv_min=1) # mode, chnum, chnum ... (max of 8 chnum)
            meas_mode = mm_modes[mm[0]]
            for m in mm[1:]:
                enabled[m-1] = True
        else:
            meas_mode = 'none'
            mm = None
        return dict(meas_mode=meas_mode,
                    trigger=trigger[0],
                    average_high_speed_adc=average_high_speed_adc,
                    auto_cal_en=auto_cal_en[0],
                    outfmt=outfmt,
                    enabled=enabled)
    # trigger, outfmt not handled. average_high_speed_adc not handled but same as get_avg_time_and_autozero
    @cache_result
    def _get_display_settings(self):
        """ Parses '*LRN? 61' and returns a dict with the front-panel display
        settings (remote display, key lock, number format, displayed source
        and measurement selections). """
        ret = self.ask("*LRN? 61")
        rs = ret.split(';')
        if len(rs) != 8:
            raise RuntimeError(self.perror('Invalid number of elements for lrn 61'))
        bool_int = lambda s: bool(int(s))
        remote_dsp_en = self._parse_block_helper(rs[0], 'RED', [bool_int])
        front_panel_lock_en = self._parse_block_helper(rs[1], 'KLC', [bool_int])
        display_scientific_en = self._parse_block_helper(rs[2], 'DFM', [bool_int]) # False is Engineering
        source_display_line1 = self._parse_block_helper(rs[3], 'SPA1,', [int]) # 1=source, 2=compliance, 3=Volt meas range, 4=current meas range, 5: last error
        source_display_line2 = self._parse_block_helper(rs[4], 'SPA2,', [int]) # 1=source, 2=compliance, 3=Volt meas range, 4=current meas range, 5: last error
        measurement_display = self._parse_block_helper(rs[5], 'MPA', [int]) # 1=compliance side, 2=compliance and force, 3=resistance, 4=power
        source_ch_disp = self._parse_block_helper(rs[6], 'SCH', [int])
        measurement_ch_disp = self._parse_block_helper(rs[7], 'MCH', [int])
        return dict(remote_dsp_en=remote_dsp_en[0],
                    front_panel_lock_en=front_panel_lock_en[0],
                    display_scientific_en=display_scientific_en[0],
                    source_display_line1=source_display_line1[0],
                    source_display_line2=source_display_line2[0],
                    measurement_display=measurement_display[0],
                    source_ch_disp=source_ch_disp[0],
                    measurement_ch_disp=measurement_ch_disp[0])
@cache_result
def _get_staircase_settings(self):
ret = self.ask("*LRN? 33")
rs = ret.split(';')
if not (2 <= len(rs) <= 3):
raise RuntimeError(self.perror('Invalid number of elements for lrn 33'))
abort, end = self._parse_block_helper(rs[0], 'WM', [int, int])
delays = self._parse_block_helper(rs[1], 'WT', [float]*5)
ret_dict = dict_improved(ending_value = {1:'start', 2:'end'}[end],
hold_time = delays[0],
delay_time = delays[1],
abort=abort)
if len(rs) == 3:
if rs[2][1] == 'I':
stair = self._parse_block_helper(rs[2], 'WI', [int, int, int, float, float, int, float, float], Nv_min=6)
ch = stair[0]
ret_dict['sweep_var'] = 'current'
ret_dict['active_range'] = 10**(stair[2]-11-9)
ret_dict['compliance'] = stair[6]
ret_dict['power'] = stair[7]
else:
stair = self._parse_block_helper(rs[2], 'WV', [int, int, int, float, float, int, float, float], Nv_min=6)
ch = stair[0]
ret_dict['sweep_var'] = 'voltage'
ret_dict['active_range'] = stair[2]/10.
comp = None if len(stair) < 7 else stair[6]
power = None if len(stair) < 8 else stair[7]
mode_opt = {1:'linear', 2:'log', 3:'linear updown', 4:'log updown'}
ret_dict.update(dict(sweep_ch = ch,
mode = mode_opt[stair[1]],
start = stair[3],
stop = stair[4],
steps = stair[5],
power = power,
compliance = comp))
else:
ret_dict['sweep_var'] = None
return ret_dict
_status_letter_2_num = dict(N=0, T=4, C=8, V=1, X=2, G=16, S=32)
def _parse_data(self, data_string):
""" Automatically parses the data into value, channel, status, type """
# FMT12 and FMT22 seem to be the same
if data_string[2] in 'VIT': # FMT1 or FMT5 (12 digits data), or FMT11 or FMT15 (13 digits data)
status = data_string[0] # W/E E is for last sweep step data for source, N<G<S<T<C<V<X<F (pulse is N<T<C<V<X<G or S)
# N: No error, T: Another channel compliance, C: This channel Compliance, V: over range
# X: channel oscillating, G: search not found or over time on quasi-pulse, S: search stopped or quasi-pulse too slow
status = self._status_letter_2_num[status]
channel = data_string[1] # A-H = 1-8
type = data_string[2] # V/I/T for Volt, Current, Time
value = float(data_string[3:])
elif data_string[4] in 'VvIiTZz': # FMT21 or FMT25
if data_string.startswith(' '):
status = 128 if data_string[2] == 'E' else 0 # W/E E is for last sweep step data for source
else:
status = int(data_string[:3]) # Status, 1=A/D overflow(V), 2:some unit oscillating(X), 4: Another unit reached compliance(T), 8: This unit reached compliance(C)
# 16: Target not found (G), 32: Search stopped (S), 64: Invalid data (), 128: End of data
channel = data_string[3] # A-H = 1-8, V=GNDU, Z=extra or TSQ or invalid
type = data_string[4] # V/v/I/i/T/Z/z is Volt/Volt source/Current/Current source/Time/Invalid/Invalid
value = float(data_string[5:])
else: # FMT2 (12 digits), FMT12 or FMT22 (13 digits)
status = 0
channel = None
type = None
value = float(data_string)
if channel is not None and channel in 'ABCDEFGH':
channel = ord(channel) - ord('A') + 1
return value, channel, status, type
def _get_unit_conf(self):
# obtain list of model_slot_1, rev_slot_1; model_slot_2m rev_slot_2
# like 'E5281B,0;E5281B,0;E5281B,0;E5281B,0;0,0;0,0;0,0;0,0'
ret = self.ask('UNT?')
rs = ret.split(';')
Nmax = len(rs) # should be 8 for E5270B mainframe
options = [r.split(',')for r in rs]
options_en = [o[0] != '0' for o in options]
options_dict = {}
valid_ch = []
N = 0
for i, opt in enumerate(options):
if options_en[i]:
N += 1
ch = i+1
valid_ch.append(ch)
options_dict[ch] = opt
return valid_ch, options_dict, Nmax
# When switching from DI to DV, The compliance is required
# when switching from off to DI/DV the outputs first need to be enabled.
# *LRN? 0 return CL or CN1,2,3,4 or CN1,2,3
# *LRN? 1 returns DV1,200,+00.0000E+00,+100.000E-06,0,16 or DI... or CL1
# ..4
# *LRN? 30 returns FL0 or FL1 or FL0,1,2,3;FL1,4
# *LRN? 31 returns TM1;AV1,0;CM1;FMT1,0;MM16,1
# *LRN? 32 returns RI1,0;RV1,0;RI2,0;RV2,0;RI3,0;RV3,0;RI4,0;RV4,0
# 33 staircase, 34 pules, 37 quasi-pulse, 38 is io ports, 40 channel mapping (ACH), 50 linear search, 51 binary search, 58 is trigger, 59 multi channel sweep
# 61 is display settings, 62,64,64 ASU setting (atto),
# *LRN? 46 returns CMM1,0;CMM2,0;CMM3,0;CMM4,0
# *LRN? 53 returns SSR1,0;SSR2,0;SSR3,0;SSR4,0
# *LRN? 54 returns RM1,1,50;RM2,1,50;RM3,1,50;RM4,1,50
# *LRN? 55 returns AAD1,1;AAD2,1;AAD3,1;AAD4,1
# *LRN? 56 returns AIT0,0,1;AIT1,0,6;AZ0
# *LRN? 57 returns WAT1,1.0,0.0000;WAT2,1.0,0.0000
# *LRN? 60 returns TSC0
    # When doing the calibration, the channels are opened and the settings are changed.
# The output should be left floating. Voltage spikes of around +-400 mV (100 us to 1 ms long) can be observed during calibration.
# Calibration is automatically started after every 30 min once all channels are off (unless autocal is off)
# AV and AIT0 have the same information. Setting one changes the other.
# TV and TI use the high-resolution or high-speed depending on the settings of the channel
# To be able to use High-speed measurement in parallel:
# - need to use Measurement mode 16 (multi channel sweep) (it does not work with the others like spot=1 or staircase=2) (MM)
# - need to set the measurement range for all the channels to read to a fixed range (not auto nor limited auto) (RV or RI)
# - need to use high-speed measurement (not high-resolution) (AAD)
# Example: AAD1,0; RI1,-14; RI2,14; AAD2,0; MM 16,1,2; WV1,1,0,.1,.2,1
# Note that the above sets a sweep with just one point. It stays at the point after if WM post is 1. It goes to last value if it is 2.
# Even for up/down (WV1,4), WM post=1,2 use start,stop value so here .1, .2 at the end.
# Up/down repeats the last point (so for 3 pts sweep does: A,B,C,C,B,A)
#
# For timing use WT hold,delay,[Sdelay,[Tdelay,[Mdelay]]]
# for staircase or multi channel sweep:
# hold: delay before first step (forces first point and then waits this time)
# This first setup trigger is sent after this.
# delay: time between force start and measurement start
# Sdelay: delay after start measurement before next force start
# if measurement is longer than Sdelay, Start force immediately after measurement
# The next two are for triggers (no effect if triggers are not used).
# Tdelay: delay between step output setup and outputing a step output setup completion trigger
# Mdelay: delay between receiving a step measurement trigger and starting a step measurement.
#
# Also for timing is WAT1,N, offset and WAT2, N, offset
# where the wait time = N*(initial time) + offset
# N goes from 0 to 10, offset is 0 to 1s
# WAT1 is a time to stay in force at a new value before doing a measurement or changing the force again.
# The programming manual (edition 4, page 1.44) says it is the time before changing source.
# But at least for the first point, the extra time is added after the change.
# It is added to hold or delay. It can be absorbed in the Sdelay time that is in extra of the measurement time.
# It even affects DV/DI
# So if the output has been at the first point of the sweep for a little while,
# then WAT1 does not add anything.
# I think this time can be repeated when autoranging
# WAT2 is a time to wait before data when autoranging (can be multiple times)
# It can be absorbed in the Sdelay time that is in extra of the measurement time.
# both WAT can overlap. The longest is the larger of them (except when used multiple times because of autoranging)
#
# The filter Provides a Rise time (10-90% from scope) of 0.42 ms (at least for voltage under certain conditions)
    # This risetime would mean a time constant of 0.2 ms (f3db = 800 Hz)
# Compliance Polarity behaves as described in User Guide (see Figure 6-1, 6-2, 6-3)
# Volt measurement range: (value in () can be used, but instrument returns the other one.
# 0=auto, 5=0.5, 20(or 11)=2 (lim), 50=5 (lim), 200(or 12)=20 (lim), 400(or 13)=40 (lim), 1000(or 14)=100 (lim)
# -5=0.5 (fix), -20(or -11)=2 (fix), -50=5 (fix), -200(or -12)=20 (fix), -400(or -13)=40 (fix), -1000(or -14)=100 (fix)
# Current measurement range:
# 0=auto, 11=1n (lim), 12=10n (lim), 13=100n (lim), 14=1u (lim), 15=10u (lim), 16=100u (lim), 17=1m (lim), 18=10m (lim),19=100m (lim)
# -11=1n (fix), -12=10n (fix), -13=100n (fix), -14=1u (fix), -15=10u (fix), -16=100u (fix), -17=1m (fix), -18=10m (fix),-19=100m (fix)
# For output range selection, only use auto or limited ones (not the fixed ranges).
# The fixed ones can be used, but if they are too small, we have a parameter error and nothing is changed.
# Observations about RQS (some of it with NI trace, some with a scope on pin 10 of cable):
# When using *sre 16 (that bit is high except when executing a command)
# The SRQ line should and does get activated at the end of execution
# However, it does seem to also produce a glitch when writing a command.
    # The SRQ line momentarily gets activated, but only for about 20 us.
# This probably causes NI autopoll to sometimes misbehave (it can miss some events.)
# NI autopoll is disabled by board level calls (but I check and the iblck calls don't seem
# to be a problem.) and also by Stuck SRQ line (ESRQ error return from ibwait). But I
# was unable to observe the ESRQ error (and it was not stuck on, more more like it was
# already off when the autopoll tried to see the source.)
# Even when using going back to *sre 0, there is a glitch (the first time) on the SRQ line.
# Again it is short (20 us) and probably skipped by autopoll code
# (could depend on the number of device opened on the gpib (my test uses only one)
# and computer speed ...) It could cause some extraneous status cleanup (unread))
# The solution:
    #  my event wait internally (the NI visa library) uses ibwait which restarts autopoll.
    #  So I just need to make sure to properly clean the status_byte buffer before
# using it. The safest, after *sre 16 (which does create the event, sometimes),
# is to wait a little to make sure the instruments did trigger it (I use *OPC?)
# and then empty the buffer.
| lgpl-3.0 |
brion/cerbero | cerbero/tools/osxuniversalgenerator.py | 16 | 9325 | #!/usr/bin/env python
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Thiago Santos <thiago.sousa.santos@collabora.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import subprocess
import shutil
import tempfile
from cerbero.utils import shell
from cerbero.tools.osxrelocator import OSXRelocator
# Maps a substring of `file` command output to the strategy used for files of
# that type.  Order matters: the first matching substring wins.
# Strategies:
#   merge    - combine the per-arch binaries into one universal binary (lipo)
#   skip     - ignore the file entirely
#   copy-la  - copy a libtool .la file, rewriting embedded install paths
#   link     - recreate the symbolic link in the output tree
#   copy     - copy the file verbatim
# (The duplicate trailing ('data', 'copy') entry was removed; it was dead,
# since the first 'data' entry always matched first.)
file_types = [
    ('Mach-O', 'merge'),
    ('ar archive', 'merge'),
    ('libtool archive', 'skip'),
    ('libtool library', 'copy-la'),
    ('symbolic link', 'link'),
    ('data', 'copy'),
    ('text', 'copy'),
    ('document', 'copy'),
    ('catalog', 'copy'),
    ('python', 'copy'),
    ('image', 'copy'),
    ('icon', 'copy'),
    ('FORTRAN', 'copy'),
    ('LaTeX', 'copy'),
    ('Zip', 'copy'),
    ('empty', 'copy'),
]
class OSXUniversalGenerator(object):
    '''
    Wrapper for OS X's lipo command to help generating universal binaries
    from single arch binaries.

    It takes multiple input directories and parses through them. For every
    file it finds, it looks at the file type and compares it with the file
    types of the other input directories; in case the file is a single arch
    executable or dynamic library, it will be merged into a universal binary
    and copied to the output directory. All the other files
    (text/headers/scripts) are just copied directly.

    This tool assumes that the input roots have the same structure and files,
    as they should be the results of building the same project for different
    architectures.
    '''

    LIPO_CMD = 'lipo'
    FILE_CMD = 'file'

    def __init__(self, output_root):
        '''
        @output_root: the output directory where the result will be generated
        '''
        self.output_root = output_root
        if self.output_root.endswith('/'):
            self.output_root = self.output_root[:-1]
        self.missing = []

    def merge_files(self, filelist, dirs):
        '''Merge each relative path in filelist, looking it up in every dir.'''
        if len(filelist) == 0:
            return
        for f in filelist:
            self.do_merge(f, dirs)

    def merge_dirs(self, input_roots):
        '''Walk the input roots and merge everything into output_root.'''
        if not os.path.exists(self.output_root):
            os.mkdir(self.output_root)
        self.parse_dirs(input_roots)

    def create_universal_file(self, output, inputlist, dirs):
        '''Run lipo over the per-arch copies in inputlist to create output.'''
        tmp_inputs = []
        # relocate all files with the prefix of the merged file,
        # which must be done before merging them.
        for f in inputlist:
            # keep the filename in the suffix to preserve the filename extension
            tmp = tempfile.NamedTemporaryFile(suffix=os.path.basename(f))
            tmp_inputs.append(tmp)
            shutil.copy(f, tmp.name)
            prefix_to_replace = [d for d in dirs if d in f][0]
            relocator = OSXRelocator(self.output_root, prefix_to_replace,
                                     self.output_root, False)
            # since we are using a temporary file, we must force the library id
            # name to the real one and not one based on the temp filename
            relocator.relocate_file(tmp.name,
                                    id=f.replace(prefix_to_replace, self.output_root))
        cmd = '%s -create %s -output %s' % (self.LIPO_CMD,
                                            ' '.join([f.name for f in tmp_inputs]),
                                            output)
        self._call(cmd)
        for tmp in tmp_inputs:
            tmp.close()

    def get_file_type(self, filepath):
        '''Return the `file` command's brief description of filepath.'''
        cmd = '%s -bh "%s"' % (self.FILE_CMD, filepath)
        return self._call(cmd)[0:-1]  # remove trailing \n

    def _detect_merge_action(self, files_list):
        '''Decide which merge strategy applies to the given per-arch copies.

        Returns one of the actions from file_types (or 'copy-pc' / 'skip').
        Raises Exception when the file type is unknown or when the per-arch
        copies disagree on the action.
        '''
        actions = []
        for f in files_list:
            if not os.path.exists(f):
                continue  # TODO what can we do here? fontconfig has
                          # some random generated filenames it seems
            ftype = self.get_file_type(f)
            action = ''
            for ft in file_types:
                if ft[0] in ftype:
                    # pkg-config files are plain text but need path rewriting
                    if ft[0] == 'text' and f.endswith('.pc'):
                        action = 'copy-pc'
                    else:
                        action = ft[1]
                    break
            if not action:
                if ftype.startswith('ERROR') and f.endswith('.h'):
                    action = 'copy'
                else:
                    # fixed: was the Python-2-only `raise Exception, '...'` form
                    raise Exception('Unexpected file type %s %s' % (str(ftype), f))
            actions.append(action)
        if len(actions) == 0:
            return 'skip'  # we should skip this one, the file doesn't exist
        all_same = all(x == actions[0] for x in actions)
        if not all_same:
            raise Exception('Different file types found: %s : %s'
                            % (str(ftype), str(files_list)))
        return actions[0]

    def do_merge(self, filepath, dirs):
        '''Merge one file (path relative to the roots) from every dir.'''
        full_filepaths = [os.path.join(d, filepath) for d in dirs]
        action = self._detect_merge_action(full_filepaths)

        # pick the first file as the base one in case of copying/linking
        current_file = full_filepaths[0]
        output_file = os.path.join(self.output_root, filepath)
        output_dir = os.path.dirname(output_file)

        if action == 'copy':
            self._copy(current_file, output_file)
        elif action == 'copy-la' or action == 'copy-pc':
            self._copy_and_replace_paths(current_file, output_file, dirs)
        elif action == 'link':
            self._link(current_file, output_file, filepath)
        elif action == 'merge':
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            self.create_universal_file(output_file, full_filepaths, dirs)
        elif action == 'skip':
            pass  # just pass
        else:
            # fixed: was the Python-2-only `raise Exception, '...'` form
            raise Exception('unexpected action %s' % action)

    def parse_dirs(self, dirs, filters=None):
        '''Walk dirs[0] and merge every file found.

        @filters: optional list of file extensions; other files are skipped.
        '''
        self.missing = []
        dir_path = dirs[0]
        if dir_path.endswith('/'):
            dir_path = dir_path[:-1]
        for dirpath, dirnames, filenames in os.walk(dir_path):
            # rebuild dirpath relative to dir_path, one component at a time
            current_dir = ''
            token = ' '
            remaining_dirpath = dirpath
            while remaining_dirpath != dir_path or token == '':
                remaining_dirpath, token = os.path.split(remaining_dirpath)
                current_dir = os.path.join(token, current_dir)
            for f in filenames:
                if filters is not None and os.path.splitext(f)[1] not in filters:
                    continue
                current_file = os.path.join(current_dir, f)
                self.do_merge(current_file, dirs)

    def _copy(self, src, dest):
        '''Copy src to dest, creating intermediate directories as needed.'''
        if not os.path.exists(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        shutil.copy(src, dest)

    def _copy_and_replace_paths(self, src, dest, dirs):
        '''Copy src to dest, rewriting any input-root path to output_root.'''
        self._copy(src, dest)
        replacements = {}
        for d in dirs:
            replacements[d] = self.output_root
        shell.replace(dest, replacements)

    def _link(self, src, dest, filepath):
        '''Recreate the symlink src at dest, making absolute targets relative.'''
        if not os.path.exists(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        if os.path.lexists(dest):
            return  # link exists, skip it
        # read the link, and extract the relative filepath
        target = os.readlink(src)
        # if it's a relative path use it directly
        if not os.path.isabs(target):
            os.symlink(target, dest)
            return
        # if it's an absolute path, make it relative for sanity
        rel_path = os.path.relpath(os.path.dirname(target), os.path.dirname(dest))
        dest_target = os.path.join(rel_path, os.path.basename(target))
        os.symlink(dest_target, dest)

    def _call(self, cmd, cwd=None):
        '''Run cmd through the shell and return its standard output.'''
        # The previous `cmd = cmd or self.root` fallback was removed: self.root
        # is never defined anywhere in this class, so it could only raise
        # AttributeError, and every caller passes a non-empty command.
        process = subprocess.Popen(cmd, cwd=cwd,
                                   stdout=subprocess.PIPE, shell=True)
        output, unused_err = process.communicate()
        return output
class Main(object):
    """Command line driver: merge several arch build trees into one."""

    def run(self):
        """Parse command line arguments and run the generator."""
        # We use OptionParser instead of ArgumentsParse because this script might
        # be run in OS X 10.6 or older, which do not provide the argparse module
        import optparse
        usage = "usage: %prog [options] outputdir inputdir1 inputdir2 ..."
        description = ('Merges multiple architecture build trees into a single '
                       'universal binary build tree')
        parser = optparse.OptionParser(usage=usage, description=description)
        options, args = parser.parse_args()
        # Need at least one output dir and two input dirs.
        if len(args) < 3:
            parser.print_usage()
            exit(1)
        output_dir, input_dirs = args[0], args[1:]
        generator = OSXUniversalGenerator(output_dir)
        generator.merge_dirs(input_dirs)
        exit(0)
# Script entry point: parse arguments and run the merge.
if __name__ == "__main__":
    main = Main()
    main.run()
| lgpl-2.1 |
rmhyman/DataScience | Lesson3/exploratory_data_analysis_subway_data.py | 1 | 1558 | import numpy as np
import pandas
import matplotlib.pyplot as plt
def entries_histogram(turnstile_weather):
    '''
    Plot two overlaid histograms of hourly subway entries, one for non-rainy
    hours and one for rainy hours, to eyeball the distribution of the data
    before any statistical analysis.

    @param turnstile_weather: pandas DataFrame with at least the columns
        'rain' (1 when raining, 0 otherwise) and 'ENTRIESn_hourly'.
    @return: the matplotlib.pyplot module with the figure drawn on it.

    See:
    http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
    '''
    plt.figure()
    # Hourly entries when it is NOT raining (rain == 0).
    # (The original inline comments had the two cases swapped.)
    turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly'].hist()
    # Hourly entries when it IS raining (rain == 1).
    turnstile_weather[turnstile_weather['rain'] == 1]['ENTRIESn_hourly'].hist()
    return plt
| mit |
wglass/zoonado | examples/locking.py | 1 | 1246 | import logging
import random
from tornado import gen
log = logging.getLogger()
def arguments(parser):
    """Register this example's command line options on the given parser."""
    option_specs = [
        (("--workers", "-w"),
         dict(type=int, default=3, help="Number of workers to launch.")),
        (("--lock-path", "-p"),
         dict(type=str, default="examplelock",
              help="ZNode path to use for the lock.")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
@gen.coroutine
def run(client, args):
    """Connect the client, run every worker concurrently, then disconnect.

    Workers are launched in a shuffled order so lock acquisition order
    varies between runs.
    """
    log.info("Launching %d workers.", args.workers)
    yield client.start()

    launch_order = list(range(args.workers))
    random.shuffle(launch_order)

    yield [work(worker_id, client, args) for worker_id in launch_order]

    yield client.close()
@gen.coroutine
def work(number, client, args):
    """Acquire the shared lock and hold it across a few random-length sleeps."""
    lock = client.recipes.Lock(args.lock_path)
    iterations = 3

    log.info("[WORKER #%d] Acquiring lock...", number)
    with (yield lock.acquire()) as check:
        log.info("[WORKER #%d] Got lock!", number)
        for _ in range(iterations):
            pause = random.choice([1, 2, 3])
            # check() reports whether we still hold the lock; bail out if
            # the session lost it underneath us.
            if not check():
                log.warn("[WORKER #%d] lost my lock!", number)
                break
            log.info("[WORKER #%d] working %d secs", number, pause)
            yield gen.sleep(pause)
    log.info("[WORKER #%d] Done!", number)
| apache-2.0 |
wxdublin/CuckooSploit | modules/reporting/mmdef.py | 6 | 12571 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file "docs/LICENSE" for copying permission.
import os
import hashlib
import lib.maec.maec11 as maec
from lib.cuckoo.common.abstracts import Report
from lib.cuckoo.common.exceptions import CuckooReportError
from lib.cuckoo.common.utils import datetime_to_iso
class MMDef(Report):
    """Generates a MAEC Malware Metadata Sharing report.

    Builds an XML document via the generated lib.maec.maec11 bindings from a
    Cuckoo results dict and writes it to report.metadata.xml in reports_path.
    """

    def run(self, results):
        """Writes report.
        @param results: Cuckoo results dict.
        @raise CuckooReportError: if fails to write report.
        """
        # Save results.
        self.results = results

        # Reporting steps: build the document root, then the object list,
        # then the relationships between objects, then serialize.
        self.addMetadata()
        self.addObjects()
        self.addRelations()

        # Write report.
        self.output()

    def addMetadata(self):
        """Generates header for MAEC xml and root components."""
        # Build the document id from the target's hash.
        # NOTE(review): the local name `id` shadows the builtin; kept as-is.
        if "target" in self.results and self.results["target"]["category"] == "file":
            id = "cuckoo:%s" % self.results["target"]["file"]["md5"]
        elif "target" in self.results and self.results["target"]["category"] == "url":
            id = "cuckoo:%s" % hashlib.md5(self.results["target"]["url"]).hexdigest()
        else:
            raise CuckooReportError("Unknown target type or targetinfo module disabled")

        self.m = maec.malwareMetaData(
            version="1.1",
            id=id,
            author="Cuckoo Sandbox %s" % self.results["info"]["version"],
            comment="Report created with Cuckoo Sandbox %s automated and open source malware sandbox: http://www.cuckoosandbox.org" % self.results["info"]["version"],
            timestamp=datetime_to_iso(self.results["info"]["started"])
        )
        # Objects
        self.objects = maec.objectsType()
        self.m.set_objects(self.objects)
        # Object Properties
        self.properties = maec.objectPropertiesType()
        self.m.set_objectProperties(self.properties)
        # Relationships
        self.relationships = maec.relationshipsType()
        self.m.set_relationships(self.relationships)

    def addObjects(self):
        """Adds objects elements (the analysis subject, dropped files and
        contacted URIs)."""
        # File objects
        # Subject
        if self.results["target"]["category"] == "file":
            self.objects.add_file(self.createFileObject(self.results["target"]["file"]))
        elif self.results["target"]["category"] == "url":
            self.objects.add_uri(maec.uriObject(
                id=hashlib.md5(self.results["target"]["url"]).hexdigest(),
                uriString=self.results["target"]["url"])
            )
        else:
            raise CuckooReportError("Unknown target type")

        # Dropped files: add each one at most once, keyed on MD5.
        if "dropped" in self.results and isinstance(self.results["dropped"], list):
            for f in self.results["dropped"]:
                found = False
                for exist in self.objects.get_file():
                    if exist.get_md5() == f["md5"]:
                        found = True
                if not found:
                    self.objects.add_file(self.createFileObject(f))

        # URI objects: one per distinct HTTP request URI.
        if "network" in self.results and isinstance(self.results["network"], dict):
            if "http" in self.results["network"] and isinstance(self.results["network"]["http"], list):
                for req in self.results["network"]["http"]:
                    found = False
                    for exist in self.objects.get_uri():
                        if exist.get_id() == req["uri"]:
                            found = True
                    if not found:
                        self.objects.add_uri(self.createUriObject(req))

    def createFileObject(self, f):
        """Creates a file object.
        @param f: file hash representation from cuckoo dict results.
        @return: file object.
        """
        # NOTE(review): `file` shadows the builtin; kept as-is.
        file = maec.fileObject(
            id=f["md5"],
            fileType=[f["type"]],
            size=f["size"],
            crc32=f["crc32"],
            md5=f["md5"],
            sha1=f["sha1"],
            sha512=f["sha512"]
        )
        file.add_extraHash(maec.extraHashType("ssdeep", f["ssdeep"]))
        # Add related filename as an objectProperty referencing the file by id.
        prop = maec.objectProperty()
        prop.add_property(maec.property(
            type_="filename",
            valueOf_=f["name"]
            )
        )
        prop.set_references(
            maec.reference(
                valueOf_="file[@id='%s']" % f["md5"]
            )
        )
        self.properties.add_objectProperty(prop)
        return file

    def getRelId(self):
        """Generates incremental relation id.
        @return: generated id
        """
        # First call initializes the counter via the AttributeError path.
        try:
            self.relId = self.relId +1
        except AttributeError:
            self.relId = 1
        return self.relId

    def addRelations(self):
        """Adds relationships (installed files, DNS resolutions, HTTP
        contacts) between the objects added by addObjects()."""
        # `src` is the XPath-style reference to the analysis subject.
        if self.results["target"]["category"] == "file":
            src = "file[@id='%s']" % self.results["target"]["file"]["md5"]
        elif self.results["target"]["category"] == "url":
            src = "url[@id='%s']" % hashlib.md5(self.results["target"]["url"]).hexdigest()

        # Dropped files
        # NOTE(review): unlike addObjects(), this accesses
        # self.results["dropped"] unguarded -- presumably the key is always
        # present when this report runs; confirm, otherwise this raises
        # KeyError. Also `file` shadows the builtin here.
        for file in self.results["dropped"]:
            self.relationships.add_relationship(self.createRelation(
                action="installed",
                src=src,
                dst="file[@id='%s']" % file["md5"]
                )
            )

        # Network
        if "network" in self.results and isinstance(self.results["network"], dict):
            # DNS requests: link each resolved IP to the URI it serves.
            for req in self.objects.get_uri():
                # Get IP
                if "domains" in self.results["network"] and isinstance(self.results["network"]["domains"], list):
                    for res in self.results["network"]["domains"]:
                        if res["domain"] == req.get_hostname():
                            ip = res["ip"]
                            # Check if obj exist
                            found = None
                            for obj in self.objects.get_ip():
                                if ip == obj.get_startAddress().get_valueOf_():
                                    found = obj
                            # Create obj
                            if found is None:
                                found = self.createIpObject(ip)
                                self.objects.add_ip(found)
                            # Create relation
                            self.relationships.add_relationship(self.createRelation(
                                action="isServerOfService",
                                src="ip[@id='%s']" % found.id,
                                dst="uri[@id='%s']" % req.id
                                )
                            )
            # HTTP requests: link the subject to each contacted URI.
            if "http" in self.results["network"] and isinstance(self.results["network"]["http"], list):
                for req in self.results["network"]["http"]:
                    self.relationships.add_relationship(self.createRelation(
                        action="contactedBy",
                        src=src,
                        dst="uri[@id='%s']" % req["uri"]
                        )
                    )

    def createRelation(self, action, src, dst):
        """Creates a relation between objects.
        @param action: relation type
        @param src: relation source
        @param dst: relation target
        @return: relation object
        """
        return maec.relationship(
            id=self.getRelId(),
            type_=action,
            source=maec.reference(
                valueOf_=src
            ),
            target=maec.reference(
                valueOf_=dst
            )
        )

    def createIpObject(self, ip):
        """Creates an single IP object, not an IP range object.
        @param ip: IP address
        @return: IP object
        """
        # A single address is expressed as a degenerate range (start == end).
        return maec.IPObject(
            id="%s-%s" % (ip, ip),
            startAddress=maec.IPAddress(
                type_="ipv4",
                valueOf_=ip
            ),
            endAddress=maec.IPAddress(
                type_="ipv4",
                valueOf_=ip
            )
        )

    def createUriObject(self, req):
        """Creates URI object
        @param req: HTTP request as described in cuckoo dict
        @return: created URI object
        """
        uri = maec.uriObject(
            id=req["uri"],
            uriString=req["uri"],
            protocol="http",
            hostname=req["host"],
            port=req["port"],
            path=req["path"],
            ipProtocol="tcp"
        )
        # Add details (method, POST body, user agent) as objectProperties.
        prop = maec.objectProperty()
        prop.add_property(maec.property(
            type_="httpMethod",
            valueOf_=req["method"]
            )
        )
        if req["method"] == "POST":
            prop.add_property(maec.property(
                type_="postData",
                valueOf_="<![CDATA[%s]]>" % req["body"]
                )
            )
        if "user-agent" in req:
            prop.add_property(maec.property(
                type_="userAgent",
                valueOf_=req["user-agent"]
                )
            )
        prop.set_references(
            maec.reference(
                valueOf_="uri[@id='%s']" % req["uri"]
            )
        )
        self.properties.add_objectProperty(prop)
        return uri

    def output(self):
        """Writes report to disk.
        @raise CuckooReportError: if the XML cannot be serialized or written.
        """
        try:
            report = open(os.path.join(self.reports_path, "report.metadata.xml"), "w")
            report.write("<?xml version='1.0' ?>\n")
            report.write("<!--\n")
            report.write("Cuckoo Sandbox malware analysis report\n")
            report.write("http://www.cuckoosandbox.org\n")
            report.write("-->\n")
            self.m.export(report, 0, namespace_="", namespacedef_="xmlns='http://xml/metadataSharing.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:schemaLocation='http://xml/metadataSharing.xsd'")
            report.close()
        except (TypeError, IOError) as e:
            raise CuckooReportError("Failed to generate MAEC Metadata report: %s" % e)
| gpl-3.0 |
TribeMedia/synapse | tests/util/test_lrucache.py | 2 | 7584 | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import unittest
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.treecache import TreeCache
from mock import Mock
class LruCacheTestCase(unittest.TestCase):
    """Tests for the basic dict-like behaviour of LruCache.

    Uses assertEqual throughout: the assertEquals alias is deprecated and
    was removed in Python 3.12.
    """

    def test_get_set(self):
        cache = LruCache(1)
        cache["key"] = "value"
        self.assertEqual(cache.get("key"), "value")
        self.assertEqual(cache["key"], "value")

    def test_eviction(self):
        # With capacity 2, inserting a third entry evicts the oldest.
        cache = LruCache(2)
        cache[1] = 1
        cache[2] = 2

        self.assertEqual(cache.get(1), 1)
        self.assertEqual(cache.get(2), 2)

        cache[3] = 3

        self.assertEqual(cache.get(1), None)
        self.assertEqual(cache.get(2), 2)
        self.assertEqual(cache.get(3), 3)

    def test_setdefault(self):
        cache = LruCache(1)
        self.assertEqual(cache.setdefault("key", 1), 1)
        self.assertEqual(cache.get("key"), 1)
        self.assertEqual(cache.setdefault("key", 2), 1)
        self.assertEqual(cache.get("key"), 1)
        cache["key"] = 2  # Make sure overriding works.
        self.assertEqual(cache.get("key"), 2)

    def test_pop(self):
        cache = LruCache(1)
        cache["key"] = 1
        self.assertEqual(cache.pop("key"), 1)
        self.assertEqual(cache.pop("key"), None)

    def test_del_multi(self):
        # TreeCache keys are tuples; del_multi removes a whole key-prefix
        # subtree at once.
        cache = LruCache(4, 2, cache_type=TreeCache)
        cache[("animal", "cat")] = "mew"
        cache[("animal", "dog")] = "woof"
        cache[("vehicles", "car")] = "vroom"
        cache[("vehicles", "train")] = "chuff"

        self.assertEqual(len(cache), 4)

        self.assertEqual(cache.get(("animal", "cat")), "mew")
        self.assertEqual(cache.get(("vehicles", "car")), "vroom")
        cache.del_multi(("animal",))
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache.get(("animal", "cat")), None)
        self.assertEqual(cache.get(("animal", "dog")), None)
        self.assertEqual(cache.get(("vehicles", "car")), "vroom")
        self.assertEqual(cache.get(("vehicles", "train")), "chuff")

    def test_clear(self):
        cache = LruCache(1)
        cache["key"] = 1
        cache.clear()
        self.assertEqual(len(cache), 0)
class LruCacheCallbacksTestCase(unittest.TestCase):
    """Tests that invalidation callbacks fire exactly when entries change.

    A callback registered via get/set must fire once when its entry is
    replaced, popped, evicted or cleared -- and never before.
    Uses assertEqual: the assertEquals alias was removed in Python 3.12.
    """

    def test_get(self):
        m = Mock()
        cache = LruCache(1)

        cache.set("key", "value")
        self.assertFalse(m.called)

        cache.get("key", callbacks=[m])
        self.assertFalse(m.called)

        cache.get("key", "value")
        self.assertFalse(m.called)

        # Replacing the value fires the callback exactly once.
        cache.set("key", "value2")
        self.assertEqual(m.call_count, 1)

        cache.set("key", "value")
        self.assertEqual(m.call_count, 1)

    def test_multi_get(self):
        # Registering the same callback twice must not make it fire twice.
        m = Mock()
        cache = LruCache(1)

        cache.set("key", "value")
        self.assertFalse(m.called)

        cache.get("key", callbacks=[m])
        self.assertFalse(m.called)

        cache.get("key", callbacks=[m])
        self.assertFalse(m.called)

        cache.set("key", "value2")
        self.assertEqual(m.call_count, 1)

        cache.set("key", "value")
        self.assertEqual(m.call_count, 1)

    def test_set(self):
        m = Mock()
        cache = LruCache(1)

        cache.set("key", "value", callbacks=[m])
        self.assertFalse(m.called)

        cache.set("key", "value")
        self.assertFalse(m.called)

        cache.set("key", "value2")
        self.assertEqual(m.call_count, 1)

        cache.set("key", "value")
        self.assertEqual(m.call_count, 1)

    def test_pop(self):
        m = Mock()
        cache = LruCache(1)

        cache.set("key", "value", callbacks=[m])
        self.assertFalse(m.called)

        cache.pop("key")
        self.assertEqual(m.call_count, 1)

        # Re-setting and popping again must not re-fire the old callback.
        cache.set("key", "value")
        self.assertEqual(m.call_count, 1)

        cache.pop("key")
        self.assertEqual(m.call_count, 1)

    def test_del_multi(self):
        m1 = Mock()
        m2 = Mock()
        m3 = Mock()
        m4 = Mock()
        cache = LruCache(4, 2, cache_type=TreeCache)

        cache.set(("a", "1"), "value", callbacks=[m1])
        cache.set(("a", "2"), "value", callbacks=[m2])
        cache.set(("b", "1"), "value", callbacks=[m3])
        cache.set(("b", "2"), "value", callbacks=[m4])

        self.assertEqual(m1.call_count, 0)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 0)
        self.assertEqual(m4.call_count, 0)

        # Deleting the ("a",) subtree fires callbacks for both "a" entries.
        cache.del_multi(("a",))

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 1)
        self.assertEqual(m3.call_count, 0)
        self.assertEqual(m4.call_count, 0)

    def test_clear(self):
        m1 = Mock()
        m2 = Mock()
        cache = LruCache(5)

        cache.set("key1", "value", callbacks=[m1])
        cache.set("key2", "value", callbacks=[m2])

        self.assertEqual(m1.call_count, 0)
        self.assertEqual(m2.call_count, 0)

        cache.clear()

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 1)

    def test_eviction(self):
        m1 = Mock(name="m1")
        m2 = Mock(name="m2")
        m3 = Mock(name="m3")
        cache = LruCache(2)

        cache.set("key1", "value", callbacks=[m1])
        cache.set("key2", "value", callbacks=[m2])

        self.assertEqual(m1.call_count, 0)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 0)

        # key3 evicts key1 (the least recently used) and fires m1.
        cache.set("key3", "value", callbacks=[m3])

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 0)

        cache.set("key3", "value")

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 0)

        # Touching key2 makes key3 the LRU entry...
        cache.get("key2")

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 0)

        # ...so re-adding key1 evicts key3 and fires m3.
        cache.set("key1", "value", callbacks=[m1])

        self.assertEqual(m1.call_count, 1)
        self.assertEqual(m2.call_count, 0)
        self.assertEqual(m3.call_count, 1)
class LruCacheSizedTestCase(unittest.TestCase):
    """Tests for size-based eviction via size_callback.

    Uses assertEqual: the assertEquals alias was removed in Python 3.12.
    """

    def test_evict(self):
        # With size_callback=len the capacity (5) counts the total size of
        # the values, not the number of entries.
        cache = LruCache(5, size_callback=len)
        cache["key1"] = [0]
        cache["key2"] = [1, 2]
        cache["key3"] = [3]
        cache["key4"] = [4]

        self.assertEqual(cache["key1"], [0])
        self.assertEqual(cache["key2"], [1, 2])
        self.assertEqual(cache["key3"], [3])
        self.assertEqual(cache["key4"], [4])
        self.assertEqual(len(cache), 5)

        # Adding 2 more units of size pushes out the two oldest entries.
        cache["key5"] = [5, 6]

        self.assertEqual(len(cache), 4)
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)
        self.assertEqual(cache["key3"], [3])
        self.assertEqual(cache["key4"], [4])
        self.assertEqual(cache["key5"], [5, 6])
| apache-2.0 |
jcoady9/python-for-android | python3-alpha/extra_modules/gdata/youtube/__init__.py | 297 | 25623 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = ('api.stephaniel@gmail.com (Stephanie Liu)'
              ', api.jhartmann@gmail.com (Jochen Hartmann)')

import atom
import gdata
import gdata.media as Media
import gdata.geo as Geo

# XML namespace of YouTube's GData API extensions (yt:* elements).
YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
# Fully-qualified (Clark notation) name of the yt:format attribute.
YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format'
# Category scheme URIs used for developer tags and subscription types.
YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE,
                                          'developertags.cat')
YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE,
                                              'subscriptiontypes.cat')
# Simple yt:* extension elements: each class maps one YouTube XML element to
# an atom.AtomBase subclass whose `text` carries the element's value.
class Username(atom.AtomBase):
  """The YouTube Username element"""
  _tag = 'username'
  _namespace = YOUTUBE_NAMESPACE


class QueryString(atom.AtomBase):
  """The YouTube QueryString element"""
  _tag = 'queryString'
  _namespace = YOUTUBE_NAMESPACE


class FirstName(atom.AtomBase):
  """The YouTube FirstName element"""
  _tag = 'firstName'
  _namespace = YOUTUBE_NAMESPACE


class LastName(atom.AtomBase):
  """The YouTube LastName element"""
  _tag = 'lastName'
  _namespace = YOUTUBE_NAMESPACE


class Age(atom.AtomBase):
  """The YouTube Age element"""
  _tag = 'age'
  _namespace = YOUTUBE_NAMESPACE


class Books(atom.AtomBase):
  """The YouTube Books element"""
  _tag = 'books'
  _namespace = YOUTUBE_NAMESPACE


class Gender(atom.AtomBase):
  """The YouTube Gender element"""
  _tag = 'gender'
  _namespace = YOUTUBE_NAMESPACE


class Company(atom.AtomBase):
  """The YouTube Company element"""
  _tag = 'company'
  _namespace = YOUTUBE_NAMESPACE


class Hobbies(atom.AtomBase):
  """The YouTube Hobbies element"""
  _tag = 'hobbies'
  _namespace = YOUTUBE_NAMESPACE


class Hometown(atom.AtomBase):
  """The YouTube Hometown element"""
  _tag = 'hometown'
  _namespace = YOUTUBE_NAMESPACE


class Location(atom.AtomBase):
  """The YouTube Location element"""
  _tag = 'location'
  _namespace = YOUTUBE_NAMESPACE


class Movies(atom.AtomBase):
  """The YouTube Movies element"""
  _tag = 'movies'
  _namespace = YOUTUBE_NAMESPACE


class Music(atom.AtomBase):
  """The YouTube Music element"""
  _tag = 'music'
  _namespace = YOUTUBE_NAMESPACE


class Occupation(atom.AtomBase):
  """The YouTube Occupation element"""
  _tag = 'occupation'
  _namespace = YOUTUBE_NAMESPACE


class School(atom.AtomBase):
  """The YouTube School element"""
  _tag = 'school'
  _namespace = YOUTUBE_NAMESPACE


class Relationship(atom.AtomBase):
  """The YouTube Relationship element"""
  _tag = 'relationship'
  _namespace = YOUTUBE_NAMESPACE


class Recorded(atom.AtomBase):
  """The YouTube Recorded element"""
  _tag = 'recorded'
  _namespace = YOUTUBE_NAMESPACE
class Statistics(atom.AtomBase):
  """The YouTube Statistics element.

  Maps the yt:statistics XML attributes (viewCount, videoWatchCount,
  subscriberCount, lastWebAccess, favoriteCount) to Python attributes.
  """
  _tag = 'statistics'
  _namespace = YOUTUBE_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['viewCount'] = 'view_count'
  _attributes['videoWatchCount'] = 'video_watch_count'
  _attributes['subscriberCount'] = 'subscriber_count'
  _attributes['lastWebAccess'] = 'last_web_access'
  _attributes['favoriteCount'] = 'favorite_count'

  def __init__(self, view_count=None, video_watch_count=None,
               favorite_count=None, subscriber_count=None, last_web_access=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructor; all counters are optional XML attribute values."""
    self.view_count = view_count
    self.video_watch_count = video_watch_count
    self.subscriber_count = subscriber_count
    self.last_web_access = last_web_access
    self.favorite_count = favorite_count

    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes, text=text)


class Status(atom.AtomBase):
  """The YouTube Status element"""
  _tag = 'status'
  _namespace = YOUTUBE_NAMESPACE


class Position(atom.AtomBase):
  """The YouTube Position element. The position in a playlist feed."""
  _tag = 'position'
  _namespace = YOUTUBE_NAMESPACE


class Racy(atom.AtomBase):
  """The YouTube Racy element."""
  _tag = 'racy'
  _namespace = YOUTUBE_NAMESPACE


class Description(atom.AtomBase):
  """The YouTube Description element."""
  _tag = 'description'
  _namespace = YOUTUBE_NAMESPACE


class Private(atom.AtomBase):
  """The YouTube Private element."""
  _tag = 'private'
  _namespace = YOUTUBE_NAMESPACE


class NoEmbed(atom.AtomBase):
  """The YouTube VideoShare element. Whether a video can be embedded or not."""
  _tag = 'noembed'
  _namespace = YOUTUBE_NAMESPACE
class Comments(atom.AtomBase):
  """The GData Comments element"""
  _tag = 'comments'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, feed_link=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructor; feed_link points at the comments feed for the entry."""
    self.feed_link = feed_link

    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes, text=text)


class Rating(atom.AtomBase):
  """The GData Rating element"""
  _tag = 'rating'
  _namespace = gdata.GDATA_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['min'] = 'min'
  _attributes['max'] = 'max'
  _attributes['numRaters'] = 'num_raters'
  _attributes['average'] = 'average'

  def __init__(self, min=None, max=None,
               num_raters=None, average=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructor; min/max/num_raters/average mirror the XML attributes."""
    # NOTE(review): `min`/`max` shadow builtins; kept to mirror the XML
    # attribute names this binding class exposes.
    self.min = min
    self.max = max
    self.num_raters = num_raters
    self.average = average

    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes, text=text)
class YouTubePlaylistVideoEntry(gdata.GDataEntry):
  """Represents a YouTubeVideoEntry on a YouTubePlaylist."""
  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Child elements specific to a playlist video entry.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])
  _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description',
                                                      Description)
  _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating)
  _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments)
  _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics)
  _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location)
  _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position)
  _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, feed_link=None, description=None,
               rating=None, comments=None, statistics=None,
               location=None, position=None, media=None,
               extension_elements=None, extension_attributes=None):
    """Constructor; the extra keyword arguments map 1:1 to _children above."""
    self.feed_link = feed_link
    self.description = description
    self.rating = rating
    self.comments = comments
    self.statistics = statistics
    self.location = location
    self.position = position
    self.media = media

    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id,
                              link=link, published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
class YouTubeVideoCommentEntry(gdata.GDataEntry):
  """Represents a comment on YouTube."""
  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()


class YouTubeSubscriptionEntry(gdata.GDataEntry):
  """Represents a subscription entry on YouTube."""
  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username)
  _children['{%s}queryString' % YOUTUBE_NAMESPACE] = (
      'query_string', QueryString)
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, username=None, query_string=None, feed_link=None,
               extension_elements=None, extension_attributes=None):
    """Constructor for a subscription entry.

    NOTE(review): extension_elements/extension_attributes are accepted but
    not forwarded to gdata.GDataEntry.__init__ -- confirm this is intended.
    """
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title, updated=updated)
    self.username = username
    self.query_string = query_string
    self.feed_link = feed_link

  def GetSubscriptionType(self):
    """Retrieve the type of this subscription.

    Returns:
      A string that is either 'channel, 'query' or 'favorites'
    """
    # The type is encoded as the term of the category whose scheme is the
    # subscription-type scheme; implicitly returns None when absent.
    for category in self.category:
      if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME:
        return category.term
class YouTubeVideoResponseEntry(gdata.GDataEntry):
  """Represents a video response."""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating)
  _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed)
  _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics)
  _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy)
  _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group)

  def __init__(self, author=None, category=None, content=None, atom_id=None,
               link=None, published=None, title=None, updated=None, rating=None,
               noembed=None, statistics=None, racy=None, media=None,
               extension_elements=None, extension_attributes=None):
    """Initialize the base entry, then store response-specific fields."""
    # Fix: forward extension_elements/extension_attributes to the base class;
    # they were accepted but dropped, inconsistently with sibling classes.
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    self.rating = rating
    self.noembed = noembed
    self.statistics = statistics
    self.racy = racy
    # Always expose a media group object so callers can read .media safely.
    self.media = media or Media.Group()
class YouTubeContactEntry(gdata.GDataEntry):
  """Represents a contact entry."""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username)
  _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status)

  def __init__(self, author=None, category=None, content=None, atom_id=None,
               link=None, published=None, title=None, updated=None,
               username=None, status=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Initialize the base entry, then store contact-specific fields."""
    # Fix: forward extension_elements/extension_attributes/text to the base
    # class. They were accepted as parameters but silently discarded, unlike
    # YouTubeUserEntry which forwards all three.
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
    self.username = username
    self.status = status
class YouTubeVideoEntry(gdata.GDataEntry):
  """Represents a video on YouTube."""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating)
  _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments)
  _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed)
  _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics)
  _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded)
  _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy)
  _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group)
  _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where)

  def __init__(self, author=None, category=None, content=None, atom_id=None,
               link=None, published=None, title=None, updated=None, rating=None,
               noembed=None, statistics=None, racy=None, media=None, geo=None,
               recorded=None, comments=None, extension_elements=None,
               extension_attributes=None):
    """Store the video-specific children, then initialize GDataEntry."""
    self.rating = rating
    self.noembed = noembed
    self.statistics = statistics
    self.racy = racy
    self.comments = comments
    # Always expose a media group object so the accessor methods below work.
    self.media = media or Media.Group()
    self.geo = geo
    self.recorded = recorded
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)

  def GetSwfUrl(self):
    """Return the URL for the embeddable video.

    Returns:
      URL of the embeddable (yt:format=5) video, or None when absent.
    """
    if self.media.content:
      for content in self.media.content:
        # Fix: use .get() -- direct indexing raised KeyError for media
        # contents that carry no yt:format extension attribute.
        if content.extension_attributes.get(YOUTUBE_FORMAT) == '5':
          return content.url
    return None

  def AddDeveloperTags(self, developer_tags):
    """Add a developer tag for this entry.

    Developer tags can only be set during the initial upload.

    Arguments:
      developer_tags: A list of developer tags as strings.

    Returns:
      A list of all developer tags for this video entry.
    """
    for tag_text in developer_tags:
      self.media.category.append(gdata.media.Category(
          text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME))
    return self.GetDeveloperTags()

  def GetDeveloperTags(self):
    """Retrieve developer tags for this video entry.

    Returns:
      A non-empty list of developer-tag categories, or None when the entry
      has none (callers rely on the None-for-empty convention).
    """
    developer_tags = []
    for category in self.media.category:
      if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME:
        developer_tags.append(category)
    if len(developer_tags) > 0:
      return developer_tags

  def GetYouTubeCategoryAsString(self):
    """Convenience method to return the YouTube category as string.

    YouTubeVideoEntries can contain multiple Category objects with differing
    schemes. This method returns only the category with the correct
    scheme, ignoring developer tags.
    """
    for category in self.media.category:
      if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME:
        return category.text
class YouTubeUserEntry(gdata.GDataEntry):
  """Represents a user on YouTube."""
  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # yt: profile children parsed off a user entry.
  _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username)
  _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName)
  _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName)
  _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age)
  _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books)
  _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender)
  _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company)
  _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description',
                                                      Description)
  _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies)
  _children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown)
  _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location)
  _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies)
  _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music)
  _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation)
  _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School)
  _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship',
                                                       Relationship)
  _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics)
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])
  _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail',
                                                              Media.Thumbnail)

  def __init__(self, author=None, category=None, content=None, atom_id=None,
               link=None, published=None, title=None, updated=None,
               username=None, first_name=None, last_name=None, age=None,
               books=None, gender=None, company=None, description=None,
               hobbies=None, hometown=None, location=None, movies=None,
               music=None, occupation=None, school=None, relationship=None,
               statistics=None, feed_link=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Store the profile fields, then initialize the base GDataEntry."""
    self.username = username
    self.first_name = first_name
    self.last_name = last_name
    self.age = age
    self.books = books
    self.gender = gender
    self.company = company
    self.description = description
    self.hobbies = hobbies
    self.hometown = hometown
    self.location = location
    self.movies = movies
    self.music = music
    self.occupation = occupation
    self.school = school
    self.relationship = relationship
    self.statistics = statistics
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id,
                              link=link, published=published,
                              title=title, updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed whose entries parse as YouTubeVideoEntry objects."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry])
class YouTubePlaylistEntry(gdata.GDataEntry):
  """A single playlist in a user's playlists feed."""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = dict(gdata.GDataEntry._children)
  _attributes = dict(gdata.GDataEntry._attributes)
  _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description',
                                                      Description)
  _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private)
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, private=None, feed_link=None,
               description=None, extension_elements=None,
               extension_attributes=None):
    """Store the playlist-specific children, then delegate to GDataEntry."""
    self.description = description
    self.private = private
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id,
                              link=link, published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of a user's playlists (YouTubePlaylistEntry items)."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubePlaylistEntry])
class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of the videos on one playlist."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubePlaylistVideoEntry])
class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of a user's contacts (YouTubeContactEntry items)."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubeContactEntry])
class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of a user's subscriptions."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubeSubscriptionEntry])
class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of the comments on one video."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubeVideoCommentEntry])
class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of video responses."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = dict(gdata.GDataFeed._children)
  _attributes = dict(gdata.GDataFeed._attributes)
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [YouTubeVideoResponseEntry])
def YouTubeVideoFeedFromString(xml_string):
  """Deserialize a YouTubeVideoFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string)
def YouTubeVideoEntryFromString(xml_string):
  """Deserialize a YouTubeVideoEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string)
def YouTubeContactFeedFromString(xml_string):
  """Deserialize a YouTubeContactFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string)
def YouTubeContactEntryFromString(xml_string):
  """Deserialize a YouTubeContactEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string)
def YouTubeVideoCommentFeedFromString(xml_string):
  """Deserialize a YouTubeVideoCommentFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string)
def YouTubeVideoCommentEntryFromString(xml_string):
  """Deserialize a YouTubeVideoCommentEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string)
def YouTubeUserFeedFromString(xml_string):
  """Deserialize a user feed from its Atom XML string.

  NOTE(review): this parses into YouTubeVideoFeed rather than a user-entry
  feed class -- presumably because a user's feed lists uploaded videos;
  confirm this is intentional before changing it.
  """
  return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string)
def YouTubeUserEntryFromString(xml_string):
  """Deserialize a YouTubeUserEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string)
def YouTubePlaylistFeedFromString(xml_string):
  """Deserialize a YouTubePlaylistFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string)
def YouTubePlaylistVideoFeedFromString(xml_string):
  """Deserialize a YouTubePlaylistVideoFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string)
def YouTubePlaylistEntryFromString(xml_string):
  """Deserialize a YouTubePlaylistEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string)
def YouTubePlaylistVideoEntryFromString(xml_string):
  """Deserialize a YouTubePlaylistVideoEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string)
def YouTubeSubscriptionFeedFromString(xml_string):
  """Deserialize a YouTubeSubscriptionFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string)
def YouTubeSubscriptionEntryFromString(xml_string):
  """Deserialize a YouTubeSubscriptionEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string)
def YouTubeVideoResponseFeedFromString(xml_string):
  """Deserialize a YouTubeVideoResponseFeed from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string)
def YouTubeVideoResponseEntryFromString(xml_string):
  """Deserialize a YouTubeVideoResponseEntry from its Atom XML string."""
  return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string)
| apache-2.0 |
varunarya10/boto | boto/opsworks/exceptions.py | 185 | 1288 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class ResourceNotFoundException(JSONResponseError):
    """AWS OpsWorks error: the requested resource does not exist."""
    pass
class ValidationException(JSONResponseError):
    """AWS OpsWorks error: the request failed parameter validation."""
    pass
| mit |
AndyLavr/Aspire-SW5-012_Kernel_4.8 | tools/perf/scripts/python/netdev-times.py | 268 | 15299 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module-level state shared by the tracepoint callbacks and handlers below.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (set in trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
def diff_msec(src, dst):
    """Return the interval from src to dst (both nanoseconds) in msec."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    """Print one tx record: device, length, enqueue time, and latencies."""
    # Honor the "dev=" option: skip packets from other devices.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing (PF_ = "print format")
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    """Print one rx hunk: its irqs, softirq entry, and per-packet events."""
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honor the "dev=" option)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # All timestamps below are printed relative to the first irq entry.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' means the skb was copied to a process; 'handle' means
            # it was freed (kfree_skb or consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """perf callback: parse script options (tx, rx, dev=<name>, debug).

    Defaults to showing both tx and rx charts when neither was requested.
    """
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            # Idiom fix: startswith() replaces find('dev=', 0, 4) >= 0,
            # which anchored the search to the first four characters anyway.
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """perf callback: replay buffered events in time order, then report."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each buffered tuple to its handler
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with the "debug" option, report buffer occupancy and overflow counts
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    """perf tracepoint callback: buffer NET_RX softirq-entry events."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    """perf tracepoint callback: buffer NET_RX softirq-exit events."""
    # NOTE(review): passes "irq__softirq_entry" as the event name --
    # presumably the "vec" symbol table is shared across softirq events;
    # confirm before changing.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    """perf tracepoint callback: buffer NET_RX softirq-raise events."""
    # NOTE(review): "irq__softirq_entry" event name reused here, as in
    # irq__softirq_exit above -- presumably a shared vec symbol table.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           callchain, irq, irq_name):
    """perf tracepoint callback: buffer hard-irq handler entry events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
    """perf tracepoint callback: buffer hard-irq handler exit events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
                    dev_name, work=None, budget=None):
    """perf tracepoint callback: buffer napi poll events (work/budget optional)."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name, work, budget)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                           skblen, dev_name):
    """perf tracepoint callback: buffer netif_receive_skb events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                  skblen, dev_name):
    """perf tracepoint callback: buffer netif_rx events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
                       skbaddr, skblen, dev_name):
    """perf tracepoint callback: buffer Qdisc enqueue (dev_queue_xmit) events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
                      skbaddr, skblen, rc, dev_name):
    """perf tracepoint callback: buffer device transmit events (rc = driver status)."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, protocol, location):
    """perf tracepoint callback: buffer skb free (kfree_skb) events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
    """perf tracepoint callback: buffer skb consume (normal free) events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
                                 skbaddr, skblen):
    """perf tracepoint callback: buffer copy-to-userspace events."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new hard-irq record onto this CPU's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
    """Pop this CPU's irq record; keep it only if it accumulated events."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched irq number means entry/exit got out of sync; drop silently.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Record a NET_RX raise against the irq currently on this CPU's stack."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX accounting slot for this CPU."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """Close this CPU's NET_RX slot and merge its irqs/events into a hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Need both the triggering irq(s) and the softirq slot to form a hunk.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information realted to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Append a napi_poll event to this CPU's open NET_RX slot, if any."""
    (name, context, cpu, time, pid, comm, napi, dev_name,
        work, budget) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time,
                    'work':work, 'budget':budget}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    """Record a netif_rx against the irq currently on this CPU's stack."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb in the NET_RX slot and the rx matching buffer."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        # Also keep the record for later matching with copy/free events;
        # evict the oldest when over budget and count the overflow.
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc; evict the oldest when over budget."""
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On successful transmit, move the packet from the Qdisc list to xmit list."""
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a freed skb against the queue, xmit and rx buffers (in that order)."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # Dropped before it ever left the Qdisc: discard the record.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after transmit: complete the tx record.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Received skb freed without reaching a process: annotate its record.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A transmitted skb was consumed (normal free): complete its tx record."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """A received skb reached a process: annotate and retire its rx record."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
nazo/ansible | lib/ansible/modules/network/lenovo/cnos_showrun.py | 19 | 4981 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to display running config of Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_showrun
author: "Dave Kasberg (@dkasberg)"
short_description: Collect the current running configuration on devices running Lenovo CNOS
description:
- This module allows you to view the switch running configuration. It executes the display running-config CLI
command on a switch and returns a file containing the current running configuration of the target network
device. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_showrun.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_showrun. These are written in the main.yml file of the tasks directory.
---
- name: Run show running-config
cnos_showrun:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_showrun_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Running Configuration saved in file]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
# Optional dependency probe: the cnos helpers live in ansible.module_utils.
try:
    from ansible.module_utils import cnos
    HAS_LIB = True
except ImportError:
    # Fix: catch only ImportError -- the bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt and masked unrelated bugs during import.
    HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point: SSH to the CNOS switch, capture 'display running-config',
    append the transcript to the requested output file, and report the result
    through the Ansible module interface.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    cliCommand = "display running-config"
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    output = ""
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0 (disable paging so the full config streams back)
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Send the CLI command
    output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "#", 2, remote_conn)
    # Fix: write through a context manager (the original bound the handle to
    # `file`, shadowing the builtin, and leaked it if write() raised).
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="Running Configuration saved in file ")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
josiahseaman/DNAResearch | Repeat_Graph.py | 1 | 8201 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
from IPython.core.display import Image
# <codecell>
# Build a 10x10 grid of increasing integers (pixel-value experiment).
data = []
for y in range(10):
    data.append([y + x for x in range(10)])
# print(data)
# NOTE(review): IPython.core.display.Image expects raw image bytes (or a
# filename/url), not a list of lists -- this call presumably fails as
# written; verify before reusing this cell.
Image(data=data)
# <headingcell level=1>
# Matplot Lib
# <codecell>
# Flux-qubit potential demo (standard matplotlib example).
alpha = 0.7
phi_ext = 2 * pi * 0.5

def flux_qubit_potential(phi_m, phi_p):
    """Potential energy of a flux qubit evaluated on a (phi_m, phi_p) grid.

    Relies on the module globals ``alpha`` and ``phi_ext`` above and on
    ``cos`` star-imported from pylab, so it broadcasts over arrays.
    """
    return 2 + alpha - 2 * cos(phi_p)*cos(phi_m) - alpha * cos(phi_ext - 2*phi_p)

# Sample the potential on a 100x100 grid over one full period.
phi_m = linspace(0, 2*pi, 100)
phi_p = linspace(0, 2*pi, 100)
X,Y = meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
# <codecell>
# Pseudo-color plot of the potential; axes rescaled to units of 2*pi.
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*pi), Y/(2*pi), Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <codecell>
# 3-D surface plot of a random matrix (Axes3D smoke test).
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import random

bignum = 10
data = []
for i in range(bignum):
    data.append([random.random() for x in range(bignum)])
mat = np.array(data)  #random.random((bignum, bignum))
X, Y = np.mgrid[:bignum, :bignum]
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
surf = ax.plot_surface(X,Y,mat)
plt.show()
# <headingcell level=2>
# Most simple Pixel Map
# <codecell>
def basic_func(x, y):
    """Simplest possible pixel value: the sum of the two grid coordinates."""
    total = x + y
    return total
# Evaluate basic_func over a bignum x bignum grid and render it as a
# pseudo-color image (sanity check for the pcolor pipeline).
X, Y = np.mgrid[:bignum, :bignum]
Z = basic_func(X, Y)
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <headingcell level=2>
# Basic Repeat Map
# <codecell>
raster_width = 11
seq = 'TCTCGTGAACCGTTCTTTCCCGCGGACGTGATGGATGTGGGTGCCTTTATTTGCGACGATATGGTCCGTAAATTAGGTCTCGTTGTTTGTACCCTCTCACTTGGCCGCTTCAACTTTTTTCCGATAATGTCTAATGCACCGACGGAATTATTGTACAGAGTAGCAAGCTCAGGTTGCACGGCAGACCTTGCCGCGTCGGGTCTGCGCACCACCCAAATCTGGGCGCGTCTGGGCCTCGCTGCTACACTGGTTAACCATGCTTCAGACTCTGTGACGATGAAATATCCAAACGACGTTGAATAAAAACGACGGGGAGCGGCGGTGATTTTTATCAATCGCGGTGAAGCAGTTATGCTCGACATCTATTAACAACAGGAGAAAGGCGCCACCGCTCCGGTGTATTATACACTGGGCCGTTTGACCGTCTCATCGACGGGCAACATGACCAAACCGCACATGCATTTCTCGGGCCGAATCGCCCGCGCCTACTGGAAAGCCGGCTCTGGCGATTATGCCGATTTTGAAAGTTTTCTTTCATCCAAAGCGTATATTATTCAGTTTCAAATATCGACCTTGCTGAAGAAAATCAAAGTGATCTTTTTATTTAAAGGCAATGATGGCGATGTACTTAATCGTGCGATCGCTTTGCGGCAGGGCCCCCGTTGGAATAGATTTGATATGCAGGAGCTGTATCCGATCTGGCATATTCTGTCCAATTAACAGCGCAATATCAACGCTGCGCTGTCTCTGCTGGTCGGCGAACACGGACTGATTCAGTCTCCTTTGGCAGGTTTCGTACAAGGTACCACGCTGAGCGCCCTGGGCCAACGGGACTTTGCACTGCGTAAGGACGCAGTGGAAGTGGGCTCCCTGAACCCTGAAGCCGGTGAAGACAAACGTACGACCATCATCTTTACCTATGTACTGCAGCAGCAAGGTTACAAATCCGGTAAATGTTGCGGCGAGGATAAATATGACGTTATTCTGAAAGAAGGGATTATCTACTATACCGTAGTTCTGATCATCCGGGGCTTCAAAGATTCAGACAAGGACGAAGATGACGGACTTAAACATGCGCTTGAAGGATTCGAAGGCGAACGTGGCGCTGCTCTGTCGACTGTAGCATCCGCGTCCGCATGGAGGAGTGGTCAACATAACGGCACCACCCCTTCGTCAAAGGTGGCGCAAGAACTCCGCCAGAAACGCTGCAATTCCAATACAAACATCACCTGCCCACACGTAAACCTTGAACTTAACAAGATATATCGGCTCTTCCCGCTCCAAAACTAAAAGATACCGGACGTGATCGCGATCAGAGGCAAATACTTGACTCATAAGCTGTCAACGGTTGATTTACTGGGTTTTTCTCCGCCAACCTGTCTGCGCTTGCATGATTATGAAGCCGTGTCAGATCCGATGAAAGTGGCGAATTTCCATAACCAGATGGGTTTCTTGGTAGGCGATGCCATCTTCGTTCAGGAACTCATCAAACAGACGGTCGCGCTGATCATTAACAAAGTAAAAAACCCTGGTGGCCTGAAACAGCGAGCCTCAGAAAAACCGAACTCTCAGCTAGTTTGAGGTGGGTCTAATCATGAGCCAGCACTGCGCGACCGTGGGTCTCGTATTCTGGGTGAGCGCGTGCGTGACGATATTCTGTATCTTGTTAACATGGGTTTTAAACATTCGTTCTTGGCTGACCGTGTCATCATGATCAAGATTGAAGAAGAGCTGCATTTTCATACCCAGAGCTACGAGGTCACCTCGCTCGGACAGGGGGTCAGTAATTACCTGGTCACAGCCGATGCGAAAGCCCCAAAACGTCGCCAACTGGCATATCATCTTGGTACTGGGTTCTCATCATTCTACGCTGGGGCGGATGATCAGGCGTCGCGCGTGGAAGTCAAACAGATGCAACGGATCCTGATTGCAGCCGCCCTGCCGGGCCTCCGAAAGAAATTGCGCCTGGATGCACACAATGAATTTATTGTCC
CAATCATGACCGAGTTCGACCAGACCGGCCCCTTAACCTTAGGCTACGCATCAGAAAAACGCGCGCTCGATAACATCATGGTGAGTCAGGATTCTGTGCTGGGGAATCTCTTTATGAAATTTTTAGGTGTGCTGGTGGTCGGTATCAGCCGGACAGCGATAGCGGACCCAGATAAGTATATGGCTATTCTGCTGGGTGCGGTTTTCGACATGCTGGCGATGAAAATCATTGAAGTCTTAGATGTTACGTCCAACCGCAACTATTTGACCAATCGCCGTACGACGGAAATCGCAGCTGTGGCAGAAACCTGTGAGGACGGAGCGTTTGTGATGCTGCTGACCACGTGGCTGGGCAAGAAGTCGGATTCCCTGAAGTTCCCTAACTTAGTGATTGTCTATTATATAGTTATGGTCGGCGGCCCGTGCACCGGAGAGCAGCAGAAACGTGCTACAGCAGCCATGAGTAGCGAAATTGCGCTCCAGCCGTATTTCCGCTTCCGCCGGATTGAGCACACTGTCCGCGGCCGCGTCTTTTGACTGGAAAAAAGTTTCGGCGAAGACGCCGGCGATAATCTGGTCTCCAACAAAACCAAACGTCGCGGTAAAGGGCCGCAGTTTAAATATGTGGAACTGGCAGAACTGACCTTAATCAAGCTGTCGATTTGAGGCGGTGTAGCTAACATGGGAGGTAATGCACGTCATGGAATGAAAGGCATTCTGGGTCCGCTGCGCGTTGCCTCTTTAGCTTATCAGGCGAAAGGTGTCATCGGTTTATCTATGTTAAAAAACTGGGCTCCGGCCTAACAAAAAAATCTGCTGTCAGTTGCTGTACTGGTCCCGCTGAGCGCGAGCACAGGGAGCGCCCTGGAAATGGTGCGCGGTCTGAAAGAAGGCAACGCAGTCTTGGTGGCGAAGATGGGGATCGCCAAAGGAGCGACAGGTCGCTGGGCGGCTGTGGCAGATGGTAACGTCGCACCTCCGCTTCGCGAGCAATTAAACTTTCAGGCT'
# <codecell>
seq = 'CTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTACTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGC'
# <codecell>
#seq[1*x_size : 1*x_size + raster_width]
# Notebook inspection only: peek at a slice of the sequence.
seq[7:15]
# <codecell>
# Booleans sum as ints -- the "count matches in a window" idiom used below.
sum([True, False, True,True])
# <codecell>
# Repeat-map parameters.
raster_width = 11
x_size = 75 # frequency range
y_size = int(len(seq) / raster_width) # number of lines: (cut off the end)
raster_width
# <codecell>
def repeat_score(x, y):
    """Count positions where line ``y`` of the sequence matches itself
    shifted right by ``x`` characters.

    Reads the module globals ``seq`` and ``raster_width``.  ``zip``
    truncates to the shorter slice, which handles the ragged end of the
    sequence automatically.
    """
    window = seq[y*raster_width : (y+1)*raster_width]
    shifted = seq[y*raster_width + x : (y+1)*raster_width + x]
    return sum(a == b for a, b in zip(window, shifted))
# <codecell>
# Smoke test: score every (shift, line) pair, ignoring the last 10 lines.
[[repeat_score(x,y) for x in range(1,x_size-1)] for y in range(y_size-10)]
# <codecell>
# Full score matrix, transposed so lines run down the y axis.
X, Y = np.mgrid[:x_size, :y_size]
Z = np.array([[repeat_score(x,y) for x in range(1,x_size+1)] for y in range(y_size)]).T
# <codecell>
# Grayscale heat map: bright cells mark shifts at which a line repeats.
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z,
              cmap=cm.Greys_r,
              vmin=0, vmax=raster_width)
cb = fig.colorbar(p, ax=ax)
# <codecell>
x, y = 20, 7
# NOTE(review): this cell indexes with y*x_size where repeat_score uses
# y*raster_width, and start_str/target_str are not defined in this cell --
# as written the last line presumably raises NameError; verify intent.
print( seq[y*x_size : y*x_size + raster_width])
print( seq[y*x_size + x : y*x_size + raster_width + x])
sum([start_str[i] == target_str[i] for i in range(raster_width)])
# <headingcell level=3>
# Notes
# <markdowncell>
# I most of the trouble that I had make this was because I am unfamiliar with NumPy arrays and matplotlib. The lines for Z and p = ax.pcolor(X, Y, Z, cmap=cm.Greys_r, vmin=0, vmax=raster_width) are very sensitive. The good and the bad of having a graphing platform is that I get scale axes for free. It will often squish the pixels. I prefer square pixels. I need to figure out how to generate a highly non-square graph since the Repeat Map is usually 25 wide x 200 high.
# <headingcell level=1>
# Finished Product
# <codecell>
from Sequence_Utils import debugSequence, weighted_sequence
# <codecell>
class RepeatMap():
    """Repeat-structure heat map of a sequence.

    The sequence is cut into lines of ``raster_width`` characters; cell
    (x, y) of the map counts how many characters of line y match the
    sequence shifted right by x.  Bright columns therefore reveal tandem
    repeats with period x.
    """

    def __init__(self, sequence):
        self.seq = sequence
        self.raster_width = 11
        self.x_size = 25  # frequency range
        self.y_size = int(len(self.seq) / self.raster_width)  # number of lines (tail is cut off)

    def repeat_score(self, x, y):
        """Count matches between line ``y`` and the same region shifted by ``x``.

        ``zip`` truncates to the shorter slice, which handles the ragged
        end of the sequence (same as clamping to the overlap width).
        """
        width = self.raster_width
        window = self.seq[y * width:(y + 1) * width]
        shifted = self.seq[y * width + x:(y + 1) * width + x]
        return sum(a == b for a, b in zip(window, shifted))

    def render(self):
        """Draw the map as a grayscale pcolor plot, one pixel per score."""
        grid_x, grid_y = np.mgrid[:self.x_size, :self.y_size]
        scores = np.array([[self.repeat_score(x, y)
                            for x in range(1, self.x_size + 1)]
                           for y in range(self.y_size)]).T
        fig, ax = plt.subplots()
        fig.set_size_inches(self.x_size / 10, self.y_size / 10)
        mesh = ax.pcolor(grid_x, grid_y, scores,
                         cmap=cm.Greys_r,
                         vmin=0, vmax=self.raster_width)
        fig.colorbar(mesh, ax=ax)
        # Sequence position should increase downwards, like a text dot plot.
        plt.gca().invert_yaxis()
# <codecell>
# Demo: render the repeat map of a synthetic debug sequence
# (period 25, 200 lines, noise parameter 5).
rp = RepeatMap(debugSequence(25, 200, 5))
rp.render()
# <codecell>
| apache-2.0 |
jakubbrindza/gtg | GTG/plugins/export/templates.py | 1 | 6180 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 - Luca Invernizzi <invernizzi.l@gmail.com>
# 2012 - Izidor Matušov <izidor.matusov@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
""" Module for discovering templates and work with templates """
from glob import glob
import os.path
import subprocess
import sys
import tempfile
import threading
from GTG.core.dirs import plugin_configuration_dir
from Cheetah.Template import Template as CheetahTemplate
from gi.repository import GObject
TEMPLATE_PATHS = [
os.path.join(plugin_configuration_dir('export'), "export_templates"),
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "export_templates"),
]
def get_templates_paths():
    """Return the full path of every available template file.

    Scans each directory in TEMPLATE_PATHS for files matching the
    ``template_*`` naming convention, preserving directory order.
    """
    return [template_path
            for directory in TEMPLATE_PATHS
            for template_path in glob(os.path.join(directory, "template_*"))]
class Template:
    """Representation of an export template on disk.

    A template is a ``template_*`` file; sibling files sharing its basename
    provide the thumbnail (``thumbnail_*``), an optional post-processing
    script (``script_*``) and a description module (``description_*.py``).
    """

    def __init__(self, path):
        self._template = path
        self._document_path = None  # set by generate() on success
        self._image_path = self._find_file("thumbnail_")
        self._script_path = self._find_file("script_")
        self._title, self._description = self._load_description()

    def _find_file(self, prefix, suffix=""):
        """Find a sibling file of the template given prefix and suffix.

        Returns the first glob match, or None when no such file exists.
        """
        basename = os.path.basename(self._template)
        basename = basename.replace("template_", prefix)
        path = os.path.join(os.path.dirname(self._template), basename)
        path = os.path.splitext(path)[0] + '*' + suffix
        possible_files = glob(path)
        if len(possible_files) > 0:
            return possible_files[0]
        else:
            return None

    def _load_description(self):
        """Return (title, description) of the template.

        Template descriptions are stored in a python module for easier l10n;
        thus, we need to import the module given its path.  Returns empty
        strings when the module is missing or malformed.
        """
        path = self._find_file("description_", ".py")
        if not path:
            return "", ""
        dir_path = os.path.dirname(path)
        if dir_path not in sys.path:
            sys.path.append(dir_path)
        module_name = os.path.basename(path).replace(".py", "")
        try:
            module = __import__(module_name, globals(), locals(),
                                ['description'], 0)
            return module.title, module.description
        except (ImportError, AttributeError):
            return "", ""

    def _get_suffix(self):
        """Return the suffix of the template, including the leading dot
        (as produced by os.path.splitext, e.g. ".html")."""
        return os.path.splitext(self._template)[1]

    def get_path(self):
        """Return path to the template """
        return self._template

    def get_image_path(self):
        """Return path to the image """
        return self._image_path

    def get_title(self):
        """Return title of the template """
        return self._title

    def get_description(self):
        """Return description of the template """
        return self._description

    def get_document_path(self):
        """Return path to generated document.

        Return None until generate() was successful."""
        return self._document_path

    def generate(self, tasks, plugin_api, callback):
        """Fill template and run callback when finished.

        Created files are saved with the same suffix as the template. Opening
        the final file determines its type based on suffix. """
        document = CheetahTemplate(file=self.get_path(),
                                   searchList=[{'tasks': tasks,
                                                'plugin_api': plugin_api}])
        # _get_suffix() already includes the leading dot, so use it as-is;
        # the previous ".%s" formatting produced names like "xxx..html".
        suffix = self._get_suffix()
        # Open in text mode: the rendered template is a str, and the default
        # binary mode of NamedTemporaryFile raises TypeError on write.
        output = tempfile.NamedTemporaryFile(mode="w", suffix=suffix,
                                             delete=False)
        output.write(str(document))
        self._document_path = output.name
        output.close()
        if self._script_path:
            self._run_script(callback)
        else:
            callback()

    def _run_script(self, callback):
        """Run the post-processing script in its own thread; a second thread
        waits for it and schedules callback on the GObject main loop."""
        document_ready = threading.Event()

        def script():
            """Run script using the shebang of the script.

            The script gets the path to a document as its only argument and
            this thread expects the resulting file path as the only output of
            the script. """
            with open(self._script_path, 'r') as script_file:
                first_line = script_file.readline().strip()
            if first_line.startswith('#!'):
                cmd = [first_line[2:], self._script_path,
                       self._document_path]
            else:
                # No shebang: we cannot run the script, so no document.
                cmd = None
                self._document_path = None

            if cmd is not None:
                try:
                    # NOTE(review): communicate() returns bytes (possibly with
                    # a trailing newline) on Python 3; confirm downstream
                    # consumers expect that rather than a clean str path.
                    self._document_path = subprocess.Popen(
                        args=cmd, shell=False,
                        stdout=subprocess.PIPE).communicate()[0]
                except Exception:
                    # Best-effort: a failed script simply yields no document.
                    pass

            if self._document_path and not os.path.exists(self._document_path):
                self._document_path = None
            document_ready.set()

        def wait_for_document():
            """Wait for the completion of the script and finish generation """
            document_ready.wait()
            GObject.idle_add(callback)

        threading.Thread(target=script).start()
        threading.Thread(target=wait_for_document).start()
| gpl-3.0 |
hrashk/sympy | release/fabfile.py | 1 | 29364 | # -*- coding: utf-8 -*-
"""
Fab file for releasing
Please read the README in this directory.
Guide for this file
===================
Vagrant is a tool that gives us a reproducible VM, and fabric is a tool that
we use to run commands on that VM.
Each function in this file should be run as
fab vagrant func
Even those functions that do not use vagrant must be run this way, because of
the vagrant configuration at the bottom of this file.
Any function that should be made avaiable from the command line needs to have
the @task decorator.
Save any files that should be reset between runs somewhere in the repos
directory, so that the remove_userspace() function will clear it. It's best
to do a complete vagrant destroy before a full release, but that takes a
while, so the remove_userspace() ensures that things are mostly reset for
testing.
Do not enforce any naming conventions on the release branch. By tradition, the
name of the release branch is the same as the version being released (like
0.7.3), but this is not required. Use get_sympy_version() and
get_sympy_short_version() to get the SymPy version (the SymPy __version__
*must* be changed in __init__.py for this to work).
"""
from __future__ import print_function
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from fabric.api import env, local, run, sudo, cd, hide, task
from fabric.contrib.files import exists
from fabric.colors import blue
from fabric.utils import error
import unicodedata
import os.path
try:
# https://pypi.python.org/pypi/fabric-virtualenv/
from fabvenv import virtualenv, make_virtualenv
# Note, according to fabvenv docs, always use an absolute path with
# virtualenv().
except ImportError:
error("fabvenv is required. See https://pypi.python.org/pypi/fabric-virtualenv/")
# Note, it's actually good practice to use absolute paths
# everywhere. Otherwise, you will get surprising results if you call one
# function from another, because your current working directory will be
# whatever it was in the calling function, not ~. Also, due to what should
# probably be considered a bug, ~ is not treated as an absolute path. You have
# to explicitly write out /home/vagrant/
env.use_ssh_config = True
try:
# Only works in newer versions of fabric
env.colorize_errors = True
except AttributeError:
pass
def full_path_split(path):
    """
    Split ``path`` into a tuple of all of its components.

    Unlike os.path.split, this recurses through every directory level, so
    "a/b/c" becomes ("a", "b", "c").  A leading separator is dropped.
    """
    # Based on http://stackoverflow.com/a/13505966/161801
    head, tail = os.path.split(path)
    if head and head != os.path.sep:
        return full_path_split(head) + (tail,)
    return (tail,)
@contextmanager
def use_venv(pyversion):
    """
    Context manager that makes make_virtualenv build a virtualenv for the
    given Python version.

    pyversion should be '2' or '3' (ints are accepted and coerced).  For
    '3' the fabric env.virtualenv command is temporarily swapped for one
    that targets /usr/bin/python3, then restored on normal exit.
    """
    pyversion = str(pyversion)
    if pyversion not in ('2', '3'):
        raise ValueError("pyversion must be one of '2' or '3', not %s" % pyversion)
    if pyversion == '3':
        saved_cmd = env.virtualenv
        env.virtualenv = 'virtualenv -p /usr/bin/python3'
        yield
        env.virtualenv = saved_cmd
    else:
        # Python 2 is the default; nothing to change.
        yield
@task
def prepare():
    """
    Setup the VM

    This only needs to be run once. It downloads all the necessary software,
    and a git cache. To reset this, use vagrant destroy and vagrant up.  Note,
    this may take a while to finish, depending on your internet connection
    speed.
    """
    # Ordering matters: install apt packages first, then clone the bare
    # git cache that later tasks use as a --reference.
    prepare_apt()
    checkout_cache()
@task
def prepare_apt():
    """
    Download software from apt

    Note, on a slower internet connection, this will take a while to finish,
    because it has to download many packages, include latex and all its
    dependencies.
    """
    sudo("apt-get -qq update")
    sudo("apt-get -y install git python3 make python-virtualenv zip python-dev")
    # Needed to build the docs
    sudo("apt-get -y install graphviz inkscape texlive texlive-xetex texlive-fonts-recommended texlive-latex-extra")
    # Our Ubuntu is too old to include Python 3.3
    sudo("apt-get -y install python-software-properties")
    sudo("add-apt-repository -y ppa:fkrull/deadsnakes")
    sudo("apt-get -y update")
    sudo("apt-get -y install python3.3")
@task
def remove_userspace():
    """
    Deletes (!) the SymPy changes. Use with great care.

    This should be run between runs to reset everything.
    """
    # Everything release-related lives under ~/repos on the VM, so wiping
    # that directory resets the workspace without a full vagrant destroy.
    run("rm -rf repos")
@task
def checkout_cache():
    """
    Checkout a cache of SymPy

    This should only be run once. The cache is used as a --reference for git
    clone. This makes deleting and recreating the SymPy repo a la
    remove_userspace() and gitrepos() and clone very fast.
    """
    # Recreate from scratch so a stale/corrupt cache never lingers.
    run("rm -rf sympy-cache.git")
    run("git clone --bare https://github.com/sympy/sympy.git sympy-cache.git")
@task
def gitrepos(branch=None, fork='sympy'):
    """
    Clone the repo

    fab vagrant prepare (namely, checkout_cache()) must be run first. By
    default, the branch checked out is the same one as the one checked out
    locally. The master branch is not allowed--use a release branch (see the
    README). No naming convention is put on the release branch.

    To test the release, create a branch in your fork, and set the fork
    option.
    """
    with cd("/home/vagrant"):
        if not exists("sympy-cache.git"):
            error("Run fab vagrant prepare first")
    if not branch:
        # Use the current branch (of this git repo, not the one in Vagrant)
        branch = local("git rev-parse --abbrev-ref HEAD", capture=True)
    if branch == "master":
        raise Exception("Cannot release from master")
    run("mkdir -p repos")
    with cd("/home/vagrant/repos"):
        # --reference points at the bare cache so the clone is nearly instant.
        run("git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git".format(fork=fork))
    with cd("/home/vagrant/repos/sympy"):
        run("git checkout -t origin/%s" % branch)
@task
def get_sympy_version(version_cache=[]):
    """
    Get the full version of SymPy being released (like 0.7.3.rc1)
    """
    # NOTE: the mutable default argument is used *deliberately* here as a
    # cross-call memoization cache -- do not "fix" it.
    if version_cache:
        return version_cache[0]
    if not exists("/home/vagrant/repos/sympy"):
        gitrepos()
    with cd("/home/vagrant/repos/sympy"):
        version = run('python -c "import sympy;print(sympy.__version__)"')
    # Sanity-check that the remote command echoed a single clean token.
    assert '\n' not in version
    assert ' ' not in version
    assert '\t' not in version
    version_cache.append(version)
    return version
@task
def get_sympy_short_version():
    """
    Get the short version of SymPy being released, not including any rc tags
    (like 0.7.3)
    """
    version = get_sympy_version()
    return '.'.join(version.split('.')[:3]) # Remove any rc tags
@task
def test_sympy():
    """
    Run the SymPy test suite

    Runs on the freshly cloned repo inside the VM, not the local checkout.
    """
    with cd("/home/vagrant/repos/sympy"):
        run("./setup.py test")
@task
def test_tarball(release='2'):
    """
    Test that the tarball can be unpacked and installed, and that sympy
    imports in the install.

    release selects the Python major version ('2' or '3') used for the
    throwaway virtualenv.
    """
    if release not in {'2', '3'}: # TODO: Add win32
        raise ValueError("release must be one of '2', '3', not %s" % release)
    venv = "/home/vagrant/repos/test-{release}-virtualenv".format(release=release)
    tarball_formatter_dict = tarball_formatter()
    with use_venv(release):
        make_virtualenv(venv)
        with virtualenv(venv):
            run("cp /vagrant/release/{source} releasetar.tar".format(**tarball_formatter_dict))
            run("tar xvf releasetar.tar")
            with cd("/home/vagrant/{source-orig-notar}".format(**tarball_formatter_dict)):
                run("python setup.py install")
            # Import check runs outside the source dir so the installed
            # package, not the local tree, is what gets imported.
            run('python -c "import sympy; print(sympy.__version__)"')
@task
def release(branch=None, fork='sympy'):
    """
    Perform all the steps required for the release, except uploading

    In particular, it builds all the release files, and puts them in the
    release/ directory in the same directory as this one. At the end, it
    prints some things that need to be pasted into various places as part of
    the release.

    To test the release, push a branch to your fork on GitHub and set the fork
    option to your username.
    """
    remove_userspace()
    gitrepos(branch, fork)
    # This has to be run locally because it itself uses fabric. I split it out
    # into a separate script so that it can be used without vagrant.
    local("../bin/mailmap_update.py")
    source_tarball()
    build_docs()
    copy_release_files()
    test_tarball('2')
    test_tarball('3')
    compare_tar_against_git()
    print_authors()
    GitHub_release()
@task
def source_tarball():
    """
    Build the source tarball and the win32 installer
    """
    with cd("/home/vagrant/repos/sympy"):
        # Start from a pristine tree so no build artifacts leak in.
        run("git clean -dfx")
        run("./setup.py clean")
        run("./setup.py sdist")
        run("./setup.py bdist_wininst")
        # bdist_wininst names the file after the build host platform;
        # rename it to the canonical win32 name.
        run("mv dist/{win32-orig} dist/{win32}".format(**tarball_formatter()))
@task
def build_docs():
    """
    Build the html and pdf docs

    Uses a dedicated virtualenv with a pinned Sphinx so doc builds are
    reproducible.
    """
    with cd("/home/vagrant/repos/sympy"):
        run("mkdir -p dist")
        venv = "/home/vagrant/docs-virtualenv"
        make_virtualenv(venv, dependencies=['sphinx==1.1.3', 'numpy'])
        with virtualenv(venv):
            with cd("/home/vagrant/repos/sympy/doc"):
                run("make clean")
                run("make html-errors")
                with cd("/home/vagrant/repos/sympy/doc/_build"):
                    run("mv html {html-nozip}".format(**tarball_formatter()))
                    run("zip -9lr {html} {html-nozip}".format(**tarball_formatter()))
                    run("cp {html} ../../dist/".format(**tarball_formatter()))
                run("make clean")
                run("make latex")
                with cd("/home/vagrant/repos/sympy/doc/_build/latex"):
                    run("make")
                    run("cp {pdf-orig} ../../../dist/{pdf}".format(**tarball_formatter()))
@task
def copy_release_files():
    """
    Move the release files from the VM to release/ locally

    /vagrant is the shared folder, so copying there lands the files in the
    host's release/ directory.
    """
    with cd("/home/vagrant/repos/sympy"):
        run("mkdir -p /vagrant/release")
        run("cp dist/* /vagrant/release/")
@task
def show_files(file, print_=True):
    """
    Show the contents of a tarball.

    The current options for file are

    source: The source tarball
    win: The Python 2 Windows installer (Not yet implemented!)
    html: The html docs zip

    Note, this runs locally, not in vagrant.
    """
    # NOTE(review): the parameter 'file' shadows the Python 2 builtin of the
    # same name; renaming would change the keyword-argument interface.
    # TODO: Test the unarchived name. See
    # https://code.google.com/p/sympy/issues/detail?id=3988.
    if file == 'source':
        ret = local("tar tf release/{source}".format(**tarball_formatter()), capture=True)
    elif file == 'win':
        # TODO: Windows
        raise NotImplementedError("Windows installers")
    elif file == 'html':
        ret = local("unzip -l release/{html}".format(**tarball_formatter()), capture=True)
    else:
        raise ValueError(file + " is not valid")
    if print_:
        print(ret)
    return ret
# If a file does not end up in the tarball that should, add it to setup.py if
# it is Python, or MANIFEST.in if it is not. (There is a command at the top
# of setup.py to gather all the things that should be there).
# TODO: Also check that this whitelist isn't growning out of date from files
# removed from git.
# TODO: Address the "why?" comments below.
# Files that are in git that should not be in the tarball
git_whitelist = {
# Git specific dotfiles
'.gitattributes',
'.gitignore',
'.mailmap',
# Travis
'.travis.yml',
# This is the file you should edit if not enough ends up in the tarball
'MANIFEST.in',
# Experimental Cythonization support. Not for production
'Makefile',
# Nothing from bin/ should be shipped unless we intend to install it. Most
# of this stuff is for development anyway. To run the tests from the
# tarball, use setup.py test, or import sympy and run sympy.test() or
# sympy.doctest().
'bin/adapt_paths.py',
'bin/ask_update.py',
'bin/coverage_doctest.py',
'bin/coverage_report.py',
'bin/doctest',
'bin/generate_test_list.py',
'bin/get_sympy.py',
'bin/py.bench',
'bin/mailmap_update.py',
'bin/strip_whitespace',
'bin/sympy_time.py',
'bin/sympy_time_cache.py',
'bin/test',
'bin/test_import',
'bin/test_import.py',
'bin/test_isolated',
'bin/test_travis.sh',
# This is also related to Cythonization
'build.py',
# The notebooks are not ready for shipping yet. They need to be cleaned
# up, and preferrably doctested. See also
# https://code.google.com/p/sympy/issues/detail?id=2940.
'examples/advanced/identitysearch_example.ipynb',
'examples/beginner/plot_advanced.ipynb',
'examples/beginner/plot_colors.ipynb',
'examples/beginner/plot_discont.ipynb',
'examples/beginner/plot_gallery.ipynb',
'examples/beginner/plot_intro.ipynb',
'examples/intermediate/limit_examples_advanced.ipynb',
'examples/intermediate/schwarzschild.ipynb',
'examples/notebooks/density.ipynb',
'examples/notebooks/fidelity.ipynb',
'examples/notebooks/fresnel_integrals.ipynb',
'examples/notebooks/qubits.ipynb',
'examples/notebooks/sho1d_example.ipynb',
'examples/notebooks/spin.ipynb',
'examples/notebooks/trace.ipynb',
# This stuff :)
'release/.gitignore',
'release/README.md',
'release/Vagrantfile',
'release/fabfile.py',
# This is just a distribute version of setup.py. Used mainly for setup.py
# develop, which we don't care about in the release tarball
'setupegg.py',
# We don't ship the benchmarks (why?)
'sympy/benchmarks/bench_meijerint.py',
'sympy/benchmarks/bench_symbench.py',
'sympy/core/benchmarks/bench_arit.py',
'sympy/core/benchmarks/bench_assumptions.py',
'sympy/core/benchmarks/bench_basic.py',
'sympy/core/benchmarks/bench_expand.py',
'sympy/core/benchmarks/bench_numbers.py',
'sympy/core/benchmarks/bench_sympify.py',
'sympy/functions/elementary/benchmarks/bench_exp.py',
'sympy/functions/special/benchmarks/bench_special.py',
# More benchmarks
'sympy/integrals/benchmarks/bench_integrate.py',
'sympy/integrals/benchmarks/bench_trigintegrate.py',
'sympy/logic/benchmarks/input/10.cnf',
'sympy/logic/benchmarks/input/100.cnf',
'sympy/logic/benchmarks/input/105.cnf',
'sympy/logic/benchmarks/input/110.cnf',
'sympy/logic/benchmarks/input/115.cnf',
'sympy/logic/benchmarks/input/120.cnf',
'sympy/logic/benchmarks/input/125.cnf',
'sympy/logic/benchmarks/input/130.cnf',
'sympy/logic/benchmarks/input/135.cnf',
'sympy/logic/benchmarks/input/140.cnf',
'sympy/logic/benchmarks/input/145.cnf',
'sympy/logic/benchmarks/input/15.cnf',
'sympy/logic/benchmarks/input/150.cnf',
'sympy/logic/benchmarks/input/20.cnf',
'sympy/logic/benchmarks/input/25.cnf',
'sympy/logic/benchmarks/input/30.cnf',
'sympy/logic/benchmarks/input/35.cnf',
'sympy/logic/benchmarks/input/40.cnf',
'sympy/logic/benchmarks/input/45.cnf',
'sympy/logic/benchmarks/input/50.cnf',
'sympy/logic/benchmarks/input/55.cnf',
'sympy/logic/benchmarks/input/60.cnf',
'sympy/logic/benchmarks/input/65.cnf',
'sympy/logic/benchmarks/input/70.cnf',
'sympy/logic/benchmarks/input/75.cnf',
'sympy/logic/benchmarks/input/80.cnf',
'sympy/logic/benchmarks/input/85.cnf',
'sympy/logic/benchmarks/input/90.cnf',
'sympy/logic/benchmarks/input/95.cnf',
'sympy/logic/benchmarks/run-solvers.py',
'sympy/logic/benchmarks/test-solver.py',
'sympy/matrices/benchmarks/bench_matrix.py',
# More benchmarks...
'sympy/polys/benchmarks/__init__.py',
'sympy/polys/benchmarks/bench_galoispolys.py',
'sympy/polys/benchmarks/bench_groebnertools.py',
'sympy/polys/benchmarks/bench_solvers.py',
'sympy/series/benchmarks/bench_limit.py',
'sympy/solvers/benchmarks/bench_solvers.py',
# Example on how to use tox to test Sympy. For development.
'tox.ini.sample',
}
# Files that should be in the tarball should not be in git
tarball_whitelist = {
"PKG-INFO", # Generated by setup.py. Contains metadata for PyPI.
}
@task
def compare_tar_against_git():
    """
    Compare the contents of the tarball against git ls-files

    Reports three mismatch categories and errors out if any non-whitelisted
    file is misplaced (present in the tarball but not git, or vice versa).
    """
    with hide("commands"):
        with cd("/home/vagrant/repos/sympy"):
            git_lsfiles = set([i.strip() for i in run("git ls-files").split("\n")])
        tar_output_orig = set(show_files('source', print_=False).split("\n"))
        tar_output = set()
    for file in tar_output_orig:
        # The tar files are like sympy-0.7.3/sympy/__init__.py, and the git
        # files are like sympy/__init__.py.
        split_path = full_path_split(file)
        if split_path[-1]:
            # Exclude directories, as git ls-files does not include them
            tar_output.add(os.path.join(*split_path[1:]))
    # print tar_output
    # print git_lsfiles
    fail = False
    print()
    print(blue("Files in the tarball from git that should not be there:",
        bold=True))
    print()
    for line in sorted(tar_output.intersection(git_whitelist)):
        fail = True
        print(line)
    print()
    print(blue("Files in git but not in the tarball:", bold=True))
    print()
    for line in sorted(git_lsfiles - tar_output - git_whitelist):
        fail = True
        print(line)
    print()
    print(blue("Files in the tarball but not in git:", bold=True))
    print()
    for line in sorted(tar_output - git_lsfiles - tarball_whitelist):
        fail = True
        print(line)
    if fail:
        error("Non-whitelisted files found or not found in the tarball")
@task
def md5(file='*', print_=True):
    """
    Print the md5 sums of the release files

    Returns lines of the form "<md5>\t<basename>".
    """
    out = local("md5sum release/" + file, capture=True)
    # Remove the release/ part for printing. Useful for copy-pasting into the
    # release notes.
    out = [i.split() for i in out.strip().split('\n')]
    out = '\n'.join(["%s\t%s" % (i, os.path.split(j)[1]) for i, j in out])
    if print_:
        print(out)
    return out
descriptions = OrderedDict([
('source', "The SymPy source installer.",),
('win32', "Python Windows 32-bit installer.",),
('html', '''Html documentation for the Python 2 version. This is the same as
the <a href="http://docs.sympy.org/latest/index.html">online documentation</a>.''',),
('pdf', '''Pdf version of the <a href="http://docs.sympy.org/latest/index.html"> html documentation</a>.''',),
])
@task
def table():
    """
    Make an html table of the downloads.

    This is for pasting into the GitHub releases page. See GitHub_release().
    """
    tarball_formatter_dict = tarball_formatter()
    shortversion = get_sympy_short_version()
    tarball_formatter_dict['version'] = shortversion
    # Parse "md5\tname" lines into a name -> md5 mapping.
    md5s = [i.split('\t') for i in md5(print_=False).split('\n')]
    md5s_dict = {name: md5 for md5, name in md5s}
    # NOTE(review): the local list 'table' shadows this function's own name
    # inside the body; harmless here since the function is not recursive.
    table = []

    # http://docs.python.org/2/library/contextlib.html#contextlib.contextmanager. Not
    # recommended as a real way to generate html, but it works better than
    # anything else I've tried.
    @contextmanager
    def tag(name):
        # Emits the opening tag, lets the with-body append content, then
        # emits the matching closing tag.
        table.append("<%s>" % name)
        yield
        table.append("</%s>" % name)

    with tag('table'):
        with tag('tr'):
            for headname in ["Filename", "Description", "md5"]:
                with tag("th"):
                    table.append(headname)
        for key in descriptions:
            name = get_tarball_name(key)
            with tag('tr'):
                with tag('td'):
                    # code renders better than tt or pre
                    with tag('code'):
                        table.append(name)
                with tag('td'):
                    table.append(descriptions[key].format(**tarball_formatter_dict))
                with tag('td'):
                    table.append(md5s_dict[name])
    out = ' '.join(table)
    return out
@task
def GitHub_release():
    """
    Generate text to put in the GitHub release Markdown box

    Prints (and returns) the release-notes blurb with the downloads table
    produced by table() embedded in it.
    """
    shortversion = get_sympy_short_version()
    htmltable = table()
    out = """\
See https://github.com/sympy/sympy/wiki/release-notes-for-{shortversion} for the release notes.

{htmltable}

**Note**: Do not download the `Source code (zip)` or the `Source code (tar.gz)`
files below.
"""
    out = out.format(shortversion=shortversion, htmltable=htmltable)
    print(blue("Here are the release notes to copy into the GitHub release "
        "Markdown form:", bold=True))
    print()
    print(out)
    return out
@task
def get_tarball_name(file):
    """
    Get the name of a tarball

    file should be one of

    source-orig:       The original name of the source tarball
    source-orig-notar: The name of the untarred directory
    source:            The source tarball (after renaming)
    win32-orig:        The original name of the win32 installer
    win32:             The name of the win32 installer (after renaming)
    html:              The name of the html zip
    html-nozip:        The name of the html, without ".zip"
    pdf-orig:          The original name of the pdf file
    pdf:               The name of the pdf file (after renaming)
    """
    version = get_sympy_version()
    # Per-type fragments for the {extension}/{wintype} placeholders. A
    # defaultdict('') lets types that never use a placeholder format it away.
    extension_for = defaultdict(str, {'html': 'zip', 'pdf': 'pdf'})
    wintype_for = defaultdict(str, {'win32': 'win32', 'win32-orig': 'linux-i686'})
    # One filename template per recognized artifact type.
    templates = {
        'source-orig': 'sympy-{version}.tar.gz',
        'source': 'sympy-{version}.tar.gz',
        'source-orig-notar': 'sympy-{version}',
        'win32': 'sympy-{version}.{wintype}.exe',
        'win32-orig': 'sympy-{version}.{wintype}.exe',
        'html': 'sympy-docs-{type}-{version}.{extension}',
        'html-nozip': 'sympy-docs-{type}-{version}',
        'pdf': 'sympy-docs-{type}-{version}.{extension}',
        'pdf-orig': 'sympy-{version}.pdf',
    }
    if file not in templates:
        raise ValueError(file + " is not a recognized argument")
    return templates[file].format(version=version, type=file,
                                  extension=extension_for[file],
                                  wintype=wintype_for[file])
# Every artifact-type name accepted by get_tarball_name(); used by
# tarball_formatter() to precompute all release filenames at once.
tarball_name_types = {
    'source-orig',
    'source-orig-notar',
    'source',
    'win32-orig',
    'win32',
    'html',
    'html-nozip',
    'pdf-orig',
    'pdf',
}
# This has to be a function, because you cannot call any function here at
# import time (before the vagrant() function is run).
def tarball_formatter():
    """Return {artifact type: release filename} for every known type."""
    formatted = {}
    for tarball_type in tarball_name_types:
        formatted[tarball_type] = get_tarball_name(tarball_type)
    return formatted
@task
def get_previous_version_tag():
    """
    Get the version of the previous release

    :return: the git tag (e.g. ``sympy-0.7.2``) of the most recent release
        whose short version differs from the current one
    """
    # We try, probably too hard, to portably get the number of the previous
    # release of SymPy. Our strategy is to look at the git tags. The
    # following assumptions are made about the git tags:
    # - The only tags are for releases
    # - The tags are given the consistent naming:
    #    sympy-major.minor.micro[.rcnumber]
    #    (e.g., sympy-0.7.2 or sympy-0.7.2.rc1)
    # In particular, it goes back in the tag history and finds the most recent
    # tag that doesn't contain the current short version number as a substring.
    shortversion = get_sympy_short_version()
    curcommit = "HEAD"
    with cd("/home/vagrant/repos/sympy"):
        while True:
            curtag = run("git describe --abbrev=0 --tags " +
                curcommit).strip()
            if shortversion in curtag:
                # If the tagged commit is a merge commit, we cannot be sure
                # that it will go back in the right direction. This almost
                # never happens, so just error
                parents = local("git rev-list --parents -n 1 " + curtag,
                    capture=True).strip().split()
                # rev-list prints the current commit and then all its parents
                assert len(parents) == 2, curtag
                curcommit = curtag + "^" # The parent of the tagged commit
            else:
                print(blue("Using {tag} as the tag for the previous "
                    "release.".format(tag=curtag), bold=True))
                return curtag
    # NOTE(review): unreachable — the `while True` loop above only exits via
    # the `return` (or by an exception propagating out), so this never runs.
    error("Could not find the tag for the previous release.")
@task
def get_authors():
    """
    Get the list of authors since the previous release

    Returns the list in alphabetical order by last name.  Authors who
    contributed for the first time for this release will have a star appended
    to the end of their names.

    Note: it's a good idea to use ./bin/mailmap_update.py (from the base sympy
    directory) to make AUTHORS and .mailmap up-to-date first before using
    this. fab vagrant release does this automatically.

    :return: (sorted author names, total author count, new author count)
    """
    def lastnamekey(name):
        """
        Sort key to sort by last name

        Note, we decided to sort based on the last name, because that way is
        fair. We used to sort by commit count or line number count, but that
        bumps up people who made lots of maintenance changes like updating
        mpmath or moving some files around.
        """
        # Note, this will do the wrong thing for people who have multi-word
        # last names, but there are also people with middle initials. I don't
        # know of a perfect way to handle everyone. Feel free to fix up the
        # list by hand.

        # Note, you must call unicode() *before* lower, or else it won't
        # lowercase non-ASCII characters like Č -> č
        # NOTE(review): unicode() exists only on Python 2, so this fabfile
        # requires a Python 2 interpreter — confirm before porting.
        text = unicode(name.strip().split()[-1], encoding='utf-8').lower()
        # Convert things like Čertík to Certik
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')

    old_release_tag = get_previous_version_tag()
    # Authors since the previous tag vs. authors up to it.
    with cd("/home/vagrant/repos/sympy"), hide('commands'):
        releaseauthors = set(run('git --no-pager log {tag}.. --format="%aN"'.format(tag=old_release_tag)).strip().split('\n'))
        priorauthors = set(run('git --no-pager log {tag} --format="%aN"'.format(tag=old_release_tag)).strip().split('\n'))
    releaseauthors = {name.strip() for name in releaseauthors if name.strip()}
    priorauthors = {name.strip() for name in priorauthors if name.strip()}
    # First-time contributors get a trailing star.
    newauthors = releaseauthors - priorauthors
    starred_newauthors = {name + "*" for name in newauthors}
    authors = releaseauthors - newauthors | starred_newauthors
    return (sorted(authors, key=lastnamekey), len(releaseauthors), len(newauthors))
@task
def print_authors():
    """
    Print authors text to put at the bottom of the release notes
    """
    authors, authorcount, newauthorcount = get_authors()
    print(blue("Here are the authors to put at the bottom of the release "
        "notes.", bold=True))
    print()
    print("""## Authors

The following people contributed at least one patch to this release (names are
given in alphabetical order by last name). A total of {authorcount} people
contributed to this release. People with a * by their names contributed a
patch for the first time for this release; {newauthorcount} people contributed
for the first time for this release.

Thanks to everyone who contributed to this release!
""".format(authorcount=authorcount, newauthorcount=newauthorcount))
    for name in authors:
        print("- " + name)
    print()
# ------------------------------------------------
# PyPI
@task
def upload():
    """
    Upload the files everywhere (PyPI and GitHub)

    For now, it is just PyPI, because GitHub doesn't seem to have an API.
    """
    # Sanity-check the sdist metadata before uploading.
    distutils_check()
    # NOTE(review): registration is deliberately skipped here; run
    # pypi_register() manually when needed — confirm release procedure.
    #pypi_register()
    pypi_upload()
@task
def distutils_check():
    """
    Runs setup.py check

    Validates the package metadata under both Python 2 and Python 3.
    """
    with cd("/home/vagrant/repos/sympy"):
        run("python setup.py check")
        run("python3 setup.py check")
@task
def pypi_register():
    """
    Register a release with PyPI

    This should only be done for the final release. You need PyPI
    authentication to do this.
    """
    with cd("/home/vagrant/repos/sympy"):
        run("python setup.py register")
@task
def pypi_upload():
    """
    Upload files to PyPI. You will need to enter a password.
    """
    with cd("/home/vagrant/repos/sympy"):
        # XXX: Doesn't actually work yet
        run("python setupegg.py upload")
# ------------------------------------------------
# Vagrant related configuration
@task
def vagrant():
    """
    Run commands using vagrant

    Points Fabric's env at the local Vagrant VM so that subsequent run()
    calls execute inside the VM.
    """
    vc = get_vagrant_config()
    # change from the default user to 'vagrant'
    env.user = vc['User']
    # connect to the port-forwarded ssh
    env.hosts = ['%s:%s' % (vc['HostName'], vc['Port'])]
    # use vagrant ssh key (ssh-config may quote the path)
    env.key_filename = vc['IdentityFile'].strip('"')
    # Forward the agent if specified:
    env.forward_agent = vc.get('ForwardAgent', 'no') == 'yes'
def get_vagrant_config():
    """
    Parses vagrant configuration and returns it as dict of ssh parameters
    and their values

    :return: dict mapping ssh-config keywords (e.g. ``HostName``, ``Port``,
        ``IdentityFile``) to their values
    """
    result = local('vagrant ssh-config', capture=True)
    conf = {}
    # splitlines() already yields an iterable; the old iter() wrapper was
    # redundant. Blank lines (common between Host stanzas in ssh-config
    # output) previously crashed with IndexError on parts[0] — skip them.
    for line in result.splitlines():
        parts = line.split()
        if not parts:
            continue
        conf[parts[0]] = ' '.join(parts[1:])

    return conf
@task
def restart_network():
    """
    Do this if the VM won't connect to the internet.
    """
    run("sudo /etc/init.d/networking restart")
# ---------------------------------------
# Just a simple testing command:

@task
def uname():
    """
    Get the uname in Vagrant. Useful for testing that Vagrant works.
    """
    run('uname -a')
| bsd-3-clause |
CharlesGulian/Deconv | fits_tools_tesla.py | 1 | 5575 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 21:18:54 2016
@author: charlesgulian
"""
import os
#os.chdir('/Users/annepstein/Work/Deconv')
curr_dir = os.getcwd()
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
#from photutils import aperture_photometry
#from photutils import CircularAperture
def binImage(pixelArray, M=3, N=3):
    '''
    - Bins pixels along image axes into MxN bins (default MxN = 3x3)
    - Returns a dict keyed by 1-based (row_bin, col_bin) tuples, each value
      being the corresponding sub-array (a view into pixelArray)
    '''
    rows, cols = np.shape(pixelArray)
    row_step = float(rows) / float(M)
    col_step = float(cols) / float(N)

    bins = {}
    for row_bin in range(M):
        for col_bin in range(N):
            # Bin edges: ceil on the lower edge, floor on the upper edge,
            # so fractional bin sizes never overlap between bins.
            r0 = int(np.ceil(row_bin * row_step))
            r1 = int(np.floor((row_bin + 1) * row_step))
            c0 = int(np.ceil(col_bin * col_step))
            c1 = int(np.floor((col_bin + 1) * col_step))
            bins[row_bin + 1, col_bin + 1] = pixelArray[r0:r1, c0:c1]
    return bins
def computeObjectFlux(x0,y0,radius,image):
    '''
    - Compute total flux within circular aperture of the given radius
      from source at image coordinates (x0,y0)
    - NOTE(review): CircularAperture/aperture_photometry come from photutils,
      whose imports are commented out at the top of this module, so calling
      this raises NameError unless they are re-enabled — confirm.
    '''
    position = (x0,y0)
    aperture = CircularAperture(position,r=radius)
    # aperture_photometry returns a table; [0][0] is the summed flux value.
    return aperture_photometry(image,aperture)[0][0]
# getPixels() can be replaced by fits.getdata() (I did not know this)
def getPixels(image_file, delete=False):
    """Return the primary-HDU pixel array of a .fits file.

    With delete=True the HDU's data attribute is dropped after the array
    has been extracted (frees the lazy-loaded buffer on the HDU object).
    """
    hdus = fits.open(image_file)
    pixel_data = hdus[0].data
    hdus.close()
    if delete:
        del hdus[0].data
    return pixel_data
def applyMask(image, mask):
    '''
    - Apply a binary mask to an array: the result is the elementwise
      product of image and mask, so masked-out (0) pixels become 0
    '''
    return np.multiply(image, mask)
def maskImage(image_file,mask_file,masked_image_file=None,Return=False):
    '''
    - Takes a .fits image file and .fits binary mask file as input
    - Applies binary mask to .fits image data
    - Rewrites masked image to new .fits file (masked_image_file); when no
      output name is given, derives one from image_file (``_masked`` suffix,
      relocated from Good/Bad to the MaskedImages directory)
    '''
    image = fits.getdata(image_file)
    mask = fits.getdata(mask_file)
    masked_image = applyMask(image,mask)
    # Nudge exact zeros to a tiny positive value so downstream math on the
    # masked image does not produce NaNs.
    inds = np.where(masked_image == 0.0)
    masked_image[inds] += 1e-12 # Prevent NaNs
    if masked_image_file == None:
        masked_image_file = image_file.replace('.fits','_masked.fits').replace('Good','MaskedImages').replace('Bad','MaskedImages')
    # Preserve the original header; clobber overwrites any existing file.
    fits.writeto(masked_image_file,masked_image,fits.getheader(image_file),clobber=True)
    if Return:
        return masked_image
def shift_image(image, x_offset, y_offset):
    '''
    - Shifts image pixels from (x,y) to (x - x_offset, y - y_offset)
    - Output pixels whose source would fall outside the frame are filled
      with 1e-8 (small non-zero background, matching the original behavior)
    - Offsets are assumed to be integers (pixel shifts)
    '''
    dims = np.shape(image)  # Image dimensions
    dim1, dim2 = dims[0], dims[1]
    # NumPy axis 0 is the image y-axis, so the offsets are intentionally
    # swapped here: dx walks rows (axis 0), dy walks columns (axis 1).
    dy, dx = x_offset, y_offset
    shifted_image = np.zeros(dims) + 1e-8  # background fill value
    # Output pixel (i, j) takes image[i+dx, j+dy] wherever that source index
    # is in-bounds. Compute the valid destination window once and copy it as
    # a single array slice instead of the old O(rows*cols) per-pixel loop.
    i_start, i_stop = max(0, -dx), min(dim1, dim1 - dx)
    j_start, j_stop = max(0, -dy), min(dim2, dim2 - dy)
    if i_start < i_stop and j_start < j_stop:
        shifted_image[i_start:i_stop, j_start:j_stop] = \
            image[i_start + dx:i_stop + dx, j_start + dy:j_stop + dy]
    return shifted_image
def subtractBias(image_file,new_image_file=None,bias=0.0,Return=False):
    '''
    - Takes a .fits image file as input
    - Subtracts the constant `bias` level from image data, writes new data
      to new image file (new_image_file); defaults to overwriting in place
    - With the default bias=0.0 this just rewrites the file unchanged
    '''
    if new_image_file == None:
        new_image_file = image_file
    image = fits.getdata(image_file)
    image -= bias
    # Keep the original header; clobber overwrites an existing output file.
    fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
    if Return:
        return image
def subtractMedian(image_file,new_image_file=None,Return=False):
    '''
    - Takes a .fits image file as input
    - Subtracts median from image data, writes new data to new image file
      (new_image_file); defaults to overwriting the input file in place
    '''
    if new_image_file == None:
        new_image_file = image_file
    image = fits.getdata(image_file)
    image -= np.median(image)
    # Keep the original header; clobber overwrites an existing output file.
    fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
    if Return:
        return image
def write_pixel_offset(x_offset,y_offset,image_file,new_image_file=None):
    # Add (x,y) pixel offset keywords to the .FITS header of an image.
    header = fits.getheader(image_file) # Get original .FITS header
    header['x_offset'] = x_offset # Add new keywords and values to header
    header['y_offset'] = y_offset
    # If no new image file specified, default writes new header to original image header
    if new_image_file == None:
        new_image_file = image_file
    # Write header to new image (pixel data is copied through unchanged)
    fits.writeto(new_image_file,fits.getdata(image_file),header,clobber=True)
'''
# Testing:
test_image_file = 'AstroImages/Good/fpC-6484-x4078-y134_stitched_alignCropped.fits'
test_image = fits.getdata(test_image_file)
catalog = 'Results/fpC-6484-x4078-y134_stitched_alignCropped_fpC-6484-x4078-y134_stitched_alignCropped_compare.cat'
import sex_stats
fig = sex_stats.data(catalog)
x = fig.get_data('X_IMAGE')
y = fig.get_data('Y_IMAGE')
xlow = np.where(x > 651.0)
xhigh = np.where(x < 658.9)
xin = np.intersect1d(xlow,xhigh)
ylow = np.where(y > 820.0)
yhigh = np.where(y < 826.0)
yin = np.intersect1d(ylow,yhigh)
obj = np.intersect1d(xin,yin)
DATA = fig.Data
x,y = 848.39102,727.23274
radius = 10.
flux = computeObjectFlux(x,y,radius,test_image)
print flux
#testMask = 'AstroImages/Masks/fpC-6484-x4078-y134_stitched_alignCropped_mask.fits'
#maskImage(testImage,testMask)
'''
| gpl-3.0 |
r39132/airflow | airflow/contrib/operators/mlengine_operator.py | 1 | 23245 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from googleapiclient.errors import HttpError
from airflow.contrib.hooks.gcp_mlengine_hook import MLEngineHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
def _normalize_mlengine_job_id(job_id):
"""
Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation.
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r'\d|\{{2}', job_id)
if match and match.start() == 0:
job = 'z_{}'.format(job_id)
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ''
for m in re.finditer(r'\{{2}.+?\}{2}', job):
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
job[tracker:m.start()])
cleansed_job_id += job[m.start():m.end()]
tracker = m.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id
class MLEngineBatchPredictionOperator(BaseOperator):
    """
    Start a Google Cloud ML Engine prediction job.

    NOTE: For model origin, users should consider exactly one from the
    three options below:

    1. Populate ``uri`` field only, which should be a GCS location that
       points to a tensorflow savedModel directory.
    2. Populate ``model_name`` field only, which refers to an existing
       model, and the default version of the model will be used.
    3. Populate both ``model_name`` and ``version_name`` fields, which
       refers to a specific version of a specific model.

    In options 2 and 3, both model and version name should contain the
    minimal identifier. For instance, call::

        MLEngineBatchPredictionOperator(
            ...,
            model_name='my_model',
            version_name='my_version',
            ...)

    if the desired model version is
    ``projects/my_project/models/my_model/versions/my_version``.

    See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs
    for further documentation on the parameters.

    :param project_id: The Google Cloud project name where the
        prediction job is submitted. (templated)
    :type project_id: str
    :param job_id: A unique id for the prediction job on Google Cloud
        ML Engine. (templated)
    :type job_id: str
    :param data_format: The format of the input data.
        It will default to 'DATA_FORMAT_UNSPECIFIED' if is not provided
        or is not one of ["TEXT", "TF_RECORD", "TF_RECORD_GZIP"].
    :type data_format: str
    :param input_paths: A list of GCS paths of input data for batch
        prediction. Accepting wildcard operator ``*``, but only at the end. (templated)
    :type input_paths: list[str]
    :param output_path: The GCS path where the prediction results are
        written to. (templated)
    :type output_path: str
    :param region: The Google Compute Engine region to run the
        prediction job in. (templated)
    :type region: str
    :param model_name: The Google Cloud ML Engine model to use for prediction.
        If version_name is not provided, the default version of this
        model will be used.
        Should not be None if version_name is provided.
        Should be None if uri is provided. (templated)
    :type model_name: str
    :param version_name: The Google Cloud ML Engine model version to use for
        prediction.
        Should be None if uri is provided. (templated)
    :type version_name: str
    :param uri: The GCS path of the saved model to use for prediction.
        Should be None if model_name is provided.
        It should be a GCS path pointing to a tensorflow SavedModel. (templated)
    :type uri: str
    :param max_worker_count: The maximum number of workers to be used
        for parallel processing. Defaults to 10 if not specified.
    :type max_worker_count: int
    :param runtime_version: The Google Cloud ML Engine runtime version to use
        for batch prediction.
    :type runtime_version: str
    :param gcp_conn_id: The connection ID used for connection to Google
        Cloud Platform.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must
        have domain-wide delegation enabled.
    :type delegate_to: str
    :raises: ``ValueError``: if a unique model/version origin cannot be
        determined.
    """

    # Attribute names (not constructor arg names) that accept Jinja templates.
    template_fields = [
        '_project_id',
        '_job_id',
        '_region',
        '_input_paths',
        '_output_path',
        '_model_name',
        '_version_name',
        '_uri',
    ]

    @apply_defaults
    def __init__(self,
                 project_id,
                 job_id,
                 region,
                 data_format,
                 input_paths,
                 output_path,
                 model_name=None,
                 version_name=None,
                 uri=None,
                 max_worker_count=None,
                 runtime_version=None,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(MLEngineBatchPredictionOperator, self).__init__(*args, **kwargs)

        self._project_id = project_id
        self._job_id = job_id
        self._region = region
        self._data_format = data_format
        self._input_paths = input_paths
        self._output_path = output_path
        self._model_name = model_name
        self._version_name = version_name
        self._uri = uri
        self._max_worker_count = max_worker_count
        self._runtime_version = runtime_version
        self._gcp_conn_id = gcp_conn_id
        self._delegate_to = delegate_to

        # Validate required arguments and that exactly one model origin
        # (uri alone, model alone, or model+version) was provided.
        if not self._project_id:
            raise AirflowException('Google Cloud project id is required.')
        if not self._job_id:
            raise AirflowException(
                'An unique job id is required for Google MLEngine prediction '
                'job.')

        if self._uri:
            if self._model_name or self._version_name:
                raise AirflowException('Ambiguous model origin: Both uri and '
                                       'model/version name are provided.')

        if self._version_name and not self._model_name:
            raise AirflowException(
                'Missing model: Batch prediction expects '
                'a model name when a version name is provided.')

        if not (self._uri or self._model_name):
            raise AirflowException(
                'Missing model origin: Batch prediction expects a model, '
                'a model & version combination, or a URI to a savedModel.')

    def execute(self, context):
        job_id = _normalize_mlengine_job_id(self._job_id)
        # Build the projects.jobs request body for the MLEngine API.
        prediction_request = {
            'jobId': job_id,
            'predictionInput': {
                'dataFormat': self._data_format,
                'inputPaths': self._input_paths,
                'outputPath': self._output_path,
                'region': self._region
            }
        }

        if self._uri:
            prediction_request['predictionInput']['uri'] = self._uri
        elif self._model_name:
            origin_name = 'projects/{}/models/{}'.format(
                self._project_id, self._model_name)
            if not self._version_name:
                prediction_request['predictionInput'][
                    'modelName'] = origin_name
            else:
                prediction_request['predictionInput']['versionName'] = \
                    origin_name + '/versions/{}'.format(self._version_name)

        if self._max_worker_count:
            prediction_request['predictionInput'][
                'maxWorkerCount'] = self._max_worker_count

        if self._runtime_version:
            prediction_request['predictionInput'][
                'runtimeVersion'] = self._runtime_version

        hook = MLEngineHook(self._gcp_conn_id, self._delegate_to)

        # Helper method to check if the existing job's prediction input is the
        # same as the request we get here (makes re-runs idempotent: an
        # identical pre-existing job is reused instead of failing).
        def check_existing_job(existing_job):
            return existing_job.get('predictionInput', None) == \
                prediction_request['predictionInput']

        try:
            finished_prediction_job = hook.create_job(
                self._project_id, prediction_request, check_existing_job)
        except HttpError:
            raise

        if finished_prediction_job['state'] != 'SUCCEEDED':
            self.log.error(
                'MLEngine batch prediction job failed: %s', str(finished_prediction_job)
            )
            raise RuntimeError(finished_prediction_job['errorMessage'])

        return finished_prediction_job['predictionOutput']
class MLEngineModelOperator(BaseOperator):
    """
    Operator for managing a Google Cloud ML Engine model.

    :param project_id: The Google Cloud project name to which MLEngine
        model belongs. (templated)
    :type project_id: str
    :param model: A dictionary containing the information about the model.
        If the `operation` is `create`, then the `model` parameter should
        contain all the information about this model such as `name`.

        If the `operation` is `get`, the `model` parameter
        should contain the `name` of the model.
    :type model: dict
    :param operation: The operation to perform. Available operations are:

        * ``create``: Creates a new model as provided by the `model` parameter.
        * ``get``: Gets a particular model where the name is specified in `model`.
    :type operation: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    """

    # Only the model dict is Jinja-templated.
    template_fields = [
        '_model',
    ]

    @apply_defaults
    def __init__(self,
                 project_id,
                 model,
                 operation='create',
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(MLEngineModelOperator, self).__init__(*args, **kwargs)
        self._project_id = project_id
        self._model = model
        self._operation = operation
        self._gcp_conn_id = gcp_conn_id
        self._delegate_to = delegate_to

    def execute(self, context):
        hook = MLEngineHook(
            gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
        if self._operation == 'create':
            return hook.create_model(self._project_id, self._model)
        elif self._operation == 'get':
            # Lookup only needs the model's name from the dict.
            return hook.get_model(self._project_id, self._model['name'])
        else:
            raise ValueError('Unknown operation: {}'.format(self._operation))
class MLEngineVersionOperator(BaseOperator):
    """
    Operator for managing a Google Cloud ML Engine version.

    :param project_id: The Google Cloud project name to which MLEngine
        model belongs.
    :type project_id: str
    :param model_name: The name of the Google Cloud ML Engine model that the version
        belongs to. (templated)
    :type model_name: str
    :param version_name: A name to use for the version being operated upon.
        If not None and the `version` argument is None or does not have a value for
        the `name` key, then this will be populated in the payload for the
        `name` key. (templated)
    :type version_name: str
    :param version: A dictionary containing the information about the version.
        If the `operation` is `create`, `version` should contain all the
        information about this version such as name, and deploymentUrl.
        If the `operation` is `set_default`, `list` or `delete`, the
        `version` parameter should contain the `name` of the version.
        If it is None, the only `operation` possible would be `list`. (templated)
    :type version: dict
    :param operation: The operation to perform. Available operations are
        (NOTE: this matches the branches of ``execute`` below — there is no
        ``get`` operation):

        * ``create``: Creates a new version in the model specified by `model_name`,
            in which case the `version` parameter should contain all the
            information to create that version
            (e.g. `name`, `deploymentUrl`).
        * ``set_default``: Sets the version specified in the `version`
            parameter as the default version of the model.
        * ``list``: Lists all available versions of the model specified
            by `model_name`.
        * ``delete``: Deletes the version specified in `version` parameter from the
            model specified by `model_name`).
            The name of the version should be specified in the `version`
            parameter.
    :type operation: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    """

    template_fields = [
        '_model_name',
        '_version_name',
        '_version',
    ]

    @apply_defaults
    def __init__(self,
                 project_id,
                 model_name,
                 version_name=None,
                 version=None,
                 operation='create',
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(MLEngineVersionOperator, self).__init__(*args, **kwargs)
        self._project_id = project_id
        self._model_name = model_name
        self._version_name = version_name
        self._version = version or {}
        self._operation = operation
        self._gcp_conn_id = gcp_conn_id
        self._delegate_to = delegate_to

    def execute(self, context):
        # version_name is a convenience fallback for the dict's 'name' key.
        if 'name' not in self._version:
            self._version['name'] = self._version_name

        hook = MLEngineHook(
            gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)

        if self._operation == 'create':
            if not self._version:
                raise ValueError("version attribute of {} could not "
                                 "be empty".format(self.__class__.__name__))
            return hook.create_version(self._project_id, self._model_name,
                                       self._version)
        elif self._operation == 'set_default':
            return hook.set_default_version(self._project_id, self._model_name,
                                            self._version['name'])
        elif self._operation == 'list':
            return hook.list_versions(self._project_id, self._model_name)
        elif self._operation == 'delete':
            return hook.delete_version(self._project_id, self._model_name,
                                       self._version['name'])
        else:
            raise ValueError('Unknown operation: {}'.format(self._operation))
class MLEngineTrainingOperator(BaseOperator):
    """
    Operator for launching a MLEngine training job.

    :param project_id: The Google Cloud project name within which MLEngine
        training job should run (templated).
    :type project_id: str
    :param job_id: A unique templated id for the submitted Google MLEngine
        training job. (templated)
    :type job_id: str
    :param package_uris: A list of package locations for MLEngine training job,
        which should include the main training program + any additional
        dependencies. (templated)
    :type package_uris: str
    :param training_python_module: The Python module name to run within MLEngine
        training job after installing 'package_uris' packages. (templated)
    :type training_python_module: str
    :param training_args: A list of templated command line arguments to pass to
        the MLEngine training program. (templated)
    :type training_args: str
    :param region: The Google Compute Engine region to run the MLEngine training
        job in (templated).
    :type region: str
    :param scale_tier: Resource tier for MLEngine training job. (templated)
    :type scale_tier: str
    :param master_type: Cloud ML Engine machine name.
        Must be set when scale_tier is CUSTOM. (templated)
    :type master_type: str
    :param runtime_version: The Google Cloud ML runtime version to use for
        training. (templated)
    :type runtime_version: str
    :param python_version: The version of Python used in training. (templated)
    :type python_version: str
    :param job_dir: A Google Cloud Storage path in which to store training
        outputs and other data needed for training. (templated)
    :type job_dir: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param mode: Can be one of 'DRY_RUN'/'CLOUD'. In 'DRY_RUN' mode, no real
        training job will be launched, but the MLEngine training job request
        will be printed out. In 'CLOUD' mode, a real MLEngine training job
        creation request will be issued.
    :type mode: str
    """

    template_fields = [
        '_project_id',
        '_job_id',
        '_package_uris',
        '_training_python_module',
        '_training_args',
        '_region',
        '_scale_tier',
        '_master_type',
        '_runtime_version',
        '_python_version',
        '_job_dir'
    ]

    @apply_defaults
    def __init__(self,
                 project_id,
                 job_id,
                 package_uris,
                 training_python_module,
                 training_args,
                 region,
                 scale_tier=None,
                 master_type=None,
                 runtime_version=None,
                 python_version=None,
                 job_dir=None,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 mode='PRODUCTION',
                 *args,
                 **kwargs):
        super(MLEngineTrainingOperator, self).__init__(*args, **kwargs)
        self._project_id = project_id
        self._job_id = job_id
        self._package_uris = package_uris
        self._training_python_module = training_python_module
        self._training_args = training_args
        self._region = region
        self._scale_tier = scale_tier
        self._master_type = master_type
        self._runtime_version = runtime_version
        self._python_version = python_version
        self._job_dir = job_dir
        self._gcp_conn_id = gcp_conn_id
        self._delegate_to = delegate_to
        self._mode = mode

        # Fail fast on missing/inconsistent required arguments.
        if not self._project_id:
            raise AirflowException('Google Cloud project id is required.')
        if not self._job_id:
            raise AirflowException(
                'An unique job id is required for Google MLEngine training '
                'job.')
        if not package_uris:
            raise AirflowException(
                'At least one python package is required for MLEngine '
                'Training job.')
        if not training_python_module:
            raise AirflowException(
                'Python module name to run after installing required '
                'packages is required.')
        if not self._region:
            raise AirflowException('Google Compute Engine region is required.')
        if self._scale_tier is not None and self._scale_tier.upper() == "CUSTOM" and not self._master_type:
            raise AirflowException(
                'master_type must be set when scale_tier is CUSTOM')

    def execute(self, context):
        job_id = _normalize_mlengine_job_id(self._job_id)
        # Build the projects.jobs request body for the MLEngine API.
        training_request = {
            'jobId': job_id,
            'trainingInput': {
                'scaleTier': self._scale_tier,
                'packageUris': self._package_uris,
                'pythonModule': self._training_python_module,
                'region': self._region,
                'args': self._training_args,
            }
        }

        if self._runtime_version:
            training_request['trainingInput']['runtimeVersion'] = self._runtime_version

        if self._python_version:
            training_request['trainingInput']['pythonVersion'] = self._python_version

        if self._job_dir:
            training_request['trainingInput']['jobDir'] = self._job_dir

        if self._scale_tier is not None and self._scale_tier.upper() == "CUSTOM":
            training_request['trainingInput']['masterType'] = self._master_type

        # NOTE: only the exact value 'DRY_RUN' short-circuits; any other mode
        # (including the default 'PRODUCTION') submits a real job.
        if self._mode == 'DRY_RUN':
            self.log.info('In dry_run mode.')
            self.log.info('MLEngine Training job request is: %s', training_request)
            return

        hook = MLEngineHook(
            gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)

        # Helper method to check if the existing job's training input is the
        # same as the request we get here (makes re-runs idempotent: an
        # identical pre-existing job is reused instead of failing).
        def check_existing_job(existing_job):
            return existing_job.get('trainingInput', None) == \
                training_request['trainingInput']

        try:
            finished_training_job = hook.create_job(
                self._project_id, training_request, check_existing_job)
        except HttpError:
            raise

        if finished_training_job['state'] != 'SUCCEEDED':
            self.log.error('MLEngine training job failed: %s', str(finished_training_job))
            raise RuntimeError(finished_training_job['errorMessage'])
PanDAWMS/autopyfactory | autopyfactory/plugins/queue/sched/StatusTest.py | 1 | 1879 | #! /usr/bin/env python
#
from autopyfactory.interfaces import SchedInterface
import logging
class StatusTest(SchedInterface):
    """Sched plugin that overrides the pilot count while a site is in 'test' status.

    When the WMS site status is 'test', the number of pilots to submit is
    replaced by the configured ``sched.statustest.pilots`` value; otherwise
    the incoming count is passed through unchanged.
    """
    id = 'statustest'

    def __init__(self, apfqueue, config, section):
        try:
            self.apfqueue = apfqueue
            self.log = logging.getLogger('autopyfactory.sched.%s' %apfqueue.apfqname)
            # Number of pilots to submit while the site is in 'test' status
            # (queue config key sched.statustest.pilots, default 0).
            self.pilots_in_test_mode = self.apfqueue.qcl.generic_get(self.apfqueue.apfqname, 'sched.statustest.pilots', 'getint', default_value=0)
            self.log.debug("SchedPlugin: Object initialized.")
        except Exception as ex:
            self.log.error("SchedPlugin object initialization failed. Raising exception")
            raise ex

    def calcSubmitNum(self, n=0):
        """Return a ``(number_to_submit, status_message)`` tuple for this cycle.

        Falls back to 0 when any of the wms/site/batch status queries
        returns None.
        """
        self.log.debug('Starting.')
        # Refresh status info from the WMS and batch status plugins.
        self.wmsqueueinfo = self.apfqueue.wmsstatus_plugin.getInfo(
            queue=self.apfqueue.wmsqueue)
        self.siteinfo = self.apfqueue.wmsstatus_plugin.getSiteInfo(
            site=self.apfqueue.wmsqueue)
        self.batchinfo = self.apfqueue.batchstatus_plugin.getInfo(
            queue=self.apfqueue.apfqname)

        if self.wmsqueueinfo is None or self.batchinfo is None or self.siteinfo is None:
            # Without complete status information we refuse to submit anything.
            self.log.warning("wmsinfo, batchinfo, or siteinfo is None!")
            out = 0
            msg = "StatusTest:comment=no wms/batch/siteinfo,in=%s,ret=0" %n
        else:
            sitestatus = self.siteinfo.status
            self.log.debug('site status is %s' %sitestatus)
            out = n
            if sitestatus == 'test':
                # Cap submission at the configured test-mode pilot count.
                out = self.pilots_in_test_mode
                msg='StatusTest:comment=test,in=%d,out=%d' % ( n, self.pilots_in_test_mode )
            else:
                msg='StatusTest:comment=not test,in=%s,ret=%s' % (n, out)
        return (out, msg)
| apache-2.0 |
thresholdsoftware/asylum-v2.0 | openerp/addons/hr_attendance/report/__init__.py | 68 | 1115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import attendance_errors
import attendance_by_month
import timesheet
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BD823/pcover | project_version1/booklist.py | 1 | 3959 | # -*- coding: ms949 -*-
import re
import json
# Parse the Project Gutenberg master index (GutindexAll.txt) into a list of
# (title, author, etext-number) tuples and dump them to list.json.
# NOTE(review): the original inline comments were ms949-encoded Korean;
# they have been translated/summarised into English below.

# Read the whole index file into memory.
with open('GutindexAll.txt','r') as gutinfile:
    data=gutinfile.read()
# Python 2: decode with replacement characters so undecodable bytes
# don't abort parsing.
data=unicode(data, errors='replace')

result = []
isStart = False
for line in data.splitlines():
    # Real entries only begin after the "ETEXT NO." column-header line.
    if isStart == False and 'ETEXT NO.' in line:
        isStart = True
    if isStart:
        # Normal entry: "<title>, by <author>   <number>"
        m = re.match(r"(?P<title>[\w].*)\,\ by\ (?P<author>.*?)\s+(?P<number>\d{1,5})\s*$", line)
        if m:
            result.append(m.groups())
        else:
            # Continuation line (starts with a space): the title/author of the
            # previous entry wrapped onto this line.
            if line and line[0] == ' ':
                result[len(result) - 1] = list(result[len(result) - 1])
                if '[' in line or ']' in line or '(' in line or ')' in line:
                    # Bracketed annotation lines are ignored.
                    # NOTE(review): this `continue` skips the tuple() restore
                    # below, leaving the previous entry as a list -- confirm
                    # whether that is intentional.
                    continue
                elif 'by ' in line:
                    # "title ... by author" split across the wrap point:
                    # everything before "by " extends the title, the rest
                    # replaces the author field.
                    s = line.split("by ")
                    result[len(result) - 1][0] = result[len(result) - 1][0] + s[0]
                    result[len(result) - 1][1] = s[1]
                else:
                    # No "by": treat the line as additional co-authors
                    # continuing the author field.
                    result[len(result) - 1][1] = result[len(result) - 1][1] + line
                result[len(result) - 1] = tuple(result[len(result) - 1])
            else:
                # Entry with no author, e.g.
                # "Trial of Jesus from a Lawyer's Standpoint, Vol. I (of II),  40966"
                # -- the author group captures only whitespace.
                m = re.match(r"(?P<title>[\w].*)\,(?P<author>\s+)(?P<number>\d{1,5})\s*$", line)
                if m:
                    result.append(m.groups())

# Strip trailing spaces and commas from every title and author field.
result_len = len(result)
while result_len > 0:
    result[result_len - 1] = list(result[result_len - 1])
    title_len = len(result[result_len-1][0])
    author_len = len(result[result_len-1][1])
    while title_len > 0:
        if result[result_len-1][0][title_len-1] == ' ' or result[result_len-1][0][title_len-1] == ',':
            result[result_len-1][0]=result[result_len-1][0][:-1]
            title_len -= 1
        else:
            break
    while author_len > 0:
        if result[result_len-1][1][author_len-1] == ' ' or result[result_len-1][1][author_len-1] == ',':
            result[result_len-1][1]=result[result_len-1][1][:-1]
            author_len -= 1
        else:
            break
    result[result_len - 1] = tuple(result[result_len - 1])
    result_len -= 1

# Save titles, authors and numbers as three parallel JSON arrays.
jsondic = { "title": [x[0] for x in result],
            "author": [x[1] for x in result],
            "number": [x[2] for x in result]}
j = json.dumps(jsondic,indent=4)
f = open('list.json','w')
print >> f, j
f.close()

print(len(result))
| mit |
catapult-project/catapult | third_party/google-auth/google/auth/transport/_http_client.py | 12 | 3750 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport adapter for http.client, for internal use only."""
import logging
import socket
import six
from six.moves import http_client
from six.moves import urllib
from google.auth import exceptions
from google.auth import transport
_LOGGER = logging.getLogger(__name__)
class Response(transport.Response):
    """http.client transport response adapter.

    Args:
        response (http.client.HTTPResponse): The raw http client response.
    """

    def __init__(self, response):
        # Capture status, headers and body eagerly; header names are
        # lower-cased so later lookups are case-insensitive.
        self._status = response.status
        self._headers = dict(
            (name.lower(), value) for name, value in response.getheaders())
        self._data = response.read()

    @property
    def status(self):
        """int: The HTTP status code."""
        return self._status

    @property
    def headers(self):
        """Mapping[str, str]: The response headers (keys lower-cased)."""
        return self._headers

    @property
    def data(self):
        """bytes: The response body."""
        return self._data
class Request(transport.Request):
    """http.client transport request adapter."""

    def __call__(self, url, method='GET', body=None, headers=None,
                 timeout=None, **kwargs):
        """Make an HTTP request using http.client.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload / body in HTTP request.
            headers (Mapping): Request headers.
            timeout (Optional(int)): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                socket global default timeout will be used.
            kwargs: Additional arguments passed through to the underlying
                :meth:`~http.client.HTTPConnection.request` method.

        Returns:
            Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
        if timeout is None:
            timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        # http.client doesn't allow None as the headers argument.
        if headers is None:
            headers = {}

        # http.client needs the host and path parts specified separately.
        parts = urllib.parse.urlsplit(url)
        path = urllib.parse.urlunsplit(
            ('', '', parts.path, parts.query, parts.fragment))

        if parts.scheme != 'http':
            # Bug fix: the adjacent string literals previously concatenated to
            # "...the http scheme, {}was specified" (missing space).
            raise exceptions.TransportError(
                'http.client transport only supports the http scheme, {} '
                'was specified'.format(parts.scheme))

        connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)

        try:
            _LOGGER.debug('Making request: %s %s', method, url)
            connection.request(
                method, path, body=body, headers=headers, **kwargs)
            response = connection.getresponse()
            return Response(response)
        except (http_client.HTTPException, socket.error) as caught_exc:
            # Wrap any low-level HTTP/socket failure in the library's
            # TransportError, preserving the original exception as the cause.
            new_exc = exceptions.TransportError(caught_exc)
            six.raise_from(new_exc, caught_exc)
        finally:
            # Always release the connection, even on error.
            connection.close()
| bsd-3-clause |
RockySteveJobs/python-for-android | python3-alpha/extra_modules/gdata/alt/app_engine.py | 127 | 3376 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions to persist serialized auth tokens in the datastore.
The get_token and set_token functions should be used in conjunction with
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
to be reused across requests. It is up to your own code to ensure that the
token key's are unique.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
from google.appengine.ext import db
from google.appengine.api import memcache
class Token(db.Model):
    """Datastore Model which stores a serialized auth token."""
    # Serialized token blob; per the module docstring, values are produced by
    # gdata.gauth.token_to_blob and restored with token_from_blob.
    t = db.BlobProperty()
def get_token(unique_key):
    """Searches for a stored token with the desired key.

    Checks memcache and then the datastore if required.

    Args:
      unique_key: str which uniquely identifies the desired auth token.

    Returns:
      A string encoding the auth token data. Use gdata.gauth.token_from_blob to
      convert back into a usable token object. None if the token was not found
      in memcache or the datastore.
    """
    cached = memcache.get(unique_key)
    if cached is not None:
        # Cache hit: no datastore round trip needed.
        return cached
    # Cache miss: fall back to the datastore.
    entity = Token.get_by_key_name(unique_key)
    if entity is None:
        return None
    return entity.t
def set_token(unique_key, token_str):
    """Saves the serialized auth token in the datastore.

    The token is also stored in memcache to speed up retrieval on a cache hit.

    Args:
      unique_key: The unique name for this token as a string. It is up to your
          code to ensure that this token value is unique in your application.
          Previous values will be silently overwritten.
      token_str: A serialized auth token as a string. I expect that this string
          will be generated by gdata.gauth.token_to_blob.

    Returns:
      True if the token was stored successfully, False if the token could not be
      safely cached (if an old value could not be cleared). If the token was
      set in memcache, but not in the datastore, this function will return None.
      However, in that situation an exception will likely be raised.

    Raises:
      Datastore exceptions may be raised from the App Engine SDK in the event of
      failure.
    """
    # First try to save in memcache.
    result = memcache.set(unique_key, token_str)
    # If memcache fails to save the value, clear the cached value.
    if not result:
        result = memcache.delete(unique_key)
        # If we could not clear the cached value for this token, refuse to save.
        # NOTE(review): this relies on memcache.delete's integer return codes
        # (0 indicating a network failure) -- confirm against the App Engine
        # memcache API documentation.
        if result == 0:
            return False
    # Save to the datastore.
    if Token(key_name=unique_key, t=token_str).put():
        return True
    return None
def delete_token(unique_key):
    """Removes the token stored under unique_key from memcache and datastore.

    Args:
      unique_key: str which uniquely identifies the auth token to remove.
    """
    # Clear from memcache.
    memcache.delete(unique_key)
    # Clear from the datastore.
    Token(key_name=unique_key).delete()
| apache-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/account/wizard/account_statement_from_invoice.py | 106 | 3626 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices

    Transient wizard that turns the selected invoice move lines into bank
    statement lines on the statement identified by context['statement_id'].
    """
    _name = "account.statement.from.invoice.lines"
    _description = "Entries by Statement from Invoices"
    _columns = {
        'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
    }

    def populate_statement(self, cr, uid, ids, context=None):
        """Create one bank statement line per selected invoice move line.

        Amounts are signed (debit positive, credit negative), converted to
        the statement currency using today's date as the conversion date,
        and the wizard window is closed when done.
        """
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            # No target statement: nothing to do, just close the wizard.
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}

        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        line_date = time.strftime('%Y-%m-%d')
        statement = statement_obj.browse(cr, uid, statement_id, context=context)

        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            amount = 0.0

            # Debit lines become positive statement amounts, credit lines negative.
            if line.debit > 0:
                amount = line.debit
            elif line.credit > 0:
                amount = -line.credit

            # Convert to the statement currency when the move line carries its
            # own currency amount, or when the linked invoice uses a different
            # currency from the statement.
            if line.amount_currency:
                amount = currency_obj.compute(cr, uid, line.currency_id.id,
                    statement.currency.id, line.amount_currency, context=ctx)
            elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
                amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
                    statement.currency.id, amount, context=ctx)

            # Pass the source move line / invoice through the context so the
            # statement line creation can link back to them.
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lbartoletti/QGIS | tests/src/python/test_qgscoordinateformatter.py | 36 | 32619 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsCoordinateFormatter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/07/2014'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis
from qgis.testing import unittest
from qgis.core import QgsCoordinateFormatter, QgsPointXY
class TestQgsCoordinateFormatter(unittest.TestCase):
def testFormatXPair(self):
"""Test formatting x as pair"""
self.assertEqual(QgsCoordinateFormatter.formatX(20, QgsCoordinateFormatter.FormatPair, 0), '20')
self.assertEqual(QgsCoordinateFormatter.formatX(-20, QgsCoordinateFormatter.FormatPair, 0), '-20')
self.assertEqual(QgsCoordinateFormatter.formatX(20.11111111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.111')
self.assertEqual(QgsCoordinateFormatter.formatX(20.11161111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.112')
self.assertEqual(QgsCoordinateFormatter.formatX(20, QgsCoordinateFormatter.FormatPair, 3), '20.000')
self.assertEqual(QgsCoordinateFormatter.formatX(float('inf'), QgsCoordinateFormatter.FormatPair, 3), 'infinite')
def testFormatYPair(self):
"""Test formatting y as pair"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatPair, 0), '20')
self.assertEqual(QgsCoordinateFormatter.formatY(-20, QgsCoordinateFormatter.FormatPair, 0), '-20')
self.assertEqual(QgsCoordinateFormatter.formatY(20.11111111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.111')
self.assertEqual(QgsCoordinateFormatter.formatY(20.11161111111111111, QgsCoordinateFormatter.FormatPair, 3), '20.112')
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatPair, 3), '20.000')
self.assertEqual(QgsCoordinateFormatter.formatY(float('inf'), QgsCoordinateFormatter.FormatPair, 3), 'infinite')
def testAsPair(self):
"""Test formatting x/y as pair"""
self.assertEqual(QgsCoordinateFormatter.asPair(20, 30, 0), '20,30')
self.assertEqual(QgsCoordinateFormatter.asPair(20, -30, 0), '20,-30')
self.assertEqual(QgsCoordinateFormatter.asPair(20.111, 10.999, 0), '20,11')
self.assertEqual(QgsCoordinateFormatter.asPair(20.111, 10.999, 2), '20.11,11.00')
self.assertEqual(QgsCoordinateFormatter.asPair(20, 10, 2), '20.00,10.00')
self.assertEqual(QgsCoordinateFormatter.asPair(20, -10, 2), '20.00,-10.00')
def testFormat(self):
self.assertEqual(QgsCoordinateFormatter.format(QgsPointXY(20.1, 30.2), QgsCoordinateFormatter.FormatPair, 0), '20,30')
self.assertEqual(QgsCoordinateFormatter.format(QgsPointXY(20.1, 30.2), QgsCoordinateFormatter.FormatPair, 1), '20.1,30.2')
self.assertEqual(QgsCoordinateFormatter.format(QgsPointXY(20, 30), QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 0), '20°0′0″E,30°0′0″N')
    def testFormatXFormatDegreesMinutesSeconds(self):
        """Test formatting x (longitude) as degrees/minutes/seconds.

        Covers precision, >180/<-180 wrap-around, suppression of E/W
        suffixes at 0 and 180 degrees, rounding that must not produce
        seconds >= 60, the no-suffix flag set, and minute/second padding.
        """
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"80°0′0.00″E")

        # check precision
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"80°0′0.0000″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"80°7′24.4444″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 0), u"80°7′24″E")

        # check if longitudes > 180 or <-180 wrap around
        self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″W")
        self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"179°0′0.00″W")
        self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"179°0′0.00″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″W")
        self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″E")

        # should be no directional suffixes for 0 degree coordinates
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        # should also be no directional suffix for 0 degree coordinates within specified precision
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″W")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″E")

        # should be no directional suffixes for 180 degree longitudes
        self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
        # should also be no directional suffix for 180 degree longitudes within specified precision
        self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"179°59′59.99640″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"180°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"179°59′59.99640″W")

        # test rounding does not create seconds >= 60
        self.assertEqual(QgsCoordinateFormatter.formatX(99.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"100°0′0.00″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(89.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"90°0′0.00″E")

        # test without direction suffix
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"80°0′0.00″")
        # test 0 longitude
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        # test near zero longitude
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        # should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00360″")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0′0.00360″")

        # test with padding
        padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"80°00′00.00″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(85.44, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"85°26′24.00″E")
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″W")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″E")
    def testFormatYFormatDegreesMinutesSeconds(self):
        """Test formatting y (latitude) as degrees/minutes/seconds.

        Covers precision, >90/<-90 wrap-around, suppression of N/S suffixes
        at 0 degrees, rounding that must not produce seconds >= 60, the
        no-suffix flag set, and minute/second padding.
        """
        self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"20°0′0.00″N")

        # check precision
        self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"20°0′0.0000″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 4), u"20°7′24.4444″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 0), u"20°7′24″N")

        # check if latitudes > 90 or <-90 wrap around
        self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"10°0′0.00″S")
        self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"89°0′0.00″S")
        self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"89°0′0.00″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″S")
        self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"1°0′0.00″N")

        # should be no directional suffixes for 0 degree coordinates
        self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        # should also be no directional suffix for 0 degree coordinates within specified precision
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5), u"0°0′0.00360″S")

        # test rounding does not create seconds >= 60
        self.assertEqual(QgsCoordinateFormatter.formatY(89.999999, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2), u"90°0′0.00″N")

        # test without direction suffix
        self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"20°0′0.00″")
        # test 0 latitude
        self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        # test near zero lat/long
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        # should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0′0.00360″")
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0′0.00360″")

        # test with padding
        padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
        self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"20°00′00.00″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(85.44, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"85°26′24.00″N")
        self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 2, padding_and_suffix), u"0°00′00.00″")
        self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″S")
        self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutesSeconds, 5, padding_and_suffix), u"0°00′00.00360″N")
    def testFormatXDegreesMinutes(self):
        """Test formatting x (longitude) as degrees/decimal minutes.

        Covers precision, >180/<-180 wrap-around, suppression of E/W
        suffixes at 0 and 180 degrees, rounding that must not produce
        minutes >= 60, the no-suffix flag set, and minute padding.
        """
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"80°0.00′E")

        # check precision
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"80°0.0000′E")
        self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"80°7.4074′E")
        self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 0), u"80°7′E")

        # check if longitudes > 180 or <-180 wrap around
        self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′E")
        self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"179°0.00′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"179°0.00′E")
        self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′E")

        # should be no directional suffixes for 0 degree coordinates
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
        # should also be no directional suffix for 0 degree coordinates within specified precision
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′E")

        # test rounding does not create minutes >= 60
        self.assertEqual(QgsCoordinateFormatter.formatX(99.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"100°0.00′E")

        # should be no directional suffixes for 180 degree longitudes
        self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
        # should also be no directional suffix for 180 degree longitudes within specified precision
        self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"180°0.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"179°59.99994′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"179°59.99994′E")

        # test without direction suffix
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"80°0.00′")
        # test 0 longitude
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
        # test near zero longitude
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
        # should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0.00006′")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0.00006′")

        # test with padding
        padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
        self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"80°00.00′E")
        self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
        self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′W")
        self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′E")
def testFormatYDegreesMinutes(self):
"""Test formatting y as DM"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"20°0.00′N")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"20°0.0000′N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 4), u"20°7.4074′N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDegreesMinutes, 0), u"20°7′N")
# check if latitudes > 90 or <-90 wrap around
self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"10°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"89°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"89°0.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′S")
self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"1°0.00′N")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5), u"0°0.00006′N")
# test rounding does not create minutes >= 60
self.assertEqual(QgsCoordinateFormatter.formatY(79.999999, QgsCoordinateFormatter.FormatDegreesMinutes, 2), u"80°0.00′N")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"20°0.00′")
# test 0 latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# test near zero latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
# should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, QgsCoordinateFormatter.FormatFlags()), u"0°0.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"0°0.00006′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, QgsCoordinateFormatter.FormatFlags()), u"-0°0.00006′")
# test with padding
padding_and_suffix = QgsCoordinateFormatter.FormatFlags(QgsCoordinateFormatter.FlagDegreesPadMinutesSeconds | QgsCoordinateFormatter.FlagDegreesUseStringSuffix)
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"20°00.00′N")
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 2, padding_and_suffix), u"0°00.00′")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDegreesMinutes, 5, padding_and_suffix), u"0°00.00006′N")
def testFormatXDegrees(self):
"""Test formatting x as decimal degrees"""
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"80.00°E")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"80.0000°E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"80.1235°E")
self.assertEqual(QgsCoordinateFormatter.formatX(80.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 0), u"80°E")
# check if longitudes > 180 or <-180 wrap around
self.assertEqual(QgsCoordinateFormatter.formatX(370, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°E")
self.assertEqual(QgsCoordinateFormatter.formatX(-370, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(181, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"179.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(-181, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"179.00°E")
self.assertEqual(QgsCoordinateFormatter.formatX(359, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°W")
self.assertEqual(QgsCoordinateFormatter.formatX(-359, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°E")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°W")
self.assertEqual(QgsCoordinateFormatter.formatX(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°E")
# should be no directional suffixes for 180 degree longitudes
self.assertEqual(QgsCoordinateFormatter.formatX(180, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
# should also be no directional suffix for 180 degree longitudes within specified precision
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"180.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(180.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6), u"179.999999°W")
self.assertEqual(QgsCoordinateFormatter.formatX(179.999999, QgsCoordinateFormatter.FormatDecimalDegrees, 6), u"179.999999°E")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatX(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"80.00°")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# test near zero longitude
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# should be no "-" prefix for near-zero longitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatX(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"0.000001°")
self.assertEqual(QgsCoordinateFormatter.formatX(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"-0.000001°")
def testFormatYDegrees(self):
"""Test formatting y as decimal degrees"""
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"20.00°N")
# check precision
self.assertEqual(QgsCoordinateFormatter.formatY(20, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"20.0000°N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 4), u"20.1235°N")
self.assertEqual(QgsCoordinateFormatter.formatY(20.12345678, QgsCoordinateFormatter.FormatDecimalDegrees, 0), u"20°N")
# check if latitudes > 90 or <-90 wrap around
self.assertEqual(QgsCoordinateFormatter.formatY(190, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°N")
self.assertEqual(QgsCoordinateFormatter.formatY(-190, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"10.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(91, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"89.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(-91, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"89.00°N")
self.assertEqual(QgsCoordinateFormatter.formatY(179, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°S")
self.assertEqual(QgsCoordinateFormatter.formatY(-179, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"1.00°N")
# should be no directional suffixes for 0 degree coordinates
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
# should also be no directional suffix for 0 degree coordinates within specified precision
self.assertEqual(QgsCoordinateFormatter.formatY(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 2), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°S")
self.assertEqual(QgsCoordinateFormatter.formatY(0.00001, QgsCoordinateFormatter.FormatDecimalDegrees, 5), u"0.00001°N")
# test without direction suffix
self.assertEqual(QgsCoordinateFormatter.formatY(80, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"80.00°")
# test 0 longitude
self.assertEqual(QgsCoordinateFormatter.formatY(0, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# test near zero latitude
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
# should be no "-" prefix for near-zero latitude when rounding to 2 decimal places
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 2, QgsCoordinateFormatter.FormatFlags()), u"0.00°")
self.assertEqual(QgsCoordinateFormatter.formatY(0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"0.000001°")
self.assertEqual(QgsCoordinateFormatter.formatY(-0.000001, QgsCoordinateFormatter.FormatDecimalDegrees, 6, QgsCoordinateFormatter.FormatFlags()), u"-0.000001°")
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
vrv/tensorflow | tensorflow/python/training/adam_test.py | 51 | 12346 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Args:
    param: current parameter value (scalar or ndarray).
    g_t: gradient at step t.
    t: 1-based step number, used for bias correction.
    m: first-moment accumulator from the previous step.
    v: second-moment accumulator from the previous step.
    alpha: base learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant added for numerical stability.

  Returns:
    Tuple (updated param, updated m, updated v).
  """
  # Exponential moving averages of the gradient and its square.
  m_next = beta1 * m + (1 - beta1) * g_t
  v_next = beta2 * v + (1 - beta2) * g_t * g_t
  # Bias-corrected effective step size, folded into one scalar.
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  param_next = param - lr_t * m_next / (np.sqrt(v_next) + epsilon)
  return param_next, m_next, v_next
class AdamOptimizerTest(test.TestCase):
  """Tests adam.AdamOptimizer against the NumPy reference implementation."""

  def doTestSparse(self, use_resource=False):
    """Checks 3 Adam steps with sparse (IndexedSlices) gradients.

    Args:
      use_resource: if True, use ResourceVariable instead of Variable.
    """
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        # Wrap dense gradients as IndexedSlices covering every index, so
        # results must match the dense numpy reference exactly.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          # The beta accumulators hold beta^t *before* the t-th update runs.
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSparse(self):
    """Sparse gradients with ref variables."""
    self.doTestSparse(use_resource=False)

  def testResourceSparse(self):
    """Sparse gradients with resource variables."""
    self.doTestSparse(use_resource=True)

  def testSparseDevicePlacement(self):
    """Checks all optimizer ops can be placed on the available device."""
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
        optimizer = adam.AdamOptimizer(3.0)
        minimize_op = optimizer.minimize(gathered_sum)
        variables.global_variables_initializer().run()
        minimize_op.run()

  def testSparseRepeatedIndices(self):
    """Repeated sparse indices must behave like their aggregated sum."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        # Two 0.1 gradients at index 1 ...
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        # ... should equal one 0.2 gradient at index 1.
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adam.AdamOptimizer().apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adam.AdamOptimizer().apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())

  def doTestBasic(self, use_resource=False):
    """Checks 3 Adam steps with dense gradients against the numpy reference.

    Args:
      use_resource: if True, use ResourceVariable instead of Variable.
    """
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testBasic(self):
    """Dense gradients with ref variables."""
    self.doTestBasic(use_resource=False)

  def testResourceBasic(self):
    """Dense gradients with resource variables."""
    self.doTestBasic(use_resource=True)

  def testTensorLearningRate(self):
    """Same as testBasic but with the learning rate passed as a Tensor."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        # Learning rate as a constant tensor instead of a Python float.
        opt = adam.AdamOptimizer(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSharing(self):
    """Two update ops sharing one optimizer must share its slot state."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.AdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testTwoSessions(self):
    """One optimizer instance must be reusable across separate graphs."""
    optimizer = adam.AdamOptimizer()
    g = ops.Graph()
    with g.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        optimizer.apply_gradients([(grads0, var0)])
    gg = ops.Graph()
    with gg.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        # If the optimizer saves any state not keyed by graph the following line
        # fails.
        optimizer.apply_gradients([(grads0, var0)])
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
cherba/apitools | apitools/base/py/transfer.py | 3 | 41282 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import threading
import six
from six.moves import http_client
from apitools.base.py import buffered_stream
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import stream_slice
from apitools.base.py import util
# Public API of this module.
__all__ = [
    'Download',
    'Upload',
    'RESUMABLE_UPLOAD',
    'SIMPLE_UPLOAD',
    'DownloadProgressPrinter',
    'DownloadCompletePrinter',
    'UploadProgressPrinter',
    'UploadCompletePrinter',
]

# Size threshold in bytes (5 MiB) above which uploads default to the
# resumable protocol rather than a single simple upload.
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
# Identifiers for the two supported upload strategies.
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
def DownloadProgressPrinter(response, unused_download):
    """Print download progress based on response."""
    info = response.info
    if 'content-range' not in info:
        print('Received %d bytes' % response.length)
    else:
        print('Received %s' % info['content-range'])
def DownloadCompletePrinter(unused_response, unused_download):
    """Print information about a completed download."""
    message = 'Download complete'
    print(message)
def UploadProgressPrinter(response, unused_upload):
    """Print upload progress based on response."""
    sent_range = response.info['range']
    print('Sent %s' % sent_range)
def UploadCompletePrinter(unused_response, unused_upload):
    """Print information about a completed upload."""
    message = 'Upload complete'
    print(message)
class _Transfer(object):
    """Generic bits common to Uploads and Downloads."""

    def __init__(self, stream, close_stream=False, chunksize=None,
                 auto_transfer=True, http=None, num_retries=5):
        # Optional dedicated http client for byte transfers; falls back
        # to self.http when unset (see the bytes_http property).
        self.__bytes_http = None
        # Whether __del__ should close the underlying stream.
        self.__close_stream = close_stream
        self.__http = http
        self.__stream = stream
        self.__url = None
        self.__num_retries = 5
        # Let the @property do validation
        self.num_retries = num_retries
        self.retry_func = (
            http_wrapper.HandleExceptionsAndRebuildHttpConnections)
        self.auto_transfer = auto_transfer
        # Default chunk size is 1 MiB.
        self.chunksize = chunksize or 1048576

    def __repr__(self):
        return str(self)

    @property
    def close_stream(self):
        # True if this transfer owns (and will close) its stream.
        return self.__close_stream

    @property
    def http(self):
        return self.__http

    @property
    def bytes_http(self):
        # Prefer the dedicated byte-transfer client when one was set.
        return self.__bytes_http or self.http

    @bytes_http.setter
    def bytes_http(self, value):
        self.__bytes_http = value

    @property
    def num_retries(self):
        return self.__num_retries

    @num_retries.setter
    def num_retries(self, value):
        # Retries must be a non-negative integer.
        util.Typecheck(value, six.integer_types)
        if value < 0:
            raise exceptions.InvalidDataError(
                'Cannot have negative value for num_retries')
        self.__num_retries = value

    @property
    def stream(self):
        return self.__stream

    @property
    def url(self):
        return self.__url

    def _Initialize(self, http, url):
        """Initialize this download by setting self.http and self.url.

        We want the user to be able to override self.http by having set
        the value in the constructor; in that case, we ignore the provided
        http.

        Args:
          http: An httplib2.Http instance or None.
          url: The url for this transfer.

        Returns:
          None. Initializes self.
        """
        self.EnsureUninitialized()
        if self.http is None:
            self.__http = http or http_wrapper.GetHttp()
        self.__url = url

    @property
    def initialized(self):
        # Initialized once the transfer has both a url and an http client.
        return self.url is not None and self.http is not None

    @property
    def _type_name(self):
        # Concrete subclass name, used in error messages.
        return type(self).__name__

    def EnsureInitialized(self):
        """Raise TransferInvalidError unless this transfer is initialized."""
        if not self.initialized:
            raise exceptions.TransferInvalidError(
                'Cannot use uninitialized %s', self._type_name)

    def EnsureUninitialized(self):
        """Raise TransferInvalidError if this transfer is already initialized."""
        if self.initialized:
            raise exceptions.TransferInvalidError(
                'Cannot re-initialize %s', self._type_name)

    def __del__(self):
        # Close the stream only if this transfer owns it.
        if self.__close_stream:
            self.__stream.close()

    def _ExecuteCallback(self, callback, response):
        """Run a progress/finish callback on its own thread, if provided."""
        # TODO(craigcitro): Push these into a queue.
        if callback is not None:
            threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):

    """Data for a single download.

    Public attributes:
      chunksize: default chunksize to use for transfers.
    """
    # HTTP statuses that a (possibly ranged) download request may
    # legitimately return.
    _ACCEPTABLE_STATUSES = set((
        http_client.OK,
        http_client.NO_CONTENT,
        http_client.PARTIAL_CONTENT,
        http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
    ))
    # Keys that FromData requires in serialized download info.
    _REQUIRED_SERIALIZATION_KEYS = set((
        'auto_transfer', 'progress', 'total_size', 'url'))
    def __init__(self, stream, progress_callback=None, finish_callback=None,
                 **kwds):
        """Construct a Download that writes into stream.

        Args:
          stream: writable stream that receives the downloaded bytes.
          progress_callback: optional f(response, download) run per chunk.
          finish_callback: optional f(response, download) run on completion.
          **kwds: passed through to _Transfer; may include total_size.
        """
        total_size = kwds.pop('total_size', None)
        super(Download, self).__init__(stream, **kwds)
        self.__initial_response = None
        # Number of bytes transferred so far.
        self.__progress = 0
        self.__total_size = total_size
        self.__encoding = None
        self.progress_callback = progress_callback
        self.finish_callback = finish_callback
    @property
    def progress(self):
        # Bytes downloaded so far.
        return self.__progress
    @property
    def encoding(self):
        # Content encoding for this download, if one has been recorded
        # (None until set elsewhere).
        return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None,
**kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize( # pylint: disable=protected-access
http, info['url'])
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
    @property
    def total_size(self):
        # Total download size in bytes, or None while still unknown.
        return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
    def ConfigureRequest(self, http_request, url_builder):
        """Configure the request/url builder for a media download."""
        url_builder.query_params['alt'] = 'media'
        # TODO(craigcitro): We need to send range requests because by
        # default httplib2 stores entire responses in memory. Override
        # httplib2's download method (as gsutil does) so that this is not
        # necessary.
        http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
    def InitializeDownload(self, http_request, http=None, client=None):
        """Initialize this download by making a request.

        Args:
          http_request: The HttpRequest to use to initialize this download.
          http: The httplib2.Http instance for this request.
          client: If provided, let this client process the final URL before
              sending any additional requests. If client is provided and
              http is not, client.http will be used instead.
        """
        self.EnsureUninitialized()
        if http is None and client is None:
            raise exceptions.UserError('Must provide client or http.')
        http = http or client.http
        if client is not None:
            http_request.url = client.FinalizeTransferUrl(http_request.url)
        url = http_request.url
        if self.auto_transfer:
            # Issue the first ranged request now so we learn the total size
            # (and possibly a server-preferred content location).
            end_byte = self.__ComputeEndByte(0)
            self.__SetRangeHeader(http_request, 0, end_byte)
            response = http_wrapper.MakeRequest(
                self.bytes_http or http, http_request)
            if response.status_code not in self._ACCEPTABLE_STATUSES:
                raise exceptions.HttpError.FromResponse(response)
            self.__initial_response = response
            self.__SetTotal(response.info)
            url = response.info.get('content-location', response.request_url)
        if client is not None:
            url = client.FinalizeTransferUrl(url)
        self._Initialize(http, url)
        # Unless the user has requested otherwise, we want to just
        # go ahead and pump the bytes now.
        if self.auto_transfer:
            self.StreamInChunks()
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index')
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size')
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size - 1
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
    def __GetChunk(self, start, end, additional_headers=None):
        """Retrieve a chunk, and return the full response."""
        self.EnsureInitialized()
        request = http_wrapper.Request(url=self.url)
        # Request only the [start, end] byte range of the resource.
        self.__SetRangeHeader(request, start, end=end)
        if additional_headers is not None:
            request.headers.update(additional_headers)
        # bytes_http is the http instance designated for byte transfers.
        return http_wrapper.MakeRequest(
            self.bytes_http, request, retry_func=self.retry_func,
            retries=self.num_retries)
    def __ProcessResponse(self, response):
        """Process response (by updating self and writing to self.stream).

        Raises HttpError for 403/404 (a misconfigured transfer) and
        TransferRetryError for any other unacceptable status, which the
        caller may retry. Returns the response unchanged on success.
        """
        if response.status_code not in self._ACCEPTABLE_STATUSES:
            # We distinguish errors that mean we made a mistake in setting
            # up the transfer versus something we should attempt again.
            if response.status_code in (http_client.FORBIDDEN,
                                        http_client.NOT_FOUND):
                raise exceptions.HttpError.FromResponse(response)
            else:
                raise exceptions.TransferRetryError(response.content)
        if response.status_code in (http_client.OK,
                                    http_client.PARTIAL_CONTENT):
            self.stream.write(response.content)
            self.__progress += response.length
            if response.info and 'content-encoding' in response.info:
                # TODO(craigcitro): Handle the case where this changes over a
                # download.
                self.__encoding = response.info['content-encoding']
        elif response.status_code == http_client.NO_CONTENT:
            # It's important to write something to the stream for the case
            # of a 0-byte download to a file, as otherwise python won't
            # create the file.
            self.stream.write('')
        return response
    def GetRange(self, start, end=None, additional_headers=None,
                 use_chunks=True):
        """Retrieve a given byte range from this download, inclusive.

        Range must be of one of these three forms:
        * 0 <= start, end = None: Fetch from start to the end of the file.
        * 0 <= start <= end: Fetch the bytes from start to end.
        * start < 0, end = None: Fetch the last -start bytes of the file.
        (These variations correspond to those described in the HTTP 1.1
        protocol for range headers in RFC 2616, sec. 14.35.1.)

        Args:
          start: (int) Where to start fetching bytes. (See above.)
          end: (int, optional) Where to stop fetching bytes. (See above.)
          additional_headers: (dict, optional) Any additional headers to
              pass with the request.
          use_chunks: (bool, default: True) If False, ignore self.chunksize
              and fetch this range in a single request.

        Returns:
          None. Streams bytes into self.stream.
        """
        self.EnsureInitialized()
        # Until the total size is known, negative / open-ended ranges
        # cannot be normalized; defer normalization to the first response.
        progress_end_normalized = False
        if self.total_size is not None:
            progress, end_byte = self.__NormalizeStartEnd(start, end)
            progress_end_normalized = True
        else:
            progress = start
            end_byte = end
        while (not progress_end_normalized or end_byte is None or
               progress <= end_byte):
            end_byte = self.__ComputeEndByte(progress, end=end_byte,
                                             use_chunks=use_chunks)
            response = self.__GetChunk(progress, end_byte,
                                       additional_headers=additional_headers)
            if not progress_end_normalized:
                # First response: learn the total size from its headers,
                # then normalize the originally requested range against it.
                self.__SetTotal(response.info)
                progress, end_byte = self.__NormalizeStartEnd(start, end)
                progress_end_normalized = True
            response = self.__ProcessResponse(response)
            progress += response.length
            if response.length == 0:
                # A zero-length chunk would loop forever; surface it as a
                # retryable transfer error instead.
                raise exceptions.TransferRetryError(
                    'Zero bytes unexpectedly returned in download response')
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True)
    def StreamMedia(self, callback=None, finish_callback=None,
                    additional_headers=None, use_chunks=True):
        """Stream the entire download.

        Args:
          callback: (default: None) Callback to call as each chunk is
              completed.
          finish_callback: (default: None) Callback to call when the
              download is complete.
          additional_headers: (default: None) Additional headers to
              include in fetching bytes.
          use_chunks: (bool, default: True) If False, ignore self.chunksize
              and stream this download in a single request.

        Returns:
          None. Streams bytes into self.stream.
        """
        callback = callback or self.progress_callback
        finish_callback = finish_callback or self.finish_callback
        self.EnsureInitialized()
        while True:
            if self.__initial_response is not None:
                # Reuse the response fetched during initialization instead
                # of issuing a duplicate first request.
                response = self.__initial_response
                self.__initial_response = None
            else:
                end_byte = self.__ComputeEndByte(self.progress,
                                                 use_chunks=use_chunks)
                response = self.__GetChunk(
                    self.progress, end_byte,
                    additional_headers=additional_headers)
            if self.total_size is None:
                self.__SetTotal(response.info)
            response = self.__ProcessResponse(response)
            self._ExecuteCallback(callback, response)
            # A plain 200 means the server returned the whole body at once;
            # otherwise stop once every byte has been accounted for.
            if (response.status_code == http_client.OK or
                    self.progress >= self.total_size):
                break
        self._ExecuteCallback(finish_callback, response)
class Upload(_Transfer):
    """Data for a single Upload.

    Fields:
      stream: The stream to upload.
      mime_type: MIME type of the upload.
      total_size: (optional) Total upload size for the stream.
      close_stream: (default: False) Whether or not we should close the
          stream when finished with the upload.
      auto_transfer: (default: True) If True, stream all bytes as soon as
          the upload is created.
    """
    # Keys that must be present in the JSON blob handed to FromData().
    _REQUIRED_SERIALIZATION_KEYS = set((
        'auto_transfer', 'mime_type', 'total_size', 'url'))
    def __init__(self, stream, mime_type, total_size=None, http=None,
                 close_stream=False, chunksize=None, auto_transfer=True,
                 progress_callback=None, finish_callback=None,
                 **kwds):
        super(Upload, self).__init__(
            stream, close_stream=close_stream, chunksize=chunksize,
            auto_transfer=auto_transfer, http=http, **kwds)
        self.__complete = False                  # Server confirmed all bytes.
        self.__final_response = None             # Cached response if finished.
        self.__mime_type = mime_type
        self.__progress = 0                      # Bytes acknowledged so far.
        self.__server_chunk_granularity = None   # Required chunk multiple.
        self.__strategy = None                   # SIMPLE_ or RESUMABLE_UPLOAD.
        self.__total_size = None
        self.progress_callback = progress_callback
        self.finish_callback = finish_callback
        # Assign via the property so EnsureUninitialized() is enforced.
        self.total_size = total_size
    @property
    def progress(self):
        # Number of bytes the server has acknowledged receiving.
        return self.__progress
    @classmethod
    def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds):
        """Create a new Upload object from a filename."""
        path = os.path.expanduser(filename)
        if not os.path.exists(path):
            raise exceptions.NotFoundError('Could not find file %s' % path)
        if not mime_type:
            mime_type, _ = mimetypes.guess_type(path)
            if mime_type is None:
                raise exceptions.InvalidUserInputError(
                    'Could not guess mime type for %s' % path)
        size = os.stat(path).st_size
        # close_stream=True: we opened the file here, so we own closing it.
        return cls(open(path, 'rb'), mime_type, total_size=size,
                   close_stream=True, auto_transfer=auto_transfer, **kwds)
    @classmethod
    def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
                   **kwds):
        """Create a new Upload object from a stream."""
        if mime_type is None:
            raise exceptions.InvalidUserInputError(
                'No mime_type specified for stream')
        # close_stream=False: the caller owns the stream's lifetime.
        return cls(stream, mime_type, total_size=total_size,
                   close_stream=False, auto_transfer=auto_transfer, **kwds)
    @classmethod
    def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds):
        """Create a new Upload of stream from serialized json_data and http."""
        info = json.loads(json_data)
        missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
        if missing_keys:
            raise exceptions.InvalidDataError(
                'Invalid serialization data, missing keys: %s' % (
                    ', '.join(missing_keys)))
        if 'total_size' in kwds:
            raise exceptions.InvalidUserInputError(
                'Cannot override total_size on serialized Upload')
        upload = cls.FromStream(stream, info['mime_type'],
                                total_size=info.get('total_size'), **kwds)
        # Resuming requires rewinding the stream to the server's position.
        if isinstance(stream, io.IOBase) and not stream.seekable():
            raise exceptions.InvalidUserInputError(
                'Cannot restart resumable upload on non-seekable stream')
        if auto_transfer is not None:
            upload.auto_transfer = auto_transfer
        else:
            upload.auto_transfer = info['auto_transfer']
        # Serialization is only defined for resumable uploads.
        upload.strategy = RESUMABLE_UPLOAD
        upload._Initialize(  # pylint: disable=protected-access
            http, info['url'])
        # Ask the server how many bytes it already has, then resume.
        upload.RefreshResumableUploadState()
        upload.EnsureInitialized()
        if upload.auto_transfer:
            upload.StreamInChunks()
        return upload
    @property
    def serialization_data(self):
        """State needed to re-create this upload later via FromData()."""
        self.EnsureInitialized()
        if self.strategy != RESUMABLE_UPLOAD:
            raise exceptions.InvalidDataError(
                'Serialization only supported for resumable uploads')
        return {
            'auto_transfer': self.auto_transfer,
            'mime_type': self.mime_type,
            'total_size': self.total_size,
            'url': self.url,
        }
    @property
    def complete(self):
        # True once the server has confirmed receipt of all bytes.
        return self.__complete
    @property
    def mime_type(self):
        return self.__mime_type
    def __str__(self):
        if not self.initialized:
            return 'Upload (uninitialized)'
        return 'Upload with %d/%s bytes transferred for url %s' % (
            self.progress, self.total_size or '???', self.url)
    @property
    def strategy(self):
        return self.__strategy
    @strategy.setter
    def strategy(self, value):
        # Only the two known strategies are legal.
        if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
            raise exceptions.UserError((
                'Invalid value "%s" for upload strategy, must be one of '
                '"simple" or "resumable".') % value)
        self.__strategy = value
    @property
    def total_size(self):
        return self.__total_size
    @total_size.setter
    def total_size(self, value):
        # Size may only change before the transfer has been initialized.
        self.EnsureUninitialized()
        self.__total_size = value
    def __SetDefaultUploadStrategy(self, upload_config, http_request):
        """Determine and set the default upload strategy for this upload.

        We generally prefer simple or multipart, unless we're forced to
        use resumable. This happens when any of (1) the upload is too
        large, (2) the simple endpoint doesn't support multipart requests
        and we have metadata, or (3) there is no simple upload endpoint.

        Args:
          upload_config: Configuration for the upload endpoint.
          http_request: The associated http request.

        Returns:
          None.
        """
        if upload_config.resumable_path is None:
            self.strategy = SIMPLE_UPLOAD
        if self.strategy is not None:
            # Either forced above or already chosen by the caller.
            return
        strategy = SIMPLE_UPLOAD
        if (self.total_size is not None and
                self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
            strategy = RESUMABLE_UPLOAD
        if http_request.body and not upload_config.simple_multipart:
            # Metadata present but the endpoint can't take multipart.
            strategy = RESUMABLE_UPLOAD
        if not upload_config.simple_path:
            strategy = RESUMABLE_UPLOAD
        self.strategy = strategy
    def ConfigureRequest(self, upload_config, http_request, url_builder):
        """Configure the request and url for this upload."""
        # Validate total_size vs. max_size
        if (self.total_size and upload_config.max_size and
                self.total_size > upload_config.max_size):
            raise exceptions.InvalidUserInputError(
                'Upload too big: %s larger than max size %s' % (
                    self.total_size, upload_config.max_size))
        # Validate mime type
        if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
            raise exceptions.InvalidUserInputError(
                'MIME type %s does not match any accepted MIME ranges %s' % (
                    self.mime_type, upload_config.accept))
        self.__SetDefaultUploadStrategy(upload_config, http_request)
        if self.strategy == SIMPLE_UPLOAD:
            url_builder.relative_path = upload_config.simple_path
            if http_request.body:
                # Metadata plus media: a multipart/related request.
                url_builder.query_params['uploadType'] = 'multipart'
                self.__ConfigureMultipartRequest(http_request)
            else:
                # Media only.
                url_builder.query_params['uploadType'] = 'media'
                self.__ConfigureMediaRequest(http_request)
        else:
            url_builder.relative_path = upload_config.resumable_path
            url_builder.query_params['uploadType'] = 'resumable'
            self.__ConfigureResumableRequest(http_request)
    def __ConfigureMediaRequest(self, http_request):
        """Configure http_request as a simple request for this upload."""
        http_request.headers['content-type'] = self.mime_type
        # NOTE: reads the entire stream into memory.
        http_request.body = self.stream.read()
        http_request.loggable_body = '<media body>'
    def __ConfigureMultipartRequest(self, http_request):
        """Configure http_request as a multipart request for this upload."""
        # This is a multipart/related upload.
        msg_root = mime_multipart.MIMEMultipart('related')
        # msg_root should not write out its own headers
        setattr(msg_root, '_write_headers', lambda self: None)
        # attach the body as one part
        msg = mime_nonmultipart.MIMENonMultipart(
            *http_request.headers['content-type'].split('/'))
        msg.set_payload(http_request.body)
        msg_root.attach(msg)
        # attach the media as the second part
        msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
        msg['Content-Transfer-Encoding'] = 'binary'
        msg.set_payload(self.stream.read())
        msg_root.attach(msg)
        # NOTE: We encode the body, but can't use
        # `email.message.Message.as_string` because it prepends
        # `> ` to `From ` lines.
        fp = six.BytesIO()
        if six.PY3:
            generator_class = email_generator.BytesGenerator
        else:
            generator_class = email_generator.Generator
        g = generator_class(fp, mangle_from_=False)
        g.flatten(msg_root, unixfrom=False)
        http_request.body = fp.getvalue()
        multipart_boundary = msg_root.get_boundary()
        http_request.headers['content-type'] = (
            'multipart/related; boundary=%r' % multipart_boundary)
        if isinstance(multipart_boundary, six.text_type):
            multipart_boundary = multipart_boundary.encode('ascii')
        # Replace the (possibly huge) media part with a placeholder in the
        # loggable copy of the body.
        body_components = http_request.body.split(multipart_boundary)
        headers, _, _ = body_components[-2].partition(b'\n\n')
        body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--'])
        http_request.loggable_body = multipart_boundary.join(body_components)
    def __ConfigureResumableRequest(self, http_request):
        """Set session-initiation headers describing the upcoming media."""
        http_request.headers['X-Upload-Content-Type'] = self.mime_type
        if self.total_size is not None:
            http_request.headers[
                'X-Upload-Content-Length'] = str(self.total_size)
    def RefreshResumableUploadState(self):
        """Talk to the server and refresh the state of this resumable upload.

        Returns:
          Response if the upload is complete.
        """
        if self.strategy != RESUMABLE_UPLOAD:
            return
        self.EnsureInitialized()
        # 'bytes */*' asks the server to report how much it has received.
        refresh_request = http_wrapper.Request(
            url=self.url, http_method='PUT',
            headers={'Content-Range': 'bytes */*'})
        refresh_response = http_wrapper.MakeRequest(
            self.http, refresh_request, redirections=0,
            retries=self.num_retries)
        range_header = self._GetRangeHeaderFromResponse(refresh_response)
        if refresh_response.status_code in (http_client.OK,
                                            http_client.CREATED):
            # Upload already finished server-side.
            self.__complete = True
            self.__progress = self.total_size
            self.stream.seek(self.progress)
            # If we're finished, the refresh response will contain the metadata
            # originally requested. Cache it so it can be returned in
            # StreamInChunks.
            self.__final_response = refresh_response
        elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
            # No Range header means the server has received nothing yet.
            if range_header is None:
                self.__progress = 0
            else:
                self.__progress = self.__GetLastByte(range_header) + 1
            self.stream.seek(self.progress)
        else:
            raise exceptions.HttpError.FromResponse(refresh_response)
    def _GetRangeHeaderFromResponse(self, response):
        # Header capitalization varies; check both spellings.
        return response.info.get('Range', response.info.get('range'))
    def InitializeUpload(self, http_request, http=None, client=None):
        """Initialize this upload from the given http_request."""
        if self.strategy is None:
            raise exceptions.UserError(
                'No upload strategy set; did you call ConfigureRequest?')
        if http is None and client is None:
            raise exceptions.UserError('Must provide client or http.')
        # Only resumable uploads need a session-initiation round trip.
        if self.strategy != RESUMABLE_UPLOAD:
            return
        http = http or client.http
        if client is not None:
            http_request.url = client.FinalizeTransferUrl(http_request.url)
        self.EnsureUninitialized()
        http_response = http_wrapper.MakeRequest(http, http_request,
                                                 retries=self.num_retries)
        if http_response.status_code != http_client.OK:
            raise exceptions.HttpError.FromResponse(http_response)
        # NOTE(review): this header value comes straight from the response
        # and may be a string; __ValidateChunksize applies % to it — confirm
        # an int conversion happens upstream.
        self.__server_chunk_granularity = http_response.info.get(
            'X-Goog-Upload-Chunk-Granularity')
        # The session URI used for all subsequent chunk PUTs.
        url = http_response.info['location']
        if client is not None:
            url = client.FinalizeTransferUrl(url)
        self._Initialize(http, url)
        # Unless the user has requested otherwise, we want to just
        # go ahead and pump the bytes now.
        if self.auto_transfer:
            return self.StreamInChunks()
        return http_response
    def __GetLastByte(self, range_header):
        """Parse the last byte index out of a 'bytes 0-N' style header."""
        _, _, end = range_header.partition('-')
        # TODO(craigcitro): Validate start == 0?
        return int(end)
    def __ValidateChunksize(self, chunksize=None):
        """Raise if chunksize violates the server's granularity constraint."""
        if self.__server_chunk_granularity is None:
            # Server did not advertise a granularity; any size is fine.
            return
        chunksize = chunksize or self.chunksize
        if chunksize % self.__server_chunk_granularity:
            # NOTE(review): the format string is passed as a second argument
            # rather than %-interpolated, so the message will surface as a
            # tuple; confirm whether interpolation was intended.
            raise exceptions.ConfigurationValueError(
                'Server requires chunksize to be a multiple of %d',
                self.__server_chunk_granularity)
    def __StreamMedia(self, callback=None, finish_callback=None,
                      additional_headers=None, use_chunks=True):
        """Helper function for StreamMedia / StreamInChunks."""
        if self.strategy != RESUMABLE_UPLOAD:
            raise exceptions.InvalidUserInputError(
                'Cannot stream non-resumable upload')
        callback = callback or self.progress_callback
        finish_callback = finish_callback or self.finish_callback
        # final_response is set if we resumed an already-completed upload.
        response = self.__final_response
        send_func = self.__SendChunk if use_chunks else self.__SendMediaBody
        if use_chunks:
            self.__ValidateChunksize(self.chunksize)
        self.EnsureInitialized()
        while not self.complete:
            response = send_func(self.stream.tell(),
                                 additional_headers=additional_headers)
            if response.status_code in (http_client.OK, http_client.CREATED):
                self.__complete = True
                break
            # 308 RESUME_INCOMPLETE: track the last byte the server reports.
            self.__progress = self.__GetLastByte(response.info['range'])
            if self.progress + 1 != self.stream.tell():
                # TODO(craigcitro): Add a better way to recover here.
                raise exceptions.CommunicationError(
                    'Failed to transfer all bytes in chunk, upload paused at '
                    'byte %d' % self.progress)
            self._ExecuteCallback(callback, response)
        if self.__complete and hasattr(self.stream, 'seek'):
            # Sanity check: the stream must have been fully consumed.
            current_pos = self.stream.tell()
            self.stream.seek(0, os.SEEK_END)
            end_pos = self.stream.tell()
            self.stream.seek(current_pos)
            if current_pos != end_pos:
                raise exceptions.TransferInvalidError(
                    'Upload complete with %s additional bytes left in stream' %
                    (int(end_pos) - int(current_pos)))
        self._ExecuteCallback(finish_callback, response)
        return response
    def StreamMedia(self, callback=None, finish_callback=None,
                    additional_headers=None):
        """Send this resumable upload in a single request.

        Args:
          callback: Progress callback function with inputs
              (http_wrapper.Response, transfer.Upload)
          finish_callback: Final callback function with inputs
              (http_wrapper.Response, transfer.Upload)
          additional_headers: Dict of headers to include with the upload
              http_wrapper.Request.

        Returns:
          http_wrapper.Response of final response.
        """
        return self.__StreamMedia(
            callback=callback, finish_callback=finish_callback,
            additional_headers=additional_headers, use_chunks=False)
    def StreamInChunks(self, callback=None, finish_callback=None,
                       additional_headers=None):
        """Send this (resumable) upload in chunks."""
        return self.__StreamMedia(
            callback=callback, finish_callback=finish_callback,
            additional_headers=additional_headers)
    def __SendMediaRequest(self, request, end):
        """Request helper function for SendMediaBody & SendChunk."""
        response = http_wrapper.MakeRequest(
            self.bytes_http, request, retry_func=self.retry_func,
            retries=self.num_retries)
        if response.status_code not in (http_client.OK, http_client.CREATED,
                                        http_wrapper.RESUME_INCOMPLETE):
            # We want to reset our state to wherever the server left us
            # before this failed request, and then raise.
            self.RefreshResumableUploadState()
            raise exceptions.HttpError.FromResponse(response)
        if response.status_code == http_wrapper.RESUME_INCOMPLETE:
            last_byte = self.__GetLastByte(
                self._GetRangeHeaderFromResponse(response))
            if last_byte + 1 != end:
                # Server accepted fewer bytes than sent; rewind the stream.
                # NOTE(review): seek(last_byte) positions the stream on the
                # last *received* byte, while RefreshResumableUploadState
                # seeks to last byte + 1 — confirm this is not off by one.
                self.stream.seek(last_byte)
        return response
    def __SendMediaBody(self, start, additional_headers=None):
        """Send the entire media stream in a single request."""
        self.EnsureInitialized()
        if self.total_size is None:
            raise exceptions.TransferInvalidError(
                'Total size must be known for SendMediaBody')
        body_stream = stream_slice.StreamSlice(
            self.stream, self.total_size - start)
        request = http_wrapper.Request(url=self.url, http_method='PUT',
                                       body=body_stream)
        request.headers['Content-Type'] = self.mime_type
        if start == self.total_size:
            # End of an upload with 0 bytes left to send; just finalize.
            range_string = 'bytes */%s' % self.total_size
        else:
            range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
                                               self.total_size)
        request.headers['Content-Range'] = range_string
        if additional_headers:
            request.headers.update(additional_headers)
        return self.__SendMediaRequest(request, self.total_size)
    def __SendChunk(self, start, additional_headers=None):
        """Send the specified chunk."""
        self.EnsureInitialized()
        no_log_body = self.total_size is None
        if self.total_size is None:
            # For the streaming resumable case, we need to detect when
            # we're at the end of the stream.
            body_stream = buffered_stream.BufferedStream(
                self.stream, start, self.chunksize)
            end = body_stream.stream_end_position
            if body_stream.stream_exhausted:
                self.__total_size = end
            # TODO: Here, change body_stream from a stream to a string object,
            # which means reading a chunk into memory. This works around
            # https://code.google.com/p/httplib2/issues/detail?id=176 which can
            # cause httplib2 to skip bytes on 401's for file objects.
            # Rework this solution to be more general.
            body_stream = body_stream.read(self.chunksize)
        else:
            end = min(start + self.chunksize, self.total_size)
            body_stream = stream_slice.StreamSlice(self.stream, end - start)
        # TODO(craigcitro): Think about clearer errors on "no data in
        # stream".
        request = http_wrapper.Request(url=self.url, http_method='PUT',
                                       body=body_stream)
        request.headers['Content-Type'] = self.mime_type
        if no_log_body:
            # Disable logging of streaming body.
            # TODO: Remove no_log_body and rework as part of a larger logs
            # refactor.
            request.loggable_body = '<media body>'
        if self.total_size is None:
            # Streaming resumable upload case, unknown total size.
            range_string = 'bytes %s-%s/*' % (start, end - 1)
        elif end == start:
            # End of an upload with 0 bytes left to send; just finalize.
            range_string = 'bytes */%s' % self.total_size
        else:
            # Normal resumable upload case with known sizes.
            range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
        request.headers['Content-Range'] = range_string
        if additional_headers:
            request.headers.update(additional_headers)
        return self.__SendMediaRequest(request, end)
| apache-2.0 |
riteshshrv/django | django/core/exceptions.py | 486 | 5276 | """
Global Django exception and warning classes.
"""
from django.utils import six
from django.utils.encoding import force_text
class FieldDoesNotExist(Exception):
    """Raised when the requested model field does not exist."""
class DjangoRuntimeWarning(RuntimeWarning):
    """Warning category used by Django at runtime."""
class AppRegistryNotReady(Exception):
    """Raised when the django.apps registry is not populated yet."""
class ObjectDoesNotExist(Exception):
    """Raised when the requested object does not exist."""

    # Marker attribute: lookups that raise this exception may be
    # silenced by callers that check this flag.
    silent_variable_failure = True
class MultipleObjectsReturned(Exception):
    """Raised when a query returns multiple objects but one was expected."""
class SuspiciousOperation(Exception):
    """Raised when the user did something suspicious."""
class SuspiciousMultipartForm(SuspiciousOperation):
    """Raised on a suspect MIME request in multipart form data."""
class SuspiciousFileOperation(SuspiciousOperation):
    """Raised when a suspicious filesystem operation was attempted."""
class DisallowedHost(SuspiciousOperation):
    """Raised when the HTTP_HOST header contains an invalid value."""
class DisallowedRedirect(SuspiciousOperation):
    """Raised on a redirect to a scheme not in the allowed list."""
class PermissionDenied(Exception):
    """Raised when the user did not have permission to do that."""
class ViewDoesNotExist(Exception):
    """Raised when the requested view does not exist."""
class MiddlewareNotUsed(Exception):
    """Raised when a middleware is not used in this server configuration."""
class ImproperlyConfigured(Exception):
    """Raised when Django is somehow improperly configured."""
class FieldError(Exception):
    """Raised for problems with a model field."""
# Key under which errors not tied to a specific field are accumulated
# (see ValidationError.update_error_dict below).
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
    """An error while validating data.

    Internally normalized to either `error_dict` (field name -> list of
    ValidationError) or `error_list` (list of ValidationError); a leaf
    error additionally carries `message`, `code` and `params`.
    """
    def __init__(self, message, code=None, params=None):
        """
        The `message` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. What we define as
        an "error" can be either a simple string or an instance of
        ValidationError with its message attribute set, and what we define as
        list or dictionary can be an actual `list` or `dict` or an instance
        of ValidationError with its `error_list` or `error_dict` attribute set.
        """
        # PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.
        super(ValidationError, self).__init__(message, code, params)
        if isinstance(message, ValidationError):
            # Unwrap a nested ValidationError into its raw payload so the
            # normalization below treats all inputs uniformly.
            if hasattr(message, 'error_dict'):
                message = message.error_dict
            # PY2 has a `message` property which is always there so we can't
            # duck-type on it. It was introduced in Python 2.5 and already
            # deprecated in Python 2.6.
            elif not hasattr(message, 'message' if six.PY3 else 'code'):
                message = message.error_list
            else:
                message, code, params = message.message, message.code, message.params
        if isinstance(message, dict):
            # Per-field errors: each value becomes a list of leaf errors.
            self.error_dict = {}
            for field, messages in message.items():
                if not isinstance(messages, ValidationError):
                    messages = ValidationError(messages)
                self.error_dict[field] = messages.error_list
        elif isinstance(message, list):
            self.error_list = []
            for message in message:
                # Normalize plain strings to instances of ValidationError.
                if not isinstance(message, ValidationError):
                    message = ValidationError(message)
                if hasattr(message, 'error_dict'):
                    self.error_list.extend(sum(message.error_dict.values(), []))
                else:
                    self.error_list.extend(message.error_list)
        else:
            # Leaf error: a single message with optional code/params.
            self.message = message
            self.code = code
            self.params = params
            self.error_list = [self]
    @property
    def message_dict(self):
        # Trigger an AttributeError if this ValidationError
        # doesn't have an error_dict.
        getattr(self, 'error_dict')
        return dict(self)
    @property
    def messages(self):
        if hasattr(self, 'error_dict'):
            # Flatten all per-field message lists into a single list.
            return sum(dict(self).values(), [])
        return list(self)
    def update_error_dict(self, error_dict):
        """Merge this error's messages into `error_dict` (mutated, returned)."""
        if hasattr(self, 'error_dict'):
            for field, error_list in self.error_dict.items():
                error_dict.setdefault(field, []).extend(error_list)
        else:
            # Errors not tied to a field accumulate under NON_FIELD_ERRORS.
            error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
        return error_dict
    def __iter__(self):
        # Yields (field, messages) pairs for dict-shaped errors, or plain
        # message strings for list-shaped ones.
        if hasattr(self, 'error_dict'):
            for field, errors in self.error_dict.items():
                yield field, list(ValidationError(errors))
        else:
            for error in self.error_list:
                message = error.message
                if error.params:
                    # Interpolate params lazily, only at iteration time.
                    message %= error.params
                yield force_text(message)
    def __str__(self):
        if hasattr(self, 'error_dict'):
            return repr(dict(self))
        return repr(list(self))
    def __repr__(self):
        return 'ValidationError(%s)' % self
| bsd-3-clause |
TheTypoMaster/chromium-crosswalk | tools/perf/metrics/network.py | 20 | 2307 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.value import scalar
from metrics import Metric
# Reason string attached to a result when the counters were unavailable.
NETWORK_DATA_NOT_FOUND = 'Network data could not be found.'
# This is experimental. crbug.com/480512
# Will not be supported once network data is ported to TimelineBasedMetric.
class NetworkMetric(Metric):
    """Gathers network statistics (bytes sent/received) for a page."""

    def __init__(self, platform):
        super(NetworkMetric, self).__init__()
        self._network_snd = None
        self._network_rcv = None
        self._platform = platform
        self._browser = None

    def Start(self, _, tab):
        """Start the per-page preparation for this metric.

        Here, this consists of recording the starting counter values.
        """
        self._browser = tab.browser
        if not self._platform.CanMonitorNetworkData():
            return
        data = self._platform.GetNetworkData(self._browser)
        if data is None:
            return
        self._network_snd, self._network_rcv = data

    def Stop(self, _, tab):
        """Prepare the results for this page.

        Replaces the stored counters with the deltas accumulated since
        Start() was called, or None when no end data could be read.
        """
        if not self._platform.CanMonitorNetworkData():
            return
        data = self._platform.GetNetworkData(self._browser)
        if data is None:
            # If end data cannot be found, report none.
            self._network_snd = None
            self._network_rcv = None
            return
        snd, rcv = data
        if self._network_snd is not None:
            self._network_snd = snd - self._network_snd
        if self._network_rcv is not None:
            self._network_rcv = rcv - self._network_rcv

    def AddResults(self, tab, results):
        """Report the deltas computed by Stop() as scalar result values."""
        self._AddScalar(results, 'network_data_sent', self._network_snd)
        self._AddScalar(results, 'network_data_received', self._network_rcv)

    def _AddScalar(self, results, name, value):
        """Emit one scalar result, noting why it is absent when None."""
        reason = None if value is not None else NETWORK_DATA_NOT_FOUND
        results.AddValue(scalar.ScalarValue(
            results.current_page, name, 'kb', value,
            important=False, none_value_reason=reason))
| bsd-3-clause |
zycdragonball/tensorflow | tensorflow/python/util/all_util.py | 128 | 4709 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate __all__ from a module docstring."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re as _re
import sys as _sys
from tensorflow.python.util import tf_inspect as _tf_inspect
# Matches whole lines of the form "@@symbol" inside module docstrings.
_reference_pattern = _re.compile(r'^@@(\w+)$', flags=_re.MULTILINE)
def make_all(module_name, doc_string_modules=None):
  """Generates `__all__` from the docstring of one or more modules.

  Usage: `make_all(__name__)` or
  `make_all(__name__, [sys.modules(__name__), other_module])`. The doc string
  modules must each have a docstring, and `__all__` will contain all symbols
  with `@@` references, where that symbol currently exists in the module
  named `module_name`.

  Args:
    module_name: The name of the module (usually `__name__`).
    doc_string_modules: a list of modules from which to take the docstrings.
    If None, then a list containing only the module named `module_name` is
    used.

  Returns:
    A list suitable for use as `__all__`.
  """
  target = _sys.modules[module_name]
  if doc_string_modules is None:
    doc_string_modules = [target]
  member_names = set(
      name for name, _ in _tf_inspect.getmembers(target))
  exported = set()
  for doc_module in doc_string_modules:
    # Keep only @@-referenced symbols that actually exist in the module.
    for match in _reference_pattern.finditer(doc_module.__doc__):
      symbol = match.group(1)
      if symbol in member_names:
        exported.add(symbol)
  return list(exported)
# Hidden attributes are attributes that have been hidden by
# `remove_undocumented`. They can be re-instated by `reveal_undocumented`.
# This maps each fully qualified symbol name to a tuple, containing:
#   (module object, attribute value)
_HIDDEN_ATTRIBUTES = {}
def reveal_undocumented(symbol_name, target_module=None):
  """Re-exposes a symbol previously hidden by `remove_undocumented`.

  This should be used by tensorflow internal tests only. It explicitly
  defeats the encapsulation afforded by `remove_undocumented`.

  Args:
    symbol_name: a string representing the full absolute path of the symbol.
    target_module: if specified, the module in which to restore the symbol;
      defaults to the module the symbol was hidden from.

  Raises:
    LookupError: if the symbol was not hidden in the first place.
  """
  if symbol_name not in _HIDDEN_ATTRIBUTES:
    raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)
  original_module, attr_value = _HIDDEN_ATTRIBUTES[symbol_name]
  destination = target_module if target_module else original_module
  basename = symbol_name.split('.')[-1]
  setattr(destination, basename, attr_value)
def remove_undocumented(module_name, allowed_exception_list=None,
                        doc_string_modules=None):
    """Hide every public symbol of a module that no docstring references.

    Hidden symbols are remembered in `_HIDDEN_ATTRIBUTES` so they can be
    restored later with `reveal_undocumented`.

    Args:
      module_name: the name of the module (usually `__name__`).
      allowed_exception_list: names that should never be removed.
      doc_string_modules: modules whose docstrings define the public API;
        when None, the module named `module_name` itself is used.

    Returns:
      None
    """
    target_module = _sys.modules[module_name]
    keep = set(make_all(module_name, doc_string_modules))
    keep.update(allowed_exception_list or [])
    undocumented = set(dir(target_module)) - keep
    for symbol in undocumented:
        # Skip over __file__, etc.  Also preserves internal symbols.
        if symbol.startswith('_'):
            continue
        fully_qualified_name = module_name + '.' + symbol
        _HIDDEN_ATTRIBUTES[fully_qualified_name] = (
            target_module, getattr(target_module, symbol))
        delattr(target_module, symbol)
# Public API of this helper module.
__all__ = [
    'make_all',
    'remove_undocumented',
    'reveal_undocumented',
]
| apache-2.0 |
gdl-civestav-localization/cinvestav_location_fingerprinting | experimentation/__init__.py | 1 | 1691 | import os
import cPickle
import matplotlib.pyplot as plt
from datasets import DatasetManager
def plot_cost(results, data_name, plot_label):
    """Plot the (cost, epoch) series stored under *data_name* for result
    record 1, on a symlog-scaled figure titled *plot_label*."""
    plt.figure(plot_label)
    plt.ylabel('Accuracy (m)', fontsize=30)
    plt.xlabel('Epoch', fontsize=30)
    plt.yscale('symlog')
    plt.tick_params(axis='both', which='major', labelsize=20)
    plt.grid(True)
    for index in range(1, 2):
        series = results[index][data_name]
        costs, epochs = zip(*series)
        plt.plot(epochs, costs, label=results[index]['Name'], linewidth=5.0)
    plt.legend(fontsize='xx-large')
def get_metrics(test_set_y, predicted_values, model_name):
    """Print the second element of every prediction row.

    `test_set_y` and `model_name` are accepted for interface compatibility
    but are not currently used.
    """
    for row in predicted_values:
        print(row[1])
if __name__ == '__main__':
    # The triple-quoted block below is dead code kept as reference: it plots
    # train/valid/test cost curves from a pickled experiment-results file.
    """
    seed = 50
    with open(os.path.join('experimentation', 'cinvestav_testbed_experiment_results_' + str(seed)), 'rb') as f:
        results = cPickle.load(f)
    plot_cost(
        results=results,
        data_name='cost_train',
        plot_label='Cost on train phase')
    plot_cost(
        results=results,
        data_name='cost_valid',
        plot_label='Cost on valid phase')
    plot_cost(
        results=results,
        data_name='cost_test',
        plot_label='Cost on test phase')
    plt.show()
    """
    # Load the cleaned test dataset and a previously pickled model, then
    # print the predictions via get_metrics.
    seed = 50
    dataset, result = DatasetManager.read_dataset2('test_cleaned_dataset.csv', shared=True, seed=seed)
    with open(os.path.join('trained_models', 'Logistic Regressionbrandeis_university.save'), 'rb') as f:
        model = cPickle.load(f)
    predicted_values = model.predict(dataset)
    get_metrics(
        test_set_y=result,
        predicted_values=predicted_values,
        model_name='Logistic Regression'
    )
| gpl-3.0 |
danielmartin/swift | utils/protocol_graph.py | 48 | 6786 | # ===--- protocol_graph.py ---------------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
#
# Create a graph of the protocol refinement relationships, associated
# types, operator requirements, and defaulted generic operators.
#
# run as follows to view the Nth-largest connected component in a web browser:
#
# N=0 && rm -f /tmp/protocols.dot && \
# python protocol_graph.py stdlib.swift > /tmp/p0.dot && \
# (ccomps -zX#$N -o /tmp/protocols.dot /tmp/p0.dot || true) \
# && dot -Tsvg /tmp/protocols.dot > /tmp/protocols.svg \
# && open /tmp/protocols.svg
#
# ===---------------------------------------------------------------------===//
from __future__ import print_function
import cgi
import os
import re
import sys
# Open 'stdlib.swift' in this directory if no path specified.
# (argv is extended with a default so args[1] always exists.)
args = list(sys.argv) + \
    [os.path.join(os.path.dirname(__file__), 'stdlib.swift')]

# Flags shared by every regex in this script.
re_flags = re.MULTILINE | re.VERBOSE

# Pattern to recognize stdlib identifiers (FIXME: doesn't handle Unicode).
identifier = '[A-Za-z_][A-Za-z0-9_]*'

# Pattern to recognize a (possibly-generic) operator decl.
operator = r'''
(?:(?:prefix|postfix).*)? func \s*
(?=\S)[^A-Za-z_] # non-space, non-identifier: begins an operator name
(?:(?=\S)[^(){])* # rest of operator name
\s*
(<[^>{]+>)? # generic parameter list
\s*
\([^)]*\) # function parameter list
'''
# substitute local variables into the string
def interpolate(string):
    """Expand %(name)s placeholders in *string* from the caller's locals."""
    import inspect
    caller_frame = inspect.currentframe().f_back
    return string % caller_frame.f_locals
# Given the body_text of a protocol definition, return a list of
# associated type and operator requirements.
def body_lines(body_text):
    """Return the HTML-escaped typealias/operator requirements in a body."""
    requirement = (r'(typealias\s*' + identifier +
                   r'(\s*[:,]\s*' + identifier + ')?|' + operator + '.*)')
    return [cgi.escape(match.group(0))
            for match in re.finditer(requirement, body_text, re_flags)]
# Mapping from protocol to associated type / operator requirements
body = {}

# Mapping from a parent protocol to set of children.
graph = {}

# Mapping from protocol to generic operators taking instances as arguments
generic_operators = {}

# FIXME: doesn't respect strings or comment nesting)
comments = r'//.* | /[*] (.|\n)*? [*]/'

# read source, stripping all comments
with open(args[1]) as src:
    source_sans_comments = re.sub(comments, '', src.read(), flags=re_flags)

# Matches "T : P" inside a generic parameter list; group 1 is the type
# parameter, group 2 the protocol it must conform to.
generic_parameter_constraint = interpolate(
    r' (%(identifier)s) \s* : \s* (%(identifier)s) ')
def parse_generic_operator(m):
    """Record a generic operator whose constrained type parameter is used as
    a function parameter, under generic_operators[protocol]."""
    generic_params = m.group(5)
    generic_operator = cgi.escape(m.group(0).strip())
    # NOTE(review): this offset is computed on the raw match but applied to
    # the escaped+stripped string — presumably close enough in practice,
    # but worth confirming.
    function_param_start = m.end(5) - m.start(0)
    function_params = generic_operator[function_param_start:]
    for m2 in re.finditer(
            generic_parameter_constraint, generic_params, re_flags):
        type_parameter = m2.group(1)
        protocol = m2.group(2)
        # we're only interested if we can find a function parameter of that
        # type
        if not re.search(r':\s*%s\s*[,)]' % type_parameter, function_params):
            continue
        # Make some replacements in the signature to limit the graph size
        letter_tau = 'τ'
        letter_pi = 'π'
        abbreviated_signature = re.sub(
            r'\b%s\b' % protocol, letter_pi,
            re.sub(r'\b%s\b' % type_parameter, letter_tau, generic_operator))
        generic_operators.setdefault(
            protocol, set()).add(abbreviated_signature)
def parse_protocol(m):
    """Record one protocol declaration: its requirements in `body` and its
    refinement edges (parent -> child) in `graph`."""
    child = m.group(1)
    # skip irrelevant compiler-internal protocols
    if re.match(r'_Builtin.*Convertible', child):
        return
    graph.setdefault(child, set())
    body[child] = body_lines(m.group(3))
    refinements = m.group(2)
    if refinements:
        for parent in refinements.strip().split(","):
            if re.match(r'_Builtin.*Convertible', parent):
                return
            graph.setdefault(parent.strip(), set()).add(child)
# Combined pattern: groups 1-4 capture a protocol declaration (name,
# refinements, body); group 5 is the generic parameter list when an
# operator definition matched instead.
protocols_and_operators = interpolate(r'''
\bprotocol \s+ (%(identifier)s) \s*
(?::\s*([^{]+))? # refinements
{([^{}\n]*(.*\n)*?)} # body
|
%(operator)s [^{]*(?={) # operator definition up to the open brace
''')
# Main parsing loop
for m in re.finditer(protocols_and_operators, source_sans_comments, re_flags):
    if m.group(1):
        parse_protocol(m)
    elif m.group(5):
        parse_generic_operator(m)
    # otherwise we matched some non-generic operator

# Find clusters of protocols that have the same name when underscores
# are stripped

# map from potential cluster name to nodes in the cluster
cluster_builder = {}
for n in graph:
    # NOTE(review): str.translate(None, '_') is the Python-2 form of
    # "delete underscores"; this would need n.replace('_', '') on Python 3.
    cluster_builder.setdefault(n.translate(None, '_'), set()).add(n)

# Grab the clusters with more than one member.
clusters = dict((c, nodes)
                for (c, nodes) in cluster_builder.items() if len(nodes) > 1)

# A set of all intra-cluster edges
cluster_edges = set(
    (s, t) for (c, elements) in clusters.items()
    for s in elements
    for t in graph[s] if t in elements)

# Emit the Graphviz preamble.
print('digraph ProtocolHierarchies {')
# ; packmode="array1"
print(' mclimit = 100; ranksep=1.5; ')
print(' edge [dir="back"];')
print(' node [shape = box, fontname = Helvetica, fontsize = 10];')

# Emit one subgraph per cluster, keeping intra-cluster edges together.
for c in sorted(clusters):
    print(' subgraph "cluster_%s" {' % c)
    for (s, t) in sorted(cluster_edges):
        if s in clusters[c]:
            print('%s -> %s [weight=100];' % (s, t))
    print('}')
# Emit one Graphviz node per protocol, labelled with an HTML table of its
# requirements and applicable generic operators.
for node in sorted(graph.keys()):
    requirements = body.get(node, [])
    generics = sorted(generic_operators.get(node, set()))
    style = 'solid' if node.startswith('_') else 'bold'
    divider = '<HR/>\n' if len(requirements) != 0 and len(generics) != 0 \
        else ''
    if len(requirements + generics) == 0:
        label = node
    else:
        # Bug fix: previously this was written as `'...' + '...%s%s%s...' %
        # (4-tuple)`.  Since '%' binds tighter than '+', the 4-tuple was
        # applied to the second literal alone (only 3 placeholders), raising
        # TypeError for every node with requirements or generics.  Adjacent
        # string literals concatenate first, giving all 4 placeholders.
        label = (
            '\n<TABLE BORDER="0">\n<TR><TD>\n%s\n</TD></TR><HR/>'
            '\n%s%s%s</TABLE>\n' % (
                node,
                '\n'.join('<TR><TD>%s</TD></TR>' % r for r in requirements),
                divider,
                '\n'.join('<TR><TD>%s</TD></TR>' % g for g in generics)))
    print(interpolate(' %(node)s [style = %(style)s, label=<%(label)s>]'))
# Emit the remaining refinement edges, skipping those already drawn
# inside a cluster subgraph; the final '}' closes the digraph.
for (parent, children) in sorted(graph.items()):
    print(' %s -> {' % parent, end=' ')
    print('; '.join(sorted(
        child for child in children
        if not (parent, child) in cluster_edges)
    ), end=' ')
    print('}')
print('}')
| apache-2.0 |
fduraffourg/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_util.py | 449 | 7538 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for util module."""
import os
import random
import sys
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import util
_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
class UtilTest(unittest.TestCase):
    """A unittest for util module."""

    def test_get_stack_trace(self):
        # Outside an exception handler there is no active traceback.
        self.assertEqual('None\n', util.get_stack_trace())
        try:
            a = 1 / 0 # Intentionally raise exception.
        except Exception:
            trace = util.get_stack_trace()
            self.failUnless(trace.startswith('Traceback'))
            self.failUnless(trace.find('ZeroDivisionError') != -1)

    def test_prepend_message_to_exception(self):
        exc = Exception('World')
        self.assertEqual('World', str(exc))
        util.prepend_message_to_exception('Hello ', exc)
        self.assertEqual('Hello World', str(exc))

    def test_get_script_interp(self):
        # With a cygwin bin dir supplied, the interpreter path is rewritten
        # to point into cygwin.
        cygwin_path = 'c:\\cygwin\\bin'
        cygwin_perl = os.path.join(cygwin_path, 'perl')
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README')))
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
        self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl')))
        self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))

    def test_hexify(self):
        self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
                         util.hexify('azAZ09 \t\r\n\x00\xff'))
class RepeatedXorMaskerTest(unittest.TestCase):
    """A unittest for RepeatedXorMasker class."""

    def test_mask(self):
        # Sample input e6,97,a5 is U+65e5 in UTF-8
        masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x19\x68\x5a', result)

        # XOR with all-zero mask is the identity.
        masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\xe6\x97\xa5', result)

        # XOR of the input with itself yields zero bytes.
        masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x00\x00\x00', result)

    def test_mask_twice(self):
        # The mask position must carry over between mask() calls.
        masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
        # mask[0], mask[1], ... will be used.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x00\x7f\xff\x20\x00', result)
        # mask[2], mask[0], ... will be used for the next call.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x7f\xff\x20\x00\x7f', result)

    def test_mask_large_data(self):
        # Large inputs exercise any chunked/optimized masking path; compare
        # against a straightforward per-byte XOR.
        masker = util.RepeatedXorMasker('mASk')
        original = ''.join([chr(i % 256) for i in xrange(1000)])
        result = masker.mask(original)
        expected = ''.join(
            [chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
        self.assertEqual(expected, result)

        masker = util.RepeatedXorMasker('MaSk')
        first_part = 'The WebSocket Protocol enables two-way communication.'
        result = masker.mask(first_part)
        self.assertEqual(
            '\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
            '\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
            '\x08<\x05c',
            result)
        second_part = 'It has two parts: a handshake and the data transfer.'
        result = masker.mask(second_part)
        self.assertEqual(
            "('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
            "\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
            result)
def get_random_section(source, min_num_chunks):
    """Split *source* into consecutive random-size chunks.

    Each chunk is at most len(source) / min_num_chunks long, so at least
    *min_num_chunks* chunks are produced; joining the returned list
    reproduces *source* exactly.

    Args:
        source: the string (or bytes on Python 2) to split.
        min_num_chunks: lower bound on the number of chunks.

    Returns:
        A list of non-empty slices of *source*, in order.
    """
    chunks = []
    bytes_chunked = 0
    while bytes_chunked < len(source):
        remaining = len(source) - bytes_chunked
        # Fixes: '//' keeps the bound an int under Python 3 (randint rejects
        # floats), and max(1, ...) avoids randint(1, 0) raising ValueError
        # when len(source) < min_num_chunks.
        upper_bound = max(1, min(len(source) // min_num_chunks, remaining))
        chunk_size = random.randint(1, upper_bound)
        chunks.append(source[bytes_chunked:bytes_chunked + chunk_size])
        bytes_chunked += chunk_size
    return chunks
class InflaterDeflaterTest(unittest.TestCase):
    """A unittest for _Inflater and _Deflater class."""

    def test_inflate_deflate_default(self):
        # Compress the same input at two window sizes; outputs differ but
        # each must round-trip back to the original.
        input = b'hello' + '-' * 30000 + b'hello'
        inflater15 = util._Inflater(15)
        deflater15 = util._Deflater(15)
        inflater8 = util._Inflater(8)
        deflater8 = util._Deflater(8)

        compressed15 = deflater15.compress_and_finish(input)
        compressed8 = deflater8.compress_and_finish(input)

        inflater15.append(compressed15)
        inflater8.append(compressed8)

        self.assertNotEqual(compressed15, compressed8)
        self.assertEqual(input, inflater15.decompress(-1))
        self.assertEqual(input, inflater8.decompress(-1))

    def test_random_section(self):
        # Feed random-size chunks in, then read random-size chunks out and
        # verify the stream is reproduced exactly.
        random.seed(a=0)
        source = ''.join(
            [chr(random.randint(0, 255)) for i in xrange(100 * 1024)])

        chunked_input = get_random_section(source, 10)
        print "Input chunk sizes: %r" % [len(c) for c in chunked_input]

        deflater = util._Deflater(15)
        compressed = []
        for chunk in chunked_input:
            compressed.append(deflater.compress(chunk))
        compressed.append(deflater.compress_and_finish(''))

        chunked_expectation = get_random_section(source, 10)
        print ("Expectation chunk sizes: %r" %
               [len(c) for c in chunked_expectation])

        inflater = util._Inflater(15)
        inflater.append(''.join(compressed))
        for chunk in chunked_expectation:
            decompressed = inflater.decompress(len(chunk))
            self.assertEqual(chunk, decompressed)

        self.assertEqual('', inflater.decompress(-1))
# Run all tests in this module when executed directly.
if __name__ == '__main__':
    unittest.main()

# vi:sts=4 sw=4 et
| mpl-2.0 |
danxhuber/isoclassify | setup.py | 1 | 1463 | import os
from setuptools import setup
# Load version
__version__ = None
# exec(open('isoclassify/version.py').read())
# NOTE(review): the exec above is commented out, so setup() below receives
# version=None — presumably unintended; confirm before release.

# Load requirements
requirements = None
with open('requirements.txt') as file:
    requirements = file.read().splitlines()

# mwdust requires manual install to download maps
# TODO: add warning if `mwdust` is not installed
requirements.remove('mwdust')

# Configure data directory
if 'ISOCLASSIFY' in os.environ:
    # For those who've already configured environment variables
    datadir = os.environ['ISOCLASSIFY']
else:
    # Create isoclassify directory in user home
    datadir = os.path.join(os.path.expanduser('~'), '.isoclassify')

if not os.path.isdir(datadir):
    # If datadir doesn't exist, make a new one upon install
    os.mkdir(datadir)

# Package description
desc = 'Python codes to perform stellar classifications given any set of input observables.'

setup(
    name='isoclassify',
    version=__version__,
    description=desc,
    package_dir={
        'isoclassify': 'isoclassify',
        'isoclassify.direct': 'isoclassify/direct',
        'isoclassify.grid': 'isoclassify/grid',
    },
    packages=['isoclassify', 'isoclassify.direct', 'isoclassify.grid'],
    include_package_data=True, # <-- includes 'isoclassify/data'
    author='Daniel Huber',
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'isoclassify = isoclassify.isoclassify:main',
        ],
    }
)
| mit |
lebauce/artub | sysfont.py | 1 | 10522 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"sysfont, used in the font module to find system fonts"
import os, sys
#create simple version of the font name
def _simplename(name):
for char in '_ -':
name = name.replace(char, '')
name = name.lower()
name = name.replace('-', '')
name = name.replace("'", '')
return name
#insert a font and style into the font dictionary
def _addfont(name, bold, italic, font, fontdict):
if not fontdict.has_key(name):
fontdict[name] = {}
fontdict[name][bold, italic] = font
#read the fonts on windows
def initsysfonts_win32():
    """Scan the Windows registry for installed TrueType fonts.

    Returns a dict mapping simplified font name -> {(bold, italic): path}.
    """
    import _winreg
    fonts = {}
    # Style/vendor words stripped from names before matching.
    mods = 'demibold', 'narrow', 'light', 'unicode', 'bt', 'mt'
    fontdir = os.path.join(os.environ['WINDIR'], "Fonts")
    #this is a list of registry keys containing information
    #about fonts installed on the system.
    keys = []
    #find valid registry keys containing font information.
    possible_keys = [
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts",
        r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts"
    ]
    for key_name in possible_keys:
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name)
            keys.append(key)
        except WindowsError:
            pass
    for key in keys:
        # NOTE(review): fontdict is assigned but never used below —
        # presumably leftover; all entries go straight into `fonts`.
        fontdict = {}
        for i in range(_winreg.QueryInfoKey(key)[1]):
            try: name, font, t = _winreg.EnumValue(key,i)
            except EnvironmentError: break
            # try and handle windows unicode strings for some file names.
            # here are two documents with some information about it:
            # http://www.python.org/peps/pep-0277.html
            # https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA
            try:
                font = str(font)
            except UnicodeEncodeError:
                # MBCS is the windows encoding for unicode file names.
                try:
                    font = font.encode('MBCS')
                except:
                    # no goodness with str or MBCS encoding... skip this font.
                    continue
            if font[-4:].lower() not in [".ttf", ".ttc"]:
                continue
            if os.sep not in font:
                # registry stores bare filenames; anchor them in the font dir
                font = os.path.join(fontdir, font)
            if name[-10:] == '(TrueType)':
                name = name[:-11]
            name = name.lower().split()
            bold = italic = 0
            for m in mods:
                if m in name:
                    name.remove(m)
            if 'bold' in name:
                name.remove('bold')
                bold = 1
            if 'italic' in name:
                name.remove('italic')
                italic = 1
            name = ''.join(name)
            name=_simplename(name)
            _addfont(name, bold, italic, font, fonts)
    return fonts
#read of the fonts on osx (fill me in!)
def initsysfonts_darwin():
    """Walk the standard OS X font directories and collect fonts.

    Returns a dict mapping simplified font name -> {(bold, italic): path}.
    """
    paths = ['/Library/Fonts',
             '~/Library/Fonts',
             '/Local/Library/Fonts',
             '/Network/Library/Fonts']
    fonts = {}
    for p in paths:
        # Bug fix: '~' is never a literal directory name, so without
        # expanduser() the user's own font directory was always skipped.
        p = os.path.expanduser(p)
        if os.path.isdir(p):
            os.path.walk(p, _fontwalk, fonts)
    return fonts
#read the fonts from a unix 'fonts.cache-1' file
def read_unix_fontscache(dir, file, fonts):
    """Parse a fontconfig 'fonts.cache-1' file and register its fonts.

    Each useful line looks like: "file" n "Family:style=Bold Italic...".
    Entries are added to *fonts* via _addfont.
    """
    cache = open(os.path.join(dir, file))
    try:
        for line in cache.readlines():
            try:
                font, num, vals = line.split(' ', 2)
            except ValueError:
                continue
            font = font.replace('"', '')
            if (font[-4:].lower() not in [".ttf", ".ttc"]) and (font[-6:].lower() != ".dfont"):
                continue
            font = os.path.join(dir, font)
            vals = vals.split(':')
            # vals[0][1:] drops the opening quote of the family name.
            name = _simplename(vals[0][1:])
            bold = vals[1].find('Bold') >= 0
            italic = vals[1].find('Italic') >= 0
            _addfont(name, bold, italic, font, fonts)
    finally:
        # Bug fix: the file handle was previously never closed.
        cache.close()
#read the fonts from a unix 'fonts.dot' file
def read_unix_fontsdir(dir, file, fonts):
    """Parse an XLFD 'fonts.dir'/'fonts.scale' index and register its fonts.

    The first line is the entry count; each following line is
    "<filename> <xlfd-description>".  Entries are added via _addfont.
    """
    index = open(os.path.join(dir, file))
    try:
        numfonts = int(index.readline())
        for line in index.readlines():
            font, descr = (line.split(' ', 1) + ['', ''])[:2]
            if (font[-4:].lower() not in [".ttf", ".ttc"]) and (font[-6:].lower() != ".dfont"):
                continue
            font = os.path.join(dir, font)
            # XLFD fields: descr[2] = family, [3] = weight, [4] = slant.
            descr = descr.split('-', 13)
            name = _simplename(descr[2])
            bold = (descr[3] == 'bold')
            italic = (descr[4] == 'i')
            _addfont(name, bold, italic, font, fonts)
    finally:
        # Bug fix: the file handle was previously never closed.
        index.close()
#walk the path directory trees
def _fontwalk(fonts, path, files):
    "os.path.walk callback: parse the first recognized font index in a dir"
    handlers = (('fonts.scale', read_unix_fontsdir),
                ('fonts.dir', read_unix_fontsdir),
                ('fonts.cache-1', read_unix_fontscache))
    for index_name, reader in handlers:
        if index_name in files:
            reader(path, index_name, fonts)
            return
#read the fonts on unix
def initsysfonts_unix():
    "walk the standard X11 font directories and collect fonts"
    fonts = {}
    for fontpath in ('/usr/X11R6/lib/X11/fonts', '/usr/share/fonts'):
        if os.path.isdir(fontpath):
            os.path.walk(fontpath, _fontwalk, fonts)
    return fonts
#create alias entries
def create_aliases():
    """Map generic family names ('sans', 'serif', ...) to installed fonts.

    For each alias group, the first name actually present in Sysfonts
    supplies the styles; every other name in the group that is missing from
    Sysfonts is pointed at it via Sysalias.
    """
    aliases = (
        ('monospace', 'misc-fixed', 'courier', 'couriernew', 'console',
         'fixed', 'mono', 'freemono', 'bitstreamverasansmono',
         'verasansmono', 'monotype', 'lucidaconsole'),
        ('sans', 'arial', 'helvetica', 'swiss', 'freesans',
         'bitstreamverasans', 'verasans', 'verdana', 'tahoma'),
        ('serif', 'times', 'freeserif', 'bitstreamveraserif', 'roman',
         'timesroman', 'timesnewroman', 'dutch', 'veraserif',
         'georgia'),
        ('wingdings', 'wingbats'),
    )
    # Fixes: dict.has_key() is Python-2-only (use `in`); the loop variable
    # no longer shadows the builtin `set`; removed the unused `fname` local.
    for alias_group in aliases:
        found = None
        for name in alias_group:
            if name in Sysfonts:
                found = Sysfonts[name]
                break
        if not found:
            continue
        for name in alias_group:
            if name not in Sysfonts:
                Sysalias[name] = found
# Global registries filled in by initsysfonts(): simplified name ->
# {(bold, italic): path}, and generic alias name -> style dict.
Sysfonts = {}
Sysalias = {}
#initialize it all, called once
def initsysfonts():
    "locate the system fonts for the current platform (called once)"
    platform = sys.platform
    if platform == 'win32':
        found = initsysfonts_win32()
    elif platform == 'darwin':
        found = initsysfonts_darwin()
    else:
        found = initsysfonts_unix()
    Sysfonts.update(found)
    create_aliases()
    if not Sysfonts:
        # dummy entry so we don't try to reinitialize on every call
        Sysfonts[None] = None
#the exported functions
def SysFont(name, size, bold=False, italic=False):
    """pygame.font.SysFont(name, size, bold=False, italic=False) -> Font
    create a pygame Font from system font resources
    This will search the system fonts for the given font
    name. You can also enable bold or italic styles, and
    the appropriate system font will be selected if available.
    This will always return a valid Font object, and will
    fallback on the builtin pygame font if the given font
    is not found.
    Name can also be a comma separated list of names, in
    which case set of names will be searched in order. Pygame
    uses a small set of common font aliases, if the specific
    font you ask for is not available, a reasonable alternative
    may be used.
    """
    import pygame.font

    if not Sysfonts:
        initsysfonts()

    # Track whether the chosen font file already bakes in the style, so we
    # only apply synthetic bold/italic when it doesn't.
    gotbold = gotitalic = False
    fontname = None
    if name:
        allnames = name
        for name in allnames.split(','):
            name = _simplename(name)
            styles = Sysfonts.get(name)
            if not styles:
                styles = Sysalias.get(name)
            if styles:
                # NOTE(review): if this styles dict has neither the requested
                # style nor a (False, False) entry, fontname stays None and
                # this loop never terminates — confirm whether that state is
                # reachable.
                while not fontname:
                    plainname = styles.get((False, False))
                    fontname = styles.get((bold, italic))
                    if plainname != fontname:
                        gotbold = bold
                        gotitalic = italic
                    elif not fontname:
                        fontname = plainname
            if fontname: break

    # fontname may still be None here: pygame.font.Font then falls back to
    # the builtin font.
    font = pygame.font.Font(fontname, size)
    if bold and not gotbold:
        font.set_bold(1)
    if italic and not gotitalic:
        font.set_italic(1)

    return font
def get_fonts():
    """pygame.font.get_fonts() -> list
    get a list of system font names
    Returns the list of all found system fonts. Note that
    the names of the fonts will be all lowercase with spaces
    removed. This is how pygame internally stores the font
    names for matching.
    """
    if not Sysfonts:
        initsysfonts()
    return Sysfonts.keys()
def match_font(name, bold=0, italic=0):
    """pygame.font.match_font(name, bold=0, italic=0) -> name
    find the filename for the named system font
    This performs the same font search as the SysFont()
    function, only it returns the path to the TTF file
    that would be loaded. The font name can be a comma
    separated list of font names to try.
    If no match is found, None is returned.
    """
    if not Sysfonts:
        initsysfonts()

    fontname = None
    allnames = name
    for name in allnames.split(','):
        name = _simplename(name)
        styles = Sysfonts.get(name)
        if not styles:
            styles = Sysalias.get(name)
        if styles:
            # Degrade the request step by step: drop italic, then bold,
            # then fall back to any style at all.
            while not fontname:
                fontname = styles.get((bold, italic))
                if italic:
                    italic = 0
                elif bold:
                    bold = 0
                elif not fontname:
                    fontname = styles.values()[0]
        if fontname: break
    return fontname
| gpl-2.0 |
asquared/openreplay | console/openreplay_console.py | 1 | 18254 | import wx
import os, sys
import signal
import traceback
# Live pipeline processes, keyed by child pid (see sigchld_handler).
all_processes = { }
# Maximum number of simultaneous capture buffers the console supports.
MAX_CAPTURES = 8
def sigchld_handler(signum, frame):
(pid, status) = os.wait( )
if all_processes.has_key(pid):
all_processes[pid].on_dead(status)
else:
print "unknown child died?"
def sigint_handler(signum, frame):
    """SIGINT handler: stop every tracked pipeline, then exit."""
    global all_processes
    # Bug fix: `all_processes.values` (without parentheses) iterated over
    # the bound method object itself, raising TypeError; we need the dict's
    # values.
    for x in all_processes.values( ):
        x.stop( )
    sys.exit(0)
# Install the child-reaper and shutdown handlers.
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGINT, sigint_handler)

# Paths to the helper binaries used to build capture/playout pipelines.
SHELL='/bin/sh'
MJPEG_INGEST_PATH='core/mjpeg_ingest'
SDL_GUI_PATH='core/sdl_gui'
BMDPLAYOUTD_PATH='core/bmdplayoutd'
FFMPEG_PATH='/usr/local/bin/ffmpeg'
MPLAYER_PATH='/usr/local/bin/mplayer'
SSH_PATH='/usr/bin/ssh'
SSH_IDENTITY='id_rsa.openreplay'

# Process life-cycle states stored in OpenreplayProcess._status.
NOT_STARTED = 0
RUNNING = 1
EXITED_NORMALLY = 2
EXITED_WITH_SIGNAL = 3
EXITED_WITH_ERROR = 4
FORK_FAILED = 5
INTERRUPTED = 6

# Human-readable labels for the states above.
STATUS_DICT = {
    NOT_STARTED : "Not started",
    RUNNING : "Running",
    EXITED_NORMALLY : "Exited normally",
    EXITED_WITH_SIGNAL : "Terminated",
    EXITED_WITH_ERROR : "Exited with error",
    FORK_FAILED : "Fork failed",
    INTERRUPTED : "Awaiting termination"
}
class OpenreplayProcess(object):
    """Base class for a forked pipeline of external programs.

    Subclasses override pipeline() to describe the commands; start() forks
    a group-leader child that wires the commands together with pipes, and
    stop() signals the whole process group.
    """

    def __init__(self):
        self._pid = -1
        self._status = NOT_STARTED
        self._signal = None
        self._exitstatus = None

    def reinit(self):
        """Whether this process can be re-initialized; base class says no."""
        return False

    def configure(self):
        """Interactive configuration hook; no-op in the base class."""
        pass

    def config_report(self):
        """Describe the current configuration (overridden by subclasses)."""
        return 'null'

    def config_complete(self):
        """Whether configuration is sufficient to start; base class: yes."""
        return True

    def reset(self):
        """Forget any previous run so the process can be started again."""
        self._pid = -1
        self._status = NOT_STARTED
        self._signal = None
        self._exitstatus = None

    def on_dead(self, status):
        """Called from sigchld_handler with the raw wait() status."""
        global all_processes
        del all_processes[self._pid]
        if os.WIFSIGNALED(status):
            self._status = EXITED_WITH_SIGNAL
            self._signal = os.WTERMSIG(status)
        else:
            self._exitstatus = os.WEXITSTATUS(status)
            if self._exitstatus == 0:
                self._status = EXITED_NORMALLY
            else:
                self._status = EXITED_WITH_ERROR

    def sigchld(self, signal, frame):
        """SIGCHLD handler installed in the pipeline child; reaping is done
        in the wait loop instead."""
        pass

    def sigint(self, signal, frame):
        """SIGINT handler installed in the pipeline child."""
        print "child got SIGINT: waiting for things to shut down"

    def pipeline_child(self):
        """Body of the forked group leader: spawn each pipeline stage,
        chaining stdout -> stdin with pipes, then wait for them all."""
        # child process must close STDIN, otherwise it gets SIGTSTP
        devnull = os.open('/dev/null', os.O_RDONLY)
        os.dup2(devnull, 0)
        signal.signal(signal.SIGCHLD, self.sigchld)
        signal.signal(signal.SIGINT, self.sigint)
        os.setpgid(0, 0)
        try:
            # set up pipeline
            pipeline = self.pipeline( )
            last_fd = devnull
            children = []
            # spawn the child processes
            for x in range(len(pipeline)):
                if x == len(pipeline) - 1:
                    # last stage inherits our stdout
                    new_stdout = None
                    next_fd = None
                else:
                    # make pipe for stdout
                    (next_fd, new_stdout) = os.pipe( )
                try:
                    child_pid = os.fork( )
                    if child_pid:
                        children.append(child_pid)
                        os.close(last_fd)
                        last_fd = next_fd
                        if new_stdout is not None:
                            os.close(new_stdout)
                    else:
                        # child process - redirect stdin and stdout then go go go
                        os.dup2(last_fd, 0)
                        if new_stdout is not None:
                            os.dup2(new_stdout, 1)
                        self.exec_with_args(pipeline[x][0], pipeline[x][1])
                except OSError:
                    raise
            status = 0
            while len(children) > 0:
                try:
                    (pid, status) = os.wait( )
                    children.remove(pid)
                except OSError, e:
                    if e.errno == 4:
                        # interrupted system call - just try again...
                        pass
                    else:
                        # something dire happened so re-raise it
                        raise
            # exit with the last child's raw wait status
            os._exit(status) # not 100% perfect...
        except:
            traceback.print_exc( )
            # don't forget to kill the children!
            os.kill(0, signal.SIGTERM)
            os._exit(1)

    def exec_with_args(self, cmd, args):
        """Replace this process image with cmd, argv[0]=cmd plus args."""
        os.execl(cmd, cmd, *args)

    def start(self):
        """Fork the pipeline group leader and register it for SIGCHLD."""
        global all_processes
        self._status = RUNNING
        try:
            self._pid = os.fork( )
        except OSError:
            print "fork failed"
            self._status = FORK_FAILED
            return
        if self._pid == 0:
            try:
                self.pipeline_child( )
            except:
                traceback.print_exc( )
                os._exit(1)
        else:
            # parent process (make the child process a group leader)
            print "parent process: setting process group ID"
            os.setpgid(self._pid, 0)
            print "parent: done!"
            all_processes[self._pid] = self

    def pipeline(self):
        # should return a list of tuples in the form (program, [ args ] ).
        # Each program's output will be piped into the next program's input.
        return []

    def status(self):
        """Return the current life-cycle state (see STATUS_DICT)."""
        return self._status

    def stop(self):
        """Signal the pipeline's process group: SIGINT first, SIGKILL if
        it was already asked to stop."""
        if self._status == RUNNING:
            os.kill(-self._pid, signal.SIGINT)
            self._status = INTERRUPTED
        elif self._status == INTERRUPTED:
            # KILL DASH NINE! No more CPU time!
            # Kill dash nine and your process(-group) is mine!
            os.kill(-self._pid, signal.SIGKILL)
        else:
            print "tried to stop something that wasn't running?"

    @classmethod
    def name(cls):
        """Human-readable name shown in the UI."""
        return 'null'
class CaptureProcess(OpenreplayProcess):
    """Base class for capture sources that write into a buffer file."""

    def __init__(self):
        super(CaptureProcess, self).__init__( )
        self._buf_file = None

    def get_buffer(self):
        """Return the configured buffer file path (or None)."""
        return self._buf_file

    def set_buffer(self, buf_file):
        self._buf_file = buf_file

    def choose_buffer(self, parent_win=None):
        """Ask the user to pick the buffer file via a wx file dialog."""
        dlg = wx.FileDialog(parent_win, 'Select Buffer File', style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self._buf_file = dlg.GetPath( )
        else:
            print "buffer selection cancelled by user"

    def config_complete(self):
        """A capture is configured once a buffer file has been chosen."""
        if self._buf_file is not None:
            return True
        else:
            return False

    def config_report(self):
        if self.config_complete( ):
            return ["buffer file: " + self._buf_file]
        else:
            return ["Configuration incomplete"]
class LocalFileCaptureProcess(CaptureProcess):
    """Capture source that transcodes a local video file with ffmpeg."""

    def __init__(self):
        super(LocalFileCaptureProcess, self).__init__( )
        self._filename = None

    def configure(self, parent_win = None):
        """Ask for the input video file, then for the buffer file."""
        dlg = wx.FileDialog(parent_win, 'Select Video File', style=wx.FD_OPEN)
        if dlg.ShowModal( ) == wx.ID_OK:
            self._filename = dlg.GetPath( )
            self.choose_buffer( )
        else:
            print "input file selection cancelled by user"

    def config_complete(self):
        if super(LocalFileCaptureProcess, self).config_complete( ):
            if self._filename is not None:
                return True
        return False

    def pipeline(self):
        """ffmpeg transcodes the file to MJPEG, piped into mjpeg_ingest."""
        if self.config_complete( ):
            return [
                (FFMPEG_PATH, ["-i", self._filename, "-f", "mjpeg", "-qscale", "5", "-s", "720x480", "-"]),
                (MJPEG_INGEST_PATH, [ self.get_buffer( ) ])
            ]
        else:
            return []

    def config_report(self):
        if self.config_complete( ):
            return [
                "Local file capture from " + self._filename
            ] + super(LocalFileCaptureProcess, self).config_report( )
        else:
            return ["Configuration incomplete"]

    @classmethod
    def name(cls):
        return 'Capture from local file'
class SSHFileCaptureProcess(CaptureProcess):
def __init__(self):
super(SSHFileCaptureProcess, self).__init__( )
self._filename = None
self._hostname = None
def configure(self, parent_win = None):
dlg1 = wx.TextEntryDialog(parent_win,
'Please enter the SSH username and server (i.e. armena@127.0.0.1)', 'Remote Capture Configuration',
'armena@127.0.0.1'
)
if dlg1.ShowModal( ) == wx.ID_OK:
new_hostname = dlg1.GetValue( )
else:
print "account/hostname configuration cancelled by user"
return
dlg2 = wx.TextEntryDialog(parent_win,
'Please enter the filename to encode from', 'Remote Capture Configuration',
'fail.mov'
)
if dlg2.ShowModal( ) == wx.ID_OK:
self._filename = dlg2.GetValue( )
self._hostname = new_hostname
else:
print "filename configuration cancelled by user"
return
self.choose_buffer( )
def config_complete(self):
if super(SSHFileCaptureProcess, self).config_complete( ):
if self._filename is not None:
return True
return False
def pipeline(self):
if self.config_complete( ):
return [
# SSH to remote box and start ffmpeg
(SSH_PATH, ["-i", SSH_IDENTITY, self._hostname, "ffmpeg", "-i", self._filename, "-f", "mjpeg", "-qscale", "5", "-s", "720x480", "-"]),
# pipe into mjpeg_ingest
(MJPEG_INGEST_PATH, [ self.get_buffer( ) ])
]
else:
return []
def config_report(self):
if self.config_complete( ):
return [
"Local file capture from " + self._filename
] + super(SSHFileCaptureProcess, self).config_report( )
else:
return ["Configuration incomplete"]
@classmethod
def name(cls):
return 'Capture from remote file via SSH'
class ConsumerProcess(OpenreplayProcess):
    """Base class for processes that consume one or more capture buffers."""
    def __init__(self):
        super(ConsumerProcess, self).__init__( )
        # List of CaptureProcess instances feeding this consumer.
        self._capture_processes = None
    def set_capture_processes(self, processes):
        # Record the capture processes whose buffers we will read.
        self._capture_processes = processes
    def config_complete(self):
        # Configured when at least one capture process is attached and
        # every attached capture process is itself fully configured.
        if not self._capture_processes:
            return False
        return bool(self.capture_processes_configs_complete( ))
    def config_report(self):
        # Consumers have no extra configuration of their own to report.
        return []
    def capture_processes_configs_complete(self):
        # True iff every attached capture process reports complete config.
        return all(proc.config_complete( ) for proc in self._capture_processes)
class SDLGUIProcess(ConsumerProcess):
    """Consumer that launches the SDL-based control GUI."""
    def pipeline(self):
        # sdl_gui takes every capture buffer as a command-line argument.
        buffers = [proc.get_buffer( ) for proc in self._capture_processes]
        return [(SDL_GUI_PATH, buffers)]
    @classmethod
    def name(cls):
        return 'SDL GUI'
class MplayerPlayoutProcess(ConsumerProcess):
    """Consumer that plays out buffers through bmdplayoutd piped to MPlayer."""
    def pipeline(self):
        buffers = [proc.get_buffer( ) for proc in self._capture_processes]
        return [
            (BMDPLAYOUTD_PATH, buffers),
            (MPLAYER_PATH, ['-vo', 'xv', '-demuxer', 'rawvideo', '-rawvideo', 'uyvy:ntsc', '-'])
        ]
    @classmethod
    def name(cls):
        return 'Stdout Playout Daemon to MPlayer'
# Process classes offered in each section of the main window.
INPUTS = [ LocalFileCaptureProcess, SSHFileCaptureProcess ]  # video capture sources
GUIS = [ SDLGUIProcess ]                                     # control interfaces
PLAYOUTS = [ MplayerPlayoutProcess ]                         # playout consumers
class ClassChooser(wx.ComboBox):
    """Combo box listing process classes by their name(), optionally with a
    leading 'None' entry meaning "no process selected"."""
    def __init__(self, parent, classes, allow_none=False):
        # At least one selectable class is required.
        assert len(classes) > 0
        self._classes = classes
        self._names = [ klass.name( ) for klass in classes ]
        if allow_none:
            # Slot 0 becomes 'None' so GetClass() can return None.
            self._names = [ 'None' ] + self._names
            self._classes = [ None ] + self._classes
        wx.ComboBox.__init__(self, parent, -1, choices=self._names)
        self.SetSelection(0)
    def GetClass(self):
        # Map the current selection back to its class (or None).
        return self._classes[self.GetSelection( )]
class StatusWidget(wx.StaticText):
    """Static text widget that displays a process status code as text."""
    def __init__(self, parent):
        wx.StaticText.__init__(self, parent, -1)
    def SetStatus(self, stat_code):
        # Map the status code through the module-level STATUS_DICT table.
        # (The original declared "global STATUS_DICT", which is a no-op for
        # a read-only access and has been dropped.)
        self.SetLabel(STATUS_DICT[stat_code])
    def ClearStatus(self):
        # Blank the label when there is no process to report on.
        self.SetLabel('')
# Readable aliases for ProcessPanel's allow_none flag.
ALLOW_NONE=True
REQUIRE_SOMETHING=False
class ProcessPanel(wx.Panel):
    """One row of the console: status text, a process-class chooser and
    Start/Kill/Configure buttons for a single managed process."""
    def __init__(self, parent, classes, allow_none=False):
        wx.Panel.__init__(self, parent, -1)
        self._process = None   # currently instantiated process (or None)
        self._notify = None    # callback invoked when the class changes
        self.status_widget = StatusWidget(self)
        self.class_chooser = ClassChooser(self, classes, allow_none)
        self.start_button = wx.Button(self, -1, 'Start')
        self.stop_button = wx.Button(self, -1, 'Kill')
        self.configure_button = wx.Button(self, -1, 'Configure')
        self.Bind(wx.EVT_BUTTON, self.OnStart, self.start_button)
        self.Bind(wx.EVT_BUTTON, self.OnStop, self.stop_button)
        self.Bind(wx.EVT_BUTTON, self.OnConfigure, self.configure_button)
        self.Bind(wx.EVT_COMBOBOX, self.OnChangeClass, self.class_chooser)
        # Lay the widgets out left-to-right in one row.
        sz = wx.BoxSizer(wx.HORIZONTAL)
        sz.Add(self.status_widget, 1, wx.ALIGN_CENTER | wx.ALL, 1)
        sz.Add(self.class_chooser, 1, wx.ALIGN_CENTER | wx.ALL, 1)
        sz.Add(self.start_button, 0, wx.ALIGN_CENTER | wx.ALL, 1)
        sz.Add(self.stop_button, 0, wx.ALIGN_CENTER | wx.ALL, 1)
        sz.Add(self.configure_button, 0, wx.ALIGN_CENTER | wx.ALL, 1)
        self.SetSizer(sz)
        self.SetAutoLayout(1)
        self.update_class( )
    def OnStart(self, event):
        # "Start" button: launch the configured process, then refresh.
        if self._process is not None:
            self._process.start( )
        self.poll( )
    def OnStop(self, event):
        # "Kill" button: stop the running process, then refresh.
        if self._process is not None:
            self._process.stop( )
        self.poll( )
    def OnConfigure(self, event):
        # "Configure" button: run the process's configuration dialogs.
        if self._process is not None:
            self._process.configure(self)
        self.poll( )
    def OnChangeClass(self, event):
        self.update_class( )
    def poll(self):
        # Refresh the status text and enable/disable the buttons according
        # to the process's configuration and run state.
        if self._process is not None:
            status = self._process.status( )
            self.status_widget.SetStatus(status)
            if not self._process.config_complete( ):
                self.class_chooser.Enable( )
                self.configure_button.Enable( )
                self.start_button.Disable( )
                self.stop_button.Disable( )
            elif status == RUNNING or status == INTERRUPTED:
                # It's running, or we're waiting for it to stop.
                self.stop_button.Enable( )
                self.start_button.Disable( )
                self.class_chooser.Disable( )
                self.configure_button.Disable( )
            else:
                # it's not running, or stopped for some reason or other
                self.start_button.Enable( )
                self.configure_button.Enable( )
                self.class_chooser.Enable( )
                self.stop_button.Disable( )
        else:
            # No process selected: only the chooser is active.
            self.status_widget.ClearStatus( )
            self.start_button.Disable( )
            self.stop_button.Disable( )
            self.configure_button.Disable( )
            self.class_chooser.Enable( )
    def update_class(self):
        # Instantiate the newly selected class (or clear the slot), notify
        # the owner, and refresh the row.
        klass = self.class_chooser.GetClass( )
        if klass is None:
            self._process = None
        else:
            self._process = klass( )
        if self._notify is not None:
            self._notify(self)
        self.poll( )
    def get_process(self):
        return self._process
    def register_notify(self,fn):
        # fn(panel) will be called whenever the selected class changes.
        self._notify = fn
class MainFrame(wx.Frame):
    """Top-level console window: a bank of capture rows plus one GUI row
    and one playout row, refreshed by a 1-second timer."""
    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'Openreplay Console')
        sz = wx.BoxSizer(wx.VERTICAL)
        # construct input panels
        sbox = wx.StaticBox(self, -1, 'Video Capture')
        sz1 = wx.StaticBoxSizer(sbox, wx.VERTICAL)
        self.input_process_panels = []
        for x in range(MAX_CAPTURES):
            pp = ProcessPanel(self, INPUTS, ALLOW_NONE);
            pp.register_notify(self.inputs_changed)
            sz1.Add(pp, 0, wx.EXPAND | wx.ALL, 1)
            self.input_process_panels.append(pp)
        sz.Add(sz1, 0, wx.EXPAND | wx.ALL, 1)
        # GUI
        sbox = wx.StaticBox(self, -1, 'Control Interface')
        sz1 = wx.StaticBoxSizer(sbox, wx.VERTICAL)
        self.gui_panel = ProcessPanel(self, GUIS, REQUIRE_SOMETHING)
        sz1.Add(self.gui_panel, 0, wx.EXPAND | wx.ALL, 1)
        sz.Add(sz1, 0, wx.EXPAND | wx.ALL, 1)
        # playout
        sbox = wx.StaticBox(self, -1, 'Playout')
        sz1 = wx.StaticBoxSizer(sbox, wx.VERTICAL)
        self.playout_panel = ProcessPanel(self, PLAYOUTS, REQUIRE_SOMETHING)
        sz1.Add(self.playout_panel, 0, wx.EXPAND | wx.ALL, 1)
        sz.Add(sz1, 0, wx.EXPAND | wx.ALL, 1)
        self.SetSizer(sz)
        self.Fit( )
        # construct a timer for the periodic polling
        self.timer = wx.PyTimer(self.poll_children)
        self.timer.Start(1000)
        self.poll_children( )
    def poll_children(self):
        # Periodic refresh of every row's status and button states.
        for x in self.input_process_panels:
            x.poll( )
        self.gui_panel.poll( )
        self.playout_panel.poll( )
    def inputs_changed(self, in_panel):
        # A capture row changed: hand the list of active capture processes
        # to both consumers so their configuration state updates.
        args = []
        gui = self.gui_panel.get_process( )
        playout = self.playout_panel.get_process( )
        for panel in self.input_process_panels:
            process = panel.get_process( )
            if process is not None:
                args.append(process)
        if gui is not None:
            gui.set_capture_processes(args)
            self.gui_panel.poll( )
        if playout is not None:
            playout.set_capture_processes(args)
            self.playout_panel.poll( )
class OpenreplayConsoleApp(wx.App):
    """wx application wrapper that creates and shows the main frame."""
    def OnInit(self):
        main_frame = MainFrame()
        main_frame.Show(True)
        self.SetTopWindow(main_frame)
        return True
# Instantiate the wx application and hand control to its event loop.
app = OpenreplayConsoleApp(0)
app.MainLoop( )
| gpl-3.0 |
Bashar/django | django/db/models/__init__.py | 82 | 2439 | from functools import wraps
import sys
import warnings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured # NOQA
from django.db.models.query import Q, QuerySet, Prefetch # NOQA
from django.db.models.expressions import F # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.base import Model # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.fields import * # NOQA
from django.db.models.fields.subclassing import SubfieldBase # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.related import ( # NOQA
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel)
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError)
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models import signals # NOQA
from django.utils.deprecation import RemovedInDjango19Warning
def permalink(func):
    """
    Decorator that calls urlresolvers.reverse() to return a URL using
    parameters returned by the decorated function "func".

    "func" should be a function that returns a tuple in one of the
    following formats:
        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)
    """
    from django.core.urlresolvers import reverse

    @wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        viewname = result[0]
        # result[1:3] yields (viewargs,) or (viewargs, viewkwargs).
        return reverse(viewname, None, *result[1:3])
    return inner
# Deprecated aliases for functions were exposed in this module.
def make_alias(function_name):
    """Build a deprecated wrapper that forwards to django.db.models.loading."""
    # function_name is captured by the closure; build the message eagerly.
    message = "django.db.models.%s is deprecated." % function_name
    def alias(*args, **kwargs):
        warnings.warn(message, RemovedInDjango19Warning, stacklevel=2)
        # Importing loading raises a second deprecation warning.
        from . import loading
        return getattr(loading, function_name)(*args, **kwargs)
    alias.__name__ = function_name
    return alias
# Install the deprecated aliases as attributes of this module, then delete
# the temporary names so they don't leak into the public namespace.
this_module = sys.modules['django.db.models']
for function_name in ('get_apps', 'get_app_path', 'get_app_paths', 'get_app',
        'get_models', 'get_model', 'register_models'):
    setattr(this_module, function_name, make_alias(function_name))
del this_module, make_alias, function_name
| bsd-3-clause |
stephenmcd/ratemyflight | ratemyflight/scripts/create_project.py | 1 | 1581 | #!/usr/bin/env python
import os
import shutil
import sys
import ratemyflight
class ProjectException(Exception):
    """Raised when project creation cannot proceed (bad usage or name clash)."""
    pass
def create_project():
    """
    Copies the contents of the project_template directory to a new directory
    specified as an argument to the command line.
    """
    # Validate usage: exactly one argument, and it must not look like an
    # option flag.
    script_name = os.path.basename(sys.argv[0])  # NOTE(review): unused in original; kept for parity
    usage_text = "Usage: ratemyflight project_name"
    usage_text += "\nProject names beginning with \"-\" are illegal."
    if len(sys.argv) != 2:
        raise ProjectException(usage_text)
    project_name = sys.argv[1]
    if project_name.startswith("-"):
        raise ProjectException(usage_text)
    # Refuse names that shadow an importable Python module/package.
    try:
        __import__(project_name)
    except ImportError:
        pass
    else:
        raise ProjectException("'%s' conflicts with the name of an existing "
            "Python module and cannot be used as a project name. Please try "
            "another name." % project_name)
    # Copy the template tree into the working directory and activate the
    # local settings file.
    package_dir = os.path.dirname(os.path.abspath(ratemyflight.__file__))
    src = os.path.join(package_dir, "project_template")
    dst = os.path.join(os.getcwd(), project_name)
    shutil.copytree(src, dst)
    shutil.move(os.path.join(dst, "local_settings.py.template"),
                os.path.join(dst, "local_settings.py"))
# Script entry point: show ProjectException messages cleanly instead of a
# traceback.  (Python 2 "except ..., e" syntax.)
if __name__ == "__main__":
    try:
        create_project()
    except ProjectException, e:
        print
        print e
        print
| bsd-2-clause |
telwertowski/Books-Mac-OS-X | Versions/Books_3.0b5/BibTeX Importer.app/Contents/Resources/bibtex2xml.py | 6 | 18190 | #!/usr/bin/python
# Time-stamp: "2006-01-06 12:37:03 vidar"
"""
Decoder for bibliographic data, BibTeX
Usage: python bibtex2xml.py bibfile.bib > bibfile.xml
(c) Vidar Bronken Gundersen, Sara Sprenkle
http://bibtexml.sourceforge.net/
Reuse approved as long as this notification is kept.
License: http://creativecommons.org/licenses/GPL/2.0/
Contributions/thanks to:
Thomas Karl Schwaerzler, read stdin
Egon Willighagen, http://jreferences.sf.net/
Richard Mahoney, for providing a test case
This is Sara Sprenkle's rewrite of our original script, which
is changed to be more robust and handle more bibtex features:
3. Allow spaces between @type and first {
4. 'author' fields with multiple authors split by ' and '
are put in separate xml 'bibtex:person' tags.
5. Option for Titles: words are capitalized
only if first letter in title or capitalized inside braces
6. Removes braces from within field values
7. Ignores comments in bibtex file (including @comment{ or % )
8. Replaces some special latex tags, e.g., replaces ~ with ' '
9. Handles bibtex @string abbreviations
--> includes bibtex's default abbreviations for months
--> does concatenation of abbr # ' more ' and ' more ' # abbr
10. Handles @type( ... ) or @type{ ... }
11. The keywords field is split on , or ; and put into
separate xml 'bibtex:keywords' tags
12. Ignores @preamble
replace ':' with '-' for bibtex:entry@id: unique-ids cannot contain ':'
Known Limitations
1. Does not transform Latex encoding like math mode
and special latex symbols.
2. Does not parse author fields into first and last names.
E.g., It does not do anything special to an author whose name is
in the form LAST_NAME, FIRST_NAME In'author' tag, will show up as
<bibtex:author>LAST_NAME, FIRST_NAME</bibtex:author>
3. Does not handle 'crossref' fields other than to print
<bibtex:crossref>...</bibtex:crossref>
4. Does not inform user of the input's format errors.
You just won't be able to transform the file later with XSL
Create error.log file?
5. Special treatment of
howpublished = '\url{http://www.cs.duke.edu/ari/crisp/}',
6. document functions with docstrings
You will have to manually edit the XML output if you need to handle
these (and unknown) limitations.
"""
import string, re
# set of valid name characters
valid_name_chars = '[\w\-:]'

# define global regular expression variables
# authors are separated by " and "
author_rex = re.compile('\s+and\s+')
# strips { and } characters
rembraces_rex = re.compile('[{}]')
# finds {Braced} phrases whose capitalization must be preserved
capitalize_rex = re.compile('({\w*})')

# used by bibtexkeywords(data)
keywords_rex = re.compile('[,;]')

# used by concat_line(line)
concatsplit_rex = re.compile('\s*#\s*')

# split on {, }, or " in verify_out_of_braces
delimiter_rex = re.compile('([{}"])',re.I)

# "field = value"; data_rex additionally stops the value at a comma
field_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
data_rex = re.compile('\s*(\w*)\s*=\s*([^,]*),?')
#
# return the string parameter without braces
#
def removebraces(text):
    """Return *text* with every '{' and '}' character removed."""
    # The parameter was named 'str', shadowing the builtin; renamed.  The
    # pattern is inlined so the helper has no hidden global dependency
    # (behaviour is identical to the shared rembraces_rex).
    return re.sub('[{}]', '', text)
# fix author so that it creates multiple authors,
# split by "and"
def bibtexauthor(data):
    """Return the <bibtex:author> XML for an author field value."""
    authors = author_rex.split(data)
    if len(authors) > 1:
        # One <bibtex:person> element per author, each on its own line.
        people = ['<bibtex:person>' + removebraces(name.strip()) + '</bibtex:person>'
                  for name in authors]
        body = '\n' + '\n'.join(people) + '\n'
    else:
        body = removebraces(authors[0])
    return ('<bibtex:author>' + body + '</bibtex:author>').strip()
# @return the bibtex for the title
# @param data --> title string
# braces are removed from title
def bibtextitle(data):
    """Return the <bibtex:title> XML element for a title field value."""
    cleaned = removebraces(data).strip()
    return '<bibtex:title>' + cleaned + '</bibtex:title>'
# @return the bibtex for the keyword
# keywords are assumed to be delimited by , or ;
def bibtexkeyword(data):
    """Return one <bibtex:keywords> element per , or ; separated keyword."""
    tags = ['<bibtex:keywords>' + removebraces(kw.strip()) + '</bibtex:keywords>'
            for kw in keywords_rex.split(data)]
    return '\n'.join(tags).strip()
# data = title string
# @return the capitalized title (first letter is capitalized),
# rest are capitalized only if capitalized inside braces
def capitalizetitle(data):
    """Return *data* lower-cased except for the first word (capitalized)
    and any {braced} phrases, whose capitalization is kept (braces removed)."""
    # Pattern is inlined (same as the module-level capitalize_rex) so the
    # function stands alone; braced single words become their own entries.
    title_list = re.split('({\w*})', data)
    title = ''
    count = 0
    for phrase in title_list:
        # str.lstrip() replaces the Python-2-only string.lstrip() function;
        # identical behaviour on both Python 2 and 3.
        check = phrase.lstrip()
        # keep phrase's capitalization the same
        if check.find('{') == 0:
            title = title + re.sub('[{}]', '', phrase)
        else:
            # first word --> capitalize first letter (after spaces)
            if count == 0:
                title = title + check.capitalize()
            else:
                title = title + phrase.lower()
        count = count + 1
    return title
#
# print the XML for the transformed "filecontents_source"
#
def bibtexdecoder(filecontents_source):
    """Translate washed BibTeX lines into BibTeXML lines.

    Expects input already normalized by bibtexwasher(): one entry header,
    field, or closing brace per line, each line ending with a newline.
    Returns a list of XML lines.
    """
    filecontents = []
    endentry = ''

    # want @<alphanumeric chars><spaces>{<spaces><any chars>,
    pubtype_rex = re.compile('@(\w*)\s*{\s*(.*),')
    endtype_rex = re.compile('}\s*$')
    endtag_rex = re.compile('^\s*}\s*$')

    bracefield_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
    bracedata_rex = re.compile('\s*(\w*)\s*=\s*{(.*)},?')

    quotefield_rex = re.compile('\s*(\w*)\s*=\s*(.*)')
    quotedata_rex = re.compile('\s*(\w*)\s*=\s*"(.*)",?')

    for line in filecontents_source:
        line = line[:-1]

        # encode character entities.  The previous code replaced '&' with
        # '&' (a no-op) -- the XML escapes had been lost; restore them.
        # str.replace() also supersedes the Python-2-only string.replace().
        line = line.replace('&', '&amp;')
        line = line.replace('<', '&lt;')
        line = line.replace('>', '&gt;')

        # start item: publication type (store for later use)
        if pubtype_rex.match(line):
            # want @<alphanumeric chars><spaces>{<spaces><any chars>,
            arttype = pubtype_rex.sub('\g<1>', line)
            arttype = arttype.lower()
            artid = pubtype_rex.sub('\g<2>', line)
            # unique ids cannot contain ':'
            artid = artid.replace(':', '-')
            endentry = '</bibtex:' + arttype + '>' + '\n</bibtex:entry>\n'
            line = '<bibtex:entry id="' + artid + '">\n' + \
                   '<bibtex:' + arttype + '>'

        # end item
        # end entry if just a }
        if endtype_rex.match(line):
            line = endtag_rex.sub(endentry, line)

        field = ''
        data = ''
        # field, publication info
        # field = {data} entries
        if bracedata_rex.match(line):
            field = bracefield_rex.sub('\g<1>', line)
            field = field.lower()
            data = bracedata_rex.sub('\g<2>', line)
        # field = "data" entries
        elif quotedata_rex.match(line):
            field = quotefield_rex.sub('\g<1>', line)
            field = field.lower()
            data = quotedata_rex.sub('\g<2>', line)
        # field = data entries
        elif data_rex.match(line):
            field = field_rex.sub('\g<1>', line)
            field = field.lower()
            data = data_rex.sub('\g<2>', line)

        # author/title/keywords get structured markup; other fields map to
        # a plain <bibtex:FIELD> element.
        if field == 'title':
            line = bibtextitle(data)
        elif field == 'author':
            line = bibtexauthor(data)
        elif field == 'keywords':
            line = bibtexkeyword(data)
        elif field != '':
            data = removebraces(data)
            data = data.strip()
            if data != '':
                line = '<bibtex:' + field + '>' + data.strip() + \
                       '</bibtex:' + field + '>'
            # get rid of the field={} type stuff
            else:
                line = ''

        if line != '':
            # latex-specific replacements
            # do this now after braces were removed
            line = line.replace('~', ' ')
            line = line.replace('\\\'a', 'á')
            line = line.replace('\\"a', 'ä')
            line = line.replace('\\\'c', 'ć')
            line = line.replace('\\"o', 'ö')
            line = line.replace('\\o', 'ø')
            line = line.replace('\\"u', 'ü')
            line = line.replace('---', '—')
            line = line.replace('--', '-')

            filecontents.append(line)

    return filecontents
#
# return 1 iff abbr is in line but not inside braces or quotes
# assumes that abbr appears only once on the line (out of braces and quotes)
#
def verify_out_of_braces(line, abbr):
    """Return 1 if *abbr* occurs in *line* outside {}-braces and double
    quotes, else 0."""
    # Split so each {, } or " delimiter becomes its own list element
    # (same pattern as the shared delimiter_rex, inlined).
    phrase_split = re.split('([{}"])', line)

    # re.escape guards against abbreviation names containing regex
    # metacharacters; the original interpolated abbr unescaped.
    abbr_rex = re.compile('\\b' + re.escape(abbr) + '\\b', re.I)

    open_brace = 0
    open_quote = 0

    for phrase in phrase_split:
        if phrase == "{":
            open_brace = open_brace + 1
        elif phrase == "}":
            open_brace = open_brace - 1
        elif phrase == '"':
            # toggle the in-quotes state
            if open_quote == 1:
                open_quote = 0
            else:
                open_quote = 1
        elif abbr_rex.search(phrase):
            if open_brace == 0 and open_quote == 0:
                return 1

    return 0
#
# a line in the form phrase1 # phrase2 # ... # phrasen
# is returned as phrase1 phrase2 ... phrasen
# with the correct punctuation
# Bug: Doesn't always work with multiple abbreviations plugged in
#
def concat_line(line):
    # only look at part after equals
    field = field_rex.sub('\g<1>',line)
    rest = field_rex.sub('\g<2>',line)

    concat_line = field + ' ='

    pound_split = concatsplit_rex.split(rest)

    phrase_count = 0
    length = len(pound_split)

    for phrase in pound_split:
        phrase = phrase.strip()
        # Non-first phrases: drop a leading quote/brace (it continues the
        # previous value).  First phrase: turn its opening quote into '{'.
        if phrase_count != 0:
            if phrase.startswith('"') or phrase.startswith('{'):
                phrase = phrase[1:]
        elif phrase.startswith('"'):
            phrase = phrase.replace('"','{',1)

        # Non-last phrases: drop the trailing quote/brace.  Last phrase:
        # convert a trailing quote (with optional comma) into '}' / '},'.
        if phrase_count != length-1:
            if phrase.endswith('"') or phrase.endswith('}'):
                phrase = phrase[:-1]
        else:
            if phrase.endswith('"'):
                phrase = phrase[:-1]
                phrase = phrase + "}"
            elif phrase.endswith('",'):
                phrase = phrase[:-2]
                phrase = phrase + "},"

        # if phrase did have \#, add the \# back
        if phrase.endswith('\\'):
            phrase = phrase + "#"
        concat_line = concat_line + ' ' + phrase

        phrase_count = phrase_count + 1

    return concat_line
# substitute abbreviations into filecontents
# @param filecontents_source - string of data from file
def bibtex_replace_abbreviations(filecontents_source):
    filecontents = filecontents_source.splitlines()

    # These are defined in bibtex, so we'll define them too
    abbr_list = ['jan','feb','mar','apr','may','jun',
                 'jul','aug','sep','oct','nov','dec']
    value_list = ['January','February','March','April',
                  'May','June','July','August','September',
                  'October','November','December']

    # abbr_rex[i] matches abbr_list[i] as a whole word (case-insensitive),
    # capturing an optional trailing comma so it can be preserved.
    abbr_rex = []
    total_abbr_count = 0

    front = '\\b'
    back = '(,?)\\b'

    for x in abbr_list:
        abbr_rex.append( re.compile(\
            front + abbr_list[total_abbr_count] + back, re.I ) )
        total_abbr_count = total_abbr_count + 1

    # @string{NAME = value} definitions add user abbreviations.
    abbrdef_rex = re.compile('\s*@string\s*{\s*('+\
        valid_name_chars +'*)\s*=(.*)', re.I)

    comment_rex = re.compile('@comment\s*{',re.I)
    preamble_rex = re.compile('@preamble\s*{',re.I)

    # Non-zero while skipping to the closing brace of a @string/@comment/
    # @preamble block.
    waiting_for_end_string = 0
    i = 0
    filecontents2 = ''

    for line in filecontents:
        if line == ' ' or line == '':
            continue

        if waiting_for_end_string:
            if re.search('}',line):
                waiting_for_end_string = 0
                continue

        if abbrdef_rex.search(line):
            abbr = abbrdef_rex.sub('\g<1>', line)

            if abbr_list.count(abbr) == 0:
                val = abbrdef_rex.sub('\g<2>', line)
                abbr_list.append(abbr)
                value_list.append(string.strip(val))
                abbr_rex.append( re.compile(\
                    front + abbr_list[total_abbr_count] + back, re.I ) )
                total_abbr_count = total_abbr_count + 1
            waiting_for_end_string = 1
            continue

        if comment_rex.search(line):
            waiting_for_end_string = 1
            continue

        if preamble_rex.search(line):
            waiting_for_end_string = 1
            continue

        # replace subsequent abbreviations with the value
        abbr_count = 0

        for x in abbr_list:
            if abbr_rex[abbr_count].search(line):
                if verify_out_of_braces(line,abbr_list[abbr_count]) == 1:
                    line = abbr_rex[abbr_count].sub(\
                        value_list[abbr_count] + '\g<1>', line)
                # Check for # concatenations
                if concatsplit_rex.search(line):
                    line = concat_line(line)
            abbr_count = abbr_count + 1

        filecontents2 = filecontents2 + line + '\n'
        i = i+1

    # Do one final pass over file
    # make sure that didn't end up with {" or }" after the substitution
    filecontents2 = filecontents2.replace('{"','{{')
    filecontents2 = filecontents2.replace('"}','}}')

    afterquotevalue_rex = re.compile('"\s*,\s*')
    afterbrace_rex = re.compile('"\s*}')
    afterbracevalue_rex = re.compile('(=\s*{[^=]*)},\s*')

    # add new lines to data that changed because of abbreviation substitutions
    filecontents2 = afterquotevalue_rex.sub('",\n', filecontents2)
    filecontents2 = afterbrace_rex.sub('"\n}', filecontents2)
    filecontents2 = afterbracevalue_rex.sub('\g<1>},\n', filecontents2)

    return filecontents2
#
# convert @type( ... ) to @type{ ... }
#
def no_outer_parens(filecontents):
    """Convert the outer parentheses of @type( ... ) entries to braces."""
    # Split so each paren/brace delimiter becomes its own token.
    tokens = re.split('([(){}])', filecontents)

    open_paren_count = 0
    open_type = 0     # currently inside an @type(...) entry
    look_next = 0     # the next token may be that entry's opening paren

    pieces = []
    at_rex = re.compile('@\w*')

    for token in tokens:
        if look_next == 1:
            if token == '(':
                # This is the entry's opening paren: convert it.
                token = '{'
                open_paren_count = open_paren_count + 1
            else:
                open_type = 0
            look_next = 0

        if token == '(':
            open_paren_count = open_paren_count + 1
        elif token == ')':
            open_paren_count = open_paren_count - 1
            if open_type == 1 and open_paren_count == 0:
                # Matching close of the converted entry paren.
                token = '}'
                open_type = 0
        elif at_rex.search(token):
            open_type = 1
            look_next = 1

        pieces.append(token)

    return ''.join(pieces)
# make all whitespace into just one space
# format the bibtex file into a usable form.
def bibtexwasher(filecontents_source):
    # Normalizes raw .bib lines into one-field-per-line form suitable for
    # bibtexdecoder(); returns a list of newline-terminated lines.

    space_rex = re.compile('\s+')
    comment_rex = re.compile('\s*%')

    filecontents = []

    # remove trailing and excessive whitespace
    # ignore comments
    for line in filecontents_source:
        line = string.strip(line)
        line = space_rex.sub(' ', line)
        # ignore comments
        if not comment_rex.match(line):
            filecontents.append(' '+ line)

    filecontents = string.join(filecontents, '')

    # the file is in one long string

    filecontents = no_outer_parens(filecontents)

    #
    # split lines according to preferred syntax scheme
    #
    filecontents = re.sub('(=\s*{[^=]*)},', '\g<1>},\n', filecontents)

    # add new lines after commas that are after values
    filecontents = re.sub('"\s*,', '",\n', filecontents)
    filecontents = re.sub('=\s*([\w\d]+)\s*,', '= \g<1>,\n', filecontents)
    filecontents = re.sub('(@\w*)\s*({(\s*)[^,\s]*)\s*,',
                          '\n\n\g<1>\g<2>,\n', filecontents)

    # add new lines after }
    filecontents = re.sub('"\s*}','"\n}\n', filecontents)
    filecontents = re.sub('}\s*,','},\n', filecontents)

    filecontents = re.sub('@(\w*)', '\n@\g<1>', filecontents)

    # character encoding, reserved latex characters
    filecontents = re.sub('{\\\&}', '&', filecontents)
    filecontents = re.sub('\\\&', '&', filecontents)

    # do checking for open braces to get format correct
    open_brace_count = 0
    brace_split = re.split('([{}])',filecontents)

    # rebuild filecontents
    filecontents = ''

    for phrase in brace_split:
        if phrase == '{':
            open_brace_count = open_brace_count + 1
        elif phrase == '}':
            open_brace_count = open_brace_count - 1
            # brace count back to zero means an entry just closed: break
            # the line there.
            if open_brace_count == 0:
                filecontents = filecontents + '\n'

        filecontents = filecontents + phrase

    filecontents2 = bibtex_replace_abbreviations(filecontents)

    # gather
    filecontents = filecontents2.splitlines()

    i=0
    j=0         # count the number of blank lines
    for line in filecontents:
        # ignore blank lines
        if line == '' or line == ' ':
            j = j+1
            continue
        filecontents[i] = line + '\n'
        i = i+1

    # get rid of the extra stuff at the end of the array
    # (The extra stuff are duplicates that are in the array because
    # blank lines were removed.)
    length = len( filecontents)
    filecontents[length-j:length] = []

    return filecontents
def contentshandler(filecontents_source):
    # Wash the raw .bib lines, decode them to BibTeXML, and print the full
    # XML document (prologue + entries + closing tag) to stdout.
    washeddata = bibtexwasher(filecontents_source)
    outdata = bibtexdecoder(washeddata)
    print '<?xml version="1.0" encoding="utf-8"?>'
    #print '<?xml-stylesheet href="bibtexml.css" type="text/css" ?>'
    print '<!DOCTYPE bibtex:file PUBLIC'
    print ' "-//BibTeXML//DTD XML for BibTeX v1.0//EN"'
    print ' "bibtexml.dtd" >'
    print '<bibtex:file xmlns:bibtex="http://bibtexml.sf.net/">'
    print
    for line in outdata:
        print line
    print ' <!-- manual cleanup may be required... -->'
    print '</bibtex:file>'
def filehandler(filepath):
    """Read *filepath* and return its lines (each ending with a newline).

    The previous version used a bare except that printed a message and
    then returned a never-assigned variable, raising a confusing
    NameError; now only open/read errors are caught, reported, and
    re-raised so the caller sees the real failure.
    """
    try:
        fd = open(filepath, 'r')
        filecontents_source = fd.readlines()
        fd.close()
    except (IOError, OSError):
        # Same message text as before; parenthesized form works on both
        # Python 2 and 3.
        print('Could not open file: ' + filepath)
        raise
    return filecontents_source
# main program
def main():
    """Read a BibTeX file named on the command line (or stdin when no
    argument is given) and print the BibTeXML translation to stdout."""
    import sys
    cli_args = sys.argv[1:]
    if cli_args:
        filecontents_source = filehandler(cli_args[0])
    else:
        # instead of exit() read stdin here
        filecontents_source = sys.stdin.readlines()
    contentshandler(filecontents_source)

if __name__ == "__main__": main()

# end python script
| mit |
jdinuncio/ansible-modules-extras | system/firewalld.py | 23 | 37432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in output of firewall-cmd --get-services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- "Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can operate on firewalld configs when it's not running (requires firewalld >= 0.3.9)"
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld:
service: https
permanent: true
state: enabled
- firewalld:
port: 8081/tcp
permanent: true
state: disabled
- firewalld:
port: 161-162/udp
permanent: true
state: enabled
- firewalld:
zone: dmz
service: http
permanent: true
state: enabled
- firewalld:
rich_rule: 'rule service name="ftp" audit limit value="1/m" accept'
permanent: true
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: true
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: true
zone: dmz
'''
from ansible.module_utils.basic import AnsibleModule
import sys
#####################
# Globals
#
# Populated at runtime: the firewalld client handle, the offline-mode
# flag, the lazily-imported firewalld client classes, and the
# AnsibleModule instance.  ('module' was previously assigned None twice;
# the duplicate assignment is removed.)
fw = None
module = None
fw_offline = False
Rich_Rule = None
FirewallClientZoneSettings = None
#####################
# exception handling
#
def action_handler(action_func, action_func_args):
    """
    Function to wrap calls to make actions on firewalld in try/except
    logic and emit (hopefully) useful error messages
    """
    hints = []
    try:
        return action_func(*action_func_args)
    except Exception:
        # Make python 2.4 shippable ci tests happy
        e = sys.exc_info()[1]

        # Add extra context for commonly known errors to help users
        # diagnose what went wrong.
        if "INVALID_SERVICE" in "%s" % e:
            hints.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")

        if hints:
            module.fail_json(
                msg='ERROR: Exception caught: %s %s' % (e, ', '.join(hints))
            )
        else:
            module.fail_json(msg='ERROR: Exception caught: %s' % e)
#####################
# fw_offline helpers
#
def get_fw_zone_settings(zone):
    # Return a (zone_object, settings) pair for *zone*: via firewalld's
    # offline configuration API when the daemon is not running, otherwise
    # over the D-Bus client interface.
    if fw_offline:
        fw_zone = fw.config.get_zone(zone)
        fw_settings = FirewallClientZoneSettings(
            list(fw.config.get_zone_config(fw_zone))
        )
    else:
        fw_zone = fw.config().getZoneByName(zone)
        fw_settings = fw_zone.getSettings()
    return (fw_zone, fw_settings)

def update_fw_settings(fw_zone, fw_settings):
    # Persist modified zone settings back through the matching API
    # (offline config store vs. D-Bus zone object).
    if fw_offline:
        fw.config.set_zone_config(fw_zone, fw_settings.settings)
    else:
        fw_zone.update(fw_settings)
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return True if masquerading is enabled on *zone* (runtime config)."""
    # queryMasquerade returns a boolean; the original compared '== True'
    # and re-branched to return True/False redundantly.
    return bool(fw.queryMasquerade(zone))

def get_masquerade_enabled_permanent(zone):
    """Return True if masquerading is enabled on *zone* (permanent config)."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return bool(fw_settings.getMasquerade())

def set_masquerade_enabled(zone):
    """Enable masquerading on *zone* in the runtime configuration."""
    fw.addMasquerade(zone)

def set_masquerade_disabled(zone):
    """Disable masquerading on *zone* in the runtime configuration."""
    fw.removeMasquerade(zone)

def set_masquerade_permanent(zone, masquerade):
    """Set the permanent masquerade flag for *zone* to *masquerade*."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.setMasquerade(masquerade)
    update_fw_settings(fw_zone, fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return True if the [port, protocol] pair is open in *zone*'s
    runtime configuration."""
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        ports_list = fw_settings.getPorts()
    else:
        ports_list = fw.getPorts(zone)
    # Collapse the redundant ``if x: return True else: return False``.
    return port_proto in ports_list
def set_port_enabled(zone, port, protocol, timeout):
    """Open *port*/*protocol* in *zone* (runtime only), optionally timed."""
    fw.addPort(zone, port, protocol, timeout)
def set_port_disabled(zone, port, protocol):
    """Close *port*/*protocol* in *zone* (runtime only)."""
    fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
    """Return True if the [port, protocol] pair is in *zone*'s permanent
    configuration (stored as tuples, hence the conversion)."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return tuple(port_proto) in fw_settings.getPorts()
def set_port_enabled_permanent(zone, port, protocol):
    """Persist an open *port*/*protocol* in *zone*."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addPort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)
def set_port_disabled_permanent(zone, port, protocol):
    """Remove *port*/*protocol* from *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removePort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)
####################
# source handling
#
def get_source(zone, source):
    """Return True if *source* (address/CIDR) is bound to *zone*'s
    permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Collapse the redundant ``if x: return True else: return False``.
    return source in fw_settings.getSources()
def add_source(zone, source):
    """Bind *source* to *zone* in permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addSource(source)
    update_fw_settings(fw_zone, fw_settings)
def remove_source(zone, source):
    """Unbind *source* from *zone* in permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeSource(source)
    update_fw_settings(fw_zone, fw_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return True if *interface* is bound to *zone* (runtime config)."""
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        interface_list = fw_settings.getInterfaces()
    else:
        interface_list = fw.getInterfaces(zone)
    # BUG FIX: the original tested ``interface in fw.getInterfaces(zone)``,
    # ignoring interface_list -- that re-queried the online API and broke
    # offline mode (the offline API has no getInterfaces on fw).
    return interface in interface_list
def change_zone_of_interface(zone, interface):
    """Move *interface* into *zone* (runtime only)."""
    fw.changeZoneOfInterface(zone, interface)
def remove_interface(zone, interface):
    """Remove *interface* from *zone* (runtime only)."""
    fw.removeInterface(zone, interface)
def get_interface_permanent(zone, interface):
    """Return True if *interface* is bound to *zone* in permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Collapse the redundant ``if x: return True else: return False``.
    return interface in fw_settings.getInterfaces()
def change_zone_of_interface_permanent(zone, interface):
    """Permanently move *interface* into *zone*, removing it from any
    zone that currently claims it."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    if fw_offline:
        iface_zone_objs = [ ]
        # BUG FIX: the original loop reused the name ``zone`` as its loop
        # variable, clobbering the target-zone parameter before the
        # ``old_zone_obj.name != zone`` comparison below.
        for zone_name in fw.config.get_zones():
            old_zone_obj = fw.config.get_zone(zone_name)
            if interface in old_zone_obj.interfaces:
                iface_zone_objs.append(old_zone_obj)
        if len(iface_zone_objs) > 1:
            # Even it shouldn't happen, it's actually possible that
            # the same interface is in several zone XML files
            module.fail_json(
                msg = 'ERROR: interface {} is in {} zone XML file, can only be in one'.format(
                    interface,
                    len(iface_zone_objs)
                )
            )
        # BUG FIX: guard against the interface not being in any zone yet;
        # the original unconditionally indexed iface_zone_objs[0] and
        # raised IndexError in that case.
        if not iface_zone_objs or iface_zone_objs[0].name != zone:
            if iface_zone_objs:
                old_zone_obj = iface_zone_objs[0]
                old_zone_settings = FirewallClientZoneSettings(
                    fw.config.get_zone_config(old_zone_obj)
                )
                old_zone_settings.removeInterface(interface)    # remove from old
                fw.config.set_zone_config(old_zone_obj, old_zone_settings.settings)
            fw_settings.addInterface(interface)                 # add to new
            fw.config.set_zone_config(fw_zone, fw_settings.settings)
    else:
        old_zone_name = fw.config().getZoneOfInterface(interface)
        if old_zone_name != zone:
            if old_zone_name:
                old_zone_obj = fw.config().getZoneByName(old_zone_name)
                old_zone_settings = old_zone_obj.getSettings()
                old_zone_settings.removeInterface(interface)    # remove from old
                old_zone_obj.update(old_zone_settings)
            fw_settings.addInterface(interface)                 # add to new
            fw_zone.update(fw_settings)
def remove_interface_permanent(zone, interface):
    """Permanently remove *interface* from *zone*."""
    zone_obj, settings = get_fw_zone_settings(zone)
    settings.removeInterface(interface)
    update_fw_settings(zone_obj, settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return True if *service* is enabled in *zone*'s runtime config."""
    # Collapse the redundant ``if x: return True else: return False``.
    return service in fw.getServices(zone)
def set_service_enabled(zone, service, timeout):
    """Enable *service* in *zone* (runtime only), optionally timed."""
    fw.addService(zone, service, timeout)
def set_service_disabled(zone, service):
    """Disable *service* in *zone* (runtime only)."""
    fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
    """Return True if *service* is in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return service in fw_settings.getServices()
def set_service_enabled_permanent(zone, service):
    """Persist *service* as enabled in *zone*."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addService(service)
    update_fw_settings(fw_zone, fw_settings)
def set_service_disabled_permanent(zone, service):
    """Remove *service* from *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeService(service)
    update_fw_settings(fw_zone, fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
    """Return True if *rule* is active in *zone*'s runtime configuration."""
    # Convert the rule string to standard format
    # before checking whether it is present
    rule = str(Rich_Rule(rule_str=rule))
    # Collapse the redundant ``if x: return True else: return False``.
    return rule in fw.getRichRules(zone)
def set_rich_rule_enabled(zone, rule, timeout):
    """Add *rule* to *zone* (runtime only), optionally timed."""
    fw.addRichRule(zone, rule, timeout)
def set_rich_rule_disabled(zone, rule):
    """Remove *rule* from *zone* (runtime only)."""
    fw.removeRichRule(zone, rule)
def get_rich_rule_enabled_permanent(zone, rule):
    """Return True if *rule* is in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Convert the rule string to standard format
    # before checking whether it is present
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw_settings.getRichRules()
def set_rich_rule_enabled_permanent(zone, rule):
    """Persist *rule* in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)
def set_rich_rule_disabled_permanent(zone, rule):
    """Remove *rule* from *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)
def main():
    """Ansible module entry point.

    Parses the module parameters and reconciles the requested firewalld
    state for exactly one of: service, port, rich_rule, interface,
    source, or masquerade -- in runtime and/or permanent configuration.
    """
    ## make module global so we don't have to pass it to action_handler every
    ## function call (the original declared this twice; once is enough)
    global module
    module = AnsibleModule(
        argument_spec = dict(
            service=dict(required=False,default=None),
            port=dict(required=False,default=None),
            rich_rule=dict(required=False,default=None),
            zone=dict(required=False,default=None),
            immediate=dict(type='bool',default=False),
            source=dict(required=False,default=None),
            permanent=dict(type='bool',required=False,default=None),
            state=dict(choices=['enabled', 'disabled'], required=True),
            timeout=dict(type='int',required=False,default=0),
            interface=dict(required=False,default=None),
            masquerade=dict(required=False,default=None),
            offline=dict(type='bool',required=False,default=None),
        ),
        supports_check_mode=True
    )
    ## Handle running (online) daemon vs non-running (offline) daemon
    global fw
    global fw_offline
    global Rich_Rule
    global FirewallClientZoneSettings
    ## Imports
    try:
        import firewall.config
        FW_VERSION = firewall.config.VERSION
        from firewall.client import Rich_Rule
        from firewall.client import FirewallClient
        fw = None
        fw_offline = False
        try:
            fw = FirewallClient()
            fw.getDefaultZone()
        except AttributeError:
            ## Firewalld is not currently running, permanent-only operations
            ## Import other required parts of the firewalld API
            ##
            ## NOTE:
            ## online and offline operations do not share a common firewalld API
            from firewall.core.fw_test import Firewall_test
            from firewall.client import FirewallClientZoneSettings
            fw = Firewall_test()
            fw.start()
            fw_offline = True
    except ImportError:
        ## Make python 2.4 shippable ci tests happy
        e = sys.exc_info()[1]
        module.fail_json(msg='firewalld and its python 2 module are required for this module, version 2.0.11 or newer required (3.0.9 or newer for offline operations) \n %s' % e)
    if fw_offline:
        ## Pre-run version checking
        if FW_VERSION < "0.3.9":
            module.fail_json(msg='unsupported version of firewalld, offline operations require >= 3.0.9')
    else:
        ## Pre-run version checking
        if FW_VERSION < "0.2.11":
            module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11')
        ## Check for firewalld running
        try:
            if fw.connected == False:
                module.fail_json(msg='firewalld service must be running, or try with offline=true')
        except AttributeError:
            module.fail_json(msg="firewalld connection can't be established,\
                    installed version (%s) likely too old. Requires firewalld >= 2.0.11" % FW_VERSION)
    ## Verify required params are provided
    if module.params['source'] is None and module.params['permanent'] is None:
        module.fail_json(msg='permanent is a required parameter')
    if module.params['interface'] is not None and module.params['zone'] is None:
        # BUG FIX: AnsibleModule has no fail() method; the original
        # ``module.fail(...)`` raised AttributeError instead of reporting.
        module.fail_json(msg='zone is a required parameter')
    if module.params['immediate'] and fw_offline:
        # BUG FIX: same fail() -> fail_json() correction as above.
        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
    ## Global Vars
    changed = False
    msgs = []
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']
    if module.params['port'] is not None:
        # BUG FIX: str.split('/') raises ValueError when no '/' is present,
        # so the original ``protocol == None`` check was dead code; use
        # partition() so a malformed port yields the intended error message.
        port, _sep, protocol = module.params['port'].partition('/')
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None
    if module.params['zone'] is not None:
        zone = module.params['zone']
    else:
        if fw_offline:
            zone = fw.get_default_zone()
        else:
            zone = fw.getDefaultZone()
    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']
    ## Exactly one of the mutually exclusive operations may be requested.
    modification_count = 0
    if service is not None:
        modification_count += 1
    if port is not None:
        modification_count += 1
    if rich_rule is not None:
        modification_count += 1
    if interface is not None:
        modification_count += 1
    if masquerade is not None:
        modification_count += 1
    if modification_count > 1:
        module.fail_json(msg='can only operate on port, service, rich_rule or interface at once')
    if service is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_service_enabled_permanent,
                (zone, service)
            )
            is_enabled_immediate = action_handler(
                get_service_enabled,
                (zone, service)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')
            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        action_handler(
                            set_service_enabled_permanent,
                            (zone, service)
                        )
                        changed = True
                    if not is_enabled_immediate:
                        action_handler(
                            set_service_enabled,
                            (zone, service, timeout)
                        )
                        changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if is_enabled_permanent:
                        action_handler(
                            set_service_disabled_permanent,
                            (zone, service)
                        )
                        changed = True
                    if is_enabled_immediate:
                        action_handler(
                            set_service_disabled,
                            (zone, service)
                        )
                        changed = True
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_service_enabled_permanent,
                (zone, service)
            )
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_service_enabled_permanent,
                        (zone, service)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_service_disabled_permanent,
                        (zone, service)
                    )
                    changed = True
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_service_enabled,
                (zone, service)
            )
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_service_enabled,
                        (zone, service, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_service_disabled,
                        (zone, service)
                    )
                    changed = True
        if changed:
            msgs.append("Changed service %s to %s" % (service, desired_state))
    # FIXME - source type does not handle non-permanent mode, this was an
    # oversight in the past.
    if source is not None:
        is_enabled = action_handler(get_source, (zone, source))
        if desired_state == "enabled":
            if not is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                action_handler(add_source, (zone, source))
                changed = True
                msgs.append("Added %s to zone %s" % (source, zone))
        elif desired_state == "disabled":
            if is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                action_handler(remove_source, (zone, source))
                changed = True
                msgs.append("Removed %s from zone %s" % (source, zone))
    if port is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_port_enabled_permanent,
                (zone, [port, protocol])
            )
            is_enabled_immediate = action_handler(
                get_port_enabled,
                (zone, [port, protocol])
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')
            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        action_handler(
                            set_port_enabled_permanent,
                            (zone, port, protocol)
                        )
                        changed = True
                    if not is_enabled_immediate:
                        action_handler(
                            set_port_enabled,
                            (zone, port, protocol, timeout)
                        )
                        changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if is_enabled_permanent:
                        action_handler(
                            set_port_disabled_permanent,
                            (zone, port, protocol)
                        )
                        changed = True
                    if is_enabled_immediate:
                        action_handler(
                            set_port_disabled,
                            (zone, port, protocol)
                        )
                        changed = True
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_port_enabled_permanent,
                (zone, [port, protocol])
            )
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_port_enabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_port_disabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
        # Consistency fix: this was a bare ``if`` in the original; the three
        # permanent/immediate combinations are mutually exclusive branches.
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_port_enabled,
                (zone, [port, protocol])
            )
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_port_enabled,
                        (zone, port, protocol, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_port_disabled,
                        (zone, port, protocol)
                    )
                    changed = True
        if changed:
            msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \
                        desired_state))
    if rich_rule is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_rich_rule_enabled_permanent,
                (zone, rich_rule)
            )
            is_enabled_immediate = action_handler(
                get_rich_rule_enabled,
                (zone, rich_rule)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')
            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        action_handler(
                            set_rich_rule_enabled_permanent,
                            (zone, rich_rule)
                        )
                        changed = True
                    if not is_enabled_immediate:
                        action_handler(
                            set_rich_rule_enabled,
                            (zone, rich_rule, timeout)
                        )
                        changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if is_enabled_permanent:
                        action_handler(
                            set_rich_rule_disabled_permanent,
                            (zone, rich_rule)
                        )
                        changed = True
                    if is_enabled_immediate:
                        action_handler(
                            set_rich_rule_disabled,
                            (zone, rich_rule)
                        )
                        changed = True
        # Consistency fix: bare ``if`` -> ``elif`` (mutually exclusive).
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_rich_rule_enabled_permanent,
                (zone, rich_rule)
            )
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_rich_rule_enabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_rich_rule_disabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_rich_rule_enabled,
                (zone, rich_rule)
            )
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_rich_rule_enabled,
                        (zone, rich_rule, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(
                        set_rich_rule_disabled,
                        (zone, rich_rule)
                    )
                    changed = True
        if changed:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
    if interface is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_interface_permanent,
                (zone, interface)
            )
            is_enabled_immediate = action_handler(
                get_interface,
                (zone, interface)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')
            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        change_zone_of_interface_permanent(zone, interface)
                        changed = True
                    if not is_enabled_immediate:
                        change_zone_of_interface(zone, interface)
                        changed = True
                    if changed:
                        msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if is_enabled_permanent:
                        remove_interface_permanent(zone, interface)
                        changed = True
                    if is_enabled_immediate:
                        remove_interface(zone, interface)
                        changed = True
                    if changed:
                        msgs.append("Removed %s from zone %s" % (interface, zone))
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_interface_permanent,
                (zone, interface)
            )
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    change_zone_of_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    remove_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_interface,
                (zone, interface)
            )
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    change_zone_of_interface(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    remove_interface(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))
    if masquerade is not None:
        # BUG FIX: throughout this section the original passed ``(zone)``
        # -- which is just the string, not a 1-tuple -- as the argument
        # pack for action_handler; *args then unpacked the zone name
        # character-by-character and the call always failed for zone
        # names longer than one character. All occurrences now use
        # ``(zone,)``.
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_masquerade_enabled_permanent,
                (zone,)
            )
            is_enabled_immediate = action_handler(get_masquerade_enabled, (zone,))
            msgs.append('Permanent and Non-Permanent(immediate) operation')
            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        action_handler(set_masquerade_permanent, (zone, True))
                        changed = True
                    if not is_enabled_immediate:
                        action_handler(set_masquerade_enabled, (zone,))
                        changed = True
                    if changed:
                        msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    if is_enabled_permanent:
                        action_handler(set_masquerade_permanent, (zone, False))
                        changed = True
                    if is_enabled_immediate:
                        action_handler(set_masquerade_disabled, (zone,))
                        changed = True
                    if changed:
                        msgs.append("Removed masquerade from zone %s" % (zone))
        elif permanent and not immediate:
            is_enabled = action_handler(get_masquerade_enabled_permanent, (zone,))
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(set_masquerade_permanent, (zone, True))
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(set_masquerade_permanent, (zone, False))
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))
        elif immediate and not permanent:
            is_enabled = action_handler(get_masquerade_enabled, (zone,))
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(set_masquerade_enabled, (zone,))
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    action_handler(set_masquerade_disabled, (zone,))
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))
    if fw_offline:
        msgs.append("(offline operation: only on-disk configs were altered)")
    module.exit_json(changed=changed, msg=', '.join(msgs))
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
pczerkas/tempest | tempest/api/identity/admin/v3/test_regions.py | 8 | 4188 | # Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class RegionsTestJSON(base.BaseIdentityV3AdminTest):
    """CRUD and listing tests for Keystone v3 regions (admin API)."""
    @classmethod
    def setup_clients(cls):
        """Alias the region client for brevity in the tests below."""
        super(RegionsTestJSON, cls).setup_clients()
        cls.client = cls.region_client
    @classmethod
    def resource_setup(cls):
        """Pre-create two regions used as parents and list fixtures."""
        super(RegionsTestJSON, cls).resource_setup()
        cls.setup_regions = list()
        for i in range(2):
            r_description = data_utils.rand_name('description')
            region = cls.client.create_region(r_description)['region']
            cls.setup_regions.append(region)
    @classmethod
    def resource_cleanup(cls):
        """Delete the pre-created regions, then run base cleanup."""
        for r in cls.setup_regions:
            cls.client.delete_region(r['id'])
        super(RegionsTestJSON, cls).resource_cleanup()
    def _delete_region(self, region_id):
        """Delete *region_id* and verify it can no longer be fetched."""
        self.client.delete_region(region_id)
        self.assertRaises(lib_exc.NotFound,
                          self.client.get_region, region_id)
    @test.idempotent_id('56186092-82e4-43f2-b954-91013218ba42')
    def test_create_update_get_delete_region(self):
        """Full lifecycle: create with a parent, update, fetch, delete."""
        r_description = data_utils.rand_name('description')
        region = self.client.create_region(
            r_description,
            parent_region_id=self.setup_regions[0]['id'])['region']
        self.addCleanup(self._delete_region, region['id'])
        self.assertEqual(r_description, region['description'])
        self.assertEqual(self.setup_regions[0]['id'],
                         region['parent_region_id'])
        # Update region with new description and parent ID
        r_alt_description = data_utils.rand_name('description')
        region = self.client.update_region(
            region['id'],
            description=r_alt_description,
            parent_region_id=self.setup_regions[1]['id'])['region']
        self.assertEqual(r_alt_description, region['description'])
        self.assertEqual(self.setup_regions[1]['id'],
                         region['parent_region_id'])
        # Get the details of region
        region = self.client.get_region(region['id'])['region']
        self.assertEqual(r_alt_description, region['description'])
        self.assertEqual(self.setup_regions[1]['id'],
                         region['parent_region_id'])
    @test.attr(type='smoke')
    @test.idempotent_id('2c12c5b5-efcf-4aa5-90c5-bff1ab0cdbe2')
    def test_create_region_with_specific_id(self):
        """A caller-supplied region id must be honoured on create."""
        # Create a region with a specific id
        r_region_id = data_utils.rand_uuid()
        r_description = data_utils.rand_name('description')
        region = self.client.create_region(
            r_description, unique_region_id=r_region_id)['region']
        self.addCleanup(self._delete_region, region['id'])
        # Asserting Create Region with specific id response body
        self.assertEqual(r_region_id, region['id'])
        self.assertEqual(r_description, region['description'])
    @test.idempotent_id('d180bf99-544a-445c-ad0d-0c0d27663796')
    def test_list_regions(self):
        """The fixture regions must appear in the region listing."""
        # Get a list of regions
        fetched_regions = self.client.list_regions()['regions']
        missing_regions =\
            [e for e in self.setup_regions if e not in fetched_regions]
        # Asserting List Regions response
        self.assertEqual(0, len(missing_regions),
                         "Failed to find region %s in fetched list" %
                         ', '.join(str(e) for e in missing_regions))
| apache-2.0 |
sftd/scons | scons-local/SCons/Tool/msvc.py | 8 | 11451 | """engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/msvc.py 2014/03/02 14:18:15 garyo"
import os.path
import re
import sys
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
from MSCommon import msvc_exists, msvc_setup_env_once
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def validate_vars(env):
    """Validate the PCH and PCHSTOP construction variables."""
    # Nothing to check unless a truthy PCH is configured.
    if 'PCH' not in env or not env['PCH']:
        return
    if 'PCHSTOP' not in env:
        raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
    if not SCons.Util.is_String(env['PCHSTOP']):
        raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r"%env['PCHSTOP'])
def pch_emitter(target, source, env):
    """Adds the object file target."""
    validate_vars(env)
    pch = None
    obj = None
    # Identify which declared targets are the .pch and the side-effect .obj.
    for tgt in target:
        ext = SCons.Util.splitext(str(tgt))[1]
        if ext == '.pch':
            pch = tgt
        if ext == '.obj':
            obj = tgt
    if not obj:
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'
    # pch must be first, and obj second for the PCHCOM to work
    return ([pch, obj], source)
def object_emitter(target, source, env, parent_emitter):
    """Sets up the PCH dependencies for an object file."""
    validate_vars(env)
    parent_emitter(target, source, env)
    # Add a dependency, but only if the target (e.g. 'Source1.obj')
    # doesn't correspond to the pre-compiled header ('Source1.pch').
    # If the basenames match, then this was most likely caused by
    # someone adding the source file to both the env.PCH() and the
    # env.Program() calls, and adding the explicit dependency would
    # cause a cycle on the .pch file itself.
    #
    # See issue #2505 for a discussion of what to do if it turns
    # out this assumption causes trouble in the wild:
    # http://scons.tigris.org/issues/show_bug.cgi?id=2505
    if 'PCH' in env:
        pch = env['PCH']
        pch_obj = SCons.Util.splitext(str(pch))[0] + '.obj'
        if str(target[0]) != pch_obj:
            env.Depends(target, pch)
    return (target, source)
def static_object_emitter(target, source, env):
    """Static-object flavour of object_emitter."""
    return object_emitter(target, source, env,
                          SCons.Defaults.StaticObjectEmitter)
def shared_object_emitter(target, source, env):
    """Shared-object flavour of object_emitter."""
    return object_emitter(target, source, env,
                          SCons.Defaults.SharedObjectEmitter)
# Precompiled-header build: $PCHCOM produces the .pch plus a side-effect
# .obj (pch_emitter guarantees the target ordering PCHCOM relies on).
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
                                    emitter=pch_emitter,
                                    source_scanner=SCons.Tool.SourceFileScanner)
# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
                                    src_suffix='.rc',
                                    suffix='.res',
                                    src_builder=[],
                                    source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
    """
    Returns a key to identify unique batches of sources for compilation.

    If batching is enabled (via the $MSVC_BATCH setting), then all
    target+source pairs that use the same action, defined by the same
    environment, and have the same target and source directories, will
    be batched.

    Returning None specifies that the specified target+source should not
    be batched with other compilations.
    """
    # $MSVC_BATCH may itself reference another construction variable,
    # hence the subst(); '0'/'False'/'' (and a missing setting) all mean
    # batching is disabled.
    if 'MSVC_BATCH' not in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
        return None
    tgt = target[0]
    src = source[0]
    if os.path.splitext(tgt.name)[0] != os.path.splitext(src.name)[0]:
        # Differing base names must be compiled separately (cl derives
        # the object name from the source name).
        return None
    return (id(action), id(env), tgt.dir, src.dir)
def msvc_output_flag(target, source, env, for_signature):
    """
    Returns the correct /Fo flag for batching.

    If batching is disabled or there's only one source file, then we
    return an /Fo string that specifies the target explicitly. Otherwise,
    we return an /Fo string that just specifies the first target's
    directory (where the Visual C/C++ compiler will put the .obj files).
    """
    # $MSVC_BATCH may reference another construction variable, hence the
    # subst(); '0'/'False'/'' (and a missing setting) disable batching.
    batching = ('MSVC_BATCH' in env
                and env.subst('$MSVC_BATCH') not in ('0', 'False', '', None))
    if not batching:
        return '/Fo$TARGET'
    # The Visual C/C++ compiler requires a \ at the end of the /Fo
    # option to indicate an output directory. We use os.sep here so
    # that the test(s) for this can be run on non-Windows systems
    # without having a hard-coded backslash mess up command-line
    # argument parsing.
    return '/Fo${TARGET.dir}' + os.sep
# Compile actions for C/C++, static and shared variants. batch_key plus
# targets='$CHANGED_TARGETS' enable $MSVC_BATCH support, letting one cl
# invocation compile several changed sources at once.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
                              batch_key=msvc_batch_key,
                              targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
                                  batch_key=msvc_batch_key,
                                  targets='$CHANGED_TARGETS')
def generate(env):
    """Add Builders and construction variables for MSVC++ to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # TODO(batch): shouldn't reach in to cmdgen this way; necessary
    # for now to bypass the checks in Builder.DictCmdGenerator.__call__()
    # and allow .cc and .cpp to be compiled in the same command line.
    static_obj.cmdgen.source_ext_match = False
    shared_obj.cmdgen.source_ext_match = False

    # Wire the batched compile actions and object-file emitters to every
    # known C and C++ source suffix.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, CAction)
        shared_obj.add_action(suffix, ShCAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, CXXAction)
        shared_obj.add_action(suffix, ShCXXAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)

    # Debug-info and precompiled-header flags expand to nothing unless
    # $PDB / $PCH are set on the environment.
    env['CCPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Z7") or ""}'])
    env['CCPCHFLAGS'] = SCons.Util.CLVar(['${(PCH and "/Yu%s \\\"/Fp%s\\\""%(PCHSTOP or "",File(PCH))) or ""}'])
    env['_MSVC_OUTPUT_FLAG'] = msvc_output_flag
    env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $CCPCHFLAGS $CCPDBFLAGS'
    env['CC'] = 'cl'
    env['CCFLAGS'] = SCons.Util.CLVar('/nologo')
    env['CFLAGS'] = SCons.Util.CLVar('')
    # All *COM command lines use $CHANGED_SOURCES (not $SOURCES) so that a
    # batched invocation only recompiles what is out of date.
    env['CCCOM'] = '${TEMPFILE("$CC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CFLAGS $CCFLAGS $_CCCOMCOM")}'
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '${TEMPFILE("$SHCC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCFLAGS $SHCCFLAGS $_CCCOMCOM")}'
    env['CXX'] = '$CC'
    env['CXXFLAGS'] = SCons.Util.CLVar('$( /TP $)')
    env['CXXCOM'] = '${TEMPFILE("$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM")}'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
    env['SHCXXCOM'] = '${TEMPFILE("$SHCXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM")}'
    env['CPPDEFPREFIX'] = '/D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '/I'
    env['INCSUFFIX'] = ''
    # env.Append(OBJEMITTER = [static_object_emitter])
    # env.Append(SHOBJEMITTER = [shared_object_emitter])
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1

    # Windows resource-compiler support (.rc -> .res).
    env['RC'] = 'rc'
    env['RCFLAGS'] = SCons.Util.CLVar('')
    env['RCSUFFIXES'] = ['.rc', '.rc2']
    env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS $RCFLAGS /fo$TARGET $SOURCES'
    env['BUILDERS']['RES'] = res_builder
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'

    # Set-up ms tools paths
    msvc_setup_env_once(env)

    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cc'

    # Precompiled-header builder and flags.
    env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
    env['PCHCOM'] = '$CXX /Fo${TARGETS[1]} $CXXFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Yc$PCHSTOP /Fp${TARGETS[0]} $CCPDBFLAGS $PCHPDBFLAGS'
    env['BUILDERS']['PCH'] = pch_builder

    if 'ENV' not in env:
        env['ENV'] = {}
    if 'SystemRoot' not in env['ENV']:    # required for dlls in the winsxs folders
        env['ENV']['SystemRoot'] = SCons.Platform.win32.get_system_root()
def exists(env):
    """Tool availability hook: delegates to msvc_exists() to report whether
    MSVC appears to be usable on this system (the *env* argument is unused)."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
jdgwartney/meter-plugin-sdk-python | setup.py | 1 | 1283 | #
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(review): this script passes setuptools-only keywords (entry_points,
# install_requires, package_data globs) which plain distutils.core.setup()
# does not process -- console scripts and dependencies were silently
# dropped.  Import setup from setuptools instead.
from setuptools import setup


def _read_long_description():
    """Return README.txt for the PyPI long description, closing the file."""
    with open('README.txt') as readme:
        return readme.read()


setup(
    name='meterplugin',
    version='0.2.3',
    url='https://github.com/boundary/meter-plugin-sdk-python',
    author='David Gwartney',
    author_email='david_gwartney@bmc.com',
    packages=['meterplugin', ],
    # Console entry points installed as executables by pip.
    entry_points={
        'console_scripts': [
            'plugin-runner = meterplugin.plugin_runner:main',
            'post-extract = meterplugin.post_extract:main',
        ],
    },
    package_data={'meterplugin': ['templates/*']},
    license='LICENSE',
    description='TrueSight Pulse Meter Plugin SDK for Python',
    long_description=_read_long_description(),
    install_requires=[
        'tinyrpc >= 0.5',
        'tspapi >= 0.3.6', ],
)
| apache-2.0 |
Juniper/tempest | tempest/cmd/workspace.py | 2 | 8568 | # Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages Tempest workspaces
This command is used for managing tempest workspaces
Commands
========
list
----
Outputs the name and path of all known tempest workspaces
register
--------
Registers a new tempest workspace via a given --name and --path
rename
------
Renames a tempest workspace from --old-name to --new-name
move
----
Changes the path of a given tempest workspace --name to --path
remove
------
Deletes the entry for a given tempest workspace --name
--rmdir Deletes the given tempest workspace directory
General Options
===============
**--workspace_path**: Allows the user to specify a different location for the
workspace.yaml file containing the workspace definitions
instead of ~/.tempest/workspace.yaml
"""
import os
import shutil
import sys
from cliff import command
from cliff import lister
from oslo_concurrency import lockutils
import yaml
from tempest import config
CONF = config.CONF
class WorkspaceManager(object):
    """Manages the persistent registry of tempest workspaces.

    The registry is a YAML mapping of workspace name -> absolute path,
    stored by default at ~/.tempest/workspace.yaml.  Public methods are
    serialized through an external lockutils lock so concurrent tempest
    invocations cannot corrupt the file.  Validation failures print a
    message and exit the process with status 1 (CLI semantics).
    """

    def __init__(self, path=None):
        # Ensure the oslo lock directory is configured before any of the
        # @lockutils.synchronized methods below are used.
        lockutils.get_lock_path(CONF)
        self.path = path or os.path.join(
            os.path.expanduser("~"), ".tempest", "workspace.yaml")
        # Create the containing directory on first use.  The original code
        # recomputed the directory with rsplit(os.path.sep, 1); use the
        # os.path.dirname value it already tested instead.
        workspace_dir = os.path.dirname(self.path)
        if not os.path.isdir(workspace_dir):
            os.makedirs(workspace_dir)
        self.workspaces = {}

    @lockutils.synchronized('workspaces', external=True)
    def get_workspace(self, name):
        """Returns the workspace that has the given name

        If the workspace isn't registered then `None` is returned.
        """
        self._populate()
        return self.workspaces.get(name)

    @lockutils.synchronized('workspaces', external=True)
    def rename_workspace(self, old_name, new_name):
        """Rename workspace *old_name* to *new_name* and persist the change."""
        self._populate()
        self._name_exists(old_name)
        self._workspace_name_exists(new_name)
        self.workspaces[new_name] = self.workspaces.pop(old_name)
        self._write_file()

    @lockutils.synchronized('workspaces', external=True)
    def move_workspace(self, name, path):
        """Point workspace *name* at a new, existing *path*."""
        self._populate()
        path = os.path.abspath(os.path.expanduser(path))
        self._name_exists(name)
        self._validate_path(path)
        self.workspaces[name] = path
        self._write_file()

    def _name_exists(self, name):
        # Exit with status 1 when *name* is not a registered workspace.
        if name not in self.workspaces:
            print("A workspace was not found with name: {0}".format(name))
            sys.exit(1)

    @lockutils.synchronized('workspaces', external=True)
    def remove_workspace_entry(self, name):
        """Drop *name* from the registry and return the path it pointed at."""
        self._populate()
        self._name_exists(name)
        workspace_path = self.workspaces.pop(name)
        self._write_file()
        return workspace_path

    @lockutils.synchronized('workspaces', external=True)
    def remove_workspace_directory(self, workspace_path):
        """Recursively delete the workspace directory itself."""
        shutil.rmtree(workspace_path)

    @lockutils.synchronized('workspaces', external=True)
    def list_workspaces(self):
        """Return the registry dict after pruning entries whose path is gone."""
        self._populate()
        self._validate_workspaces()
        return self.workspaces

    def _workspace_name_exists(self, name):
        # Exit with status 1 when *name* is already registered.
        if name in self.workspaces:
            print("A workspace already exists with name: {0}.".format(
                name))
            sys.exit(1)

    def _validate_path(self, path):
        # Exit with status 1 when *path* does not exist on disk.
        if not os.path.exists(path):
            print("Path does not exist.")
            sys.exit(1)

    @lockutils.synchronized('workspaces', external=True)
    def register_new_workspace(self, name, path, init=False):
        """Adds the new workspace and writes out the new workspace config"""
        self._populate()
        path = os.path.abspath(os.path.expanduser(path))
        # This only happens when register is called from outside of init
        if not init:
            self._validate_path(path)
        self._workspace_name_exists(name)
        self.workspaces[name] = path
        self._write_file()

    def _validate_workspaces(self):
        # Drop registry entries whose path no longer exists, then persist.
        if self.workspaces is not None:
            self.workspaces = {n: p for n, p in self.workspaces.items()
                               if os.path.exists(p)}
        self._write_file()

    def _write_file(self):
        # Serialize the whole registry back to the YAML file.
        with open(self.path, 'w') as f:
            f.write(yaml.dump(self.workspaces))

    def _populate(self):
        # Reload the registry from disk; a missing file means "no workspaces".
        if not os.path.isfile(self.path):
            return
        with open(self.path, 'r') as f:
            self.workspaces = yaml.safe_load(f) or {}
def add_global_arguments(parser):
    """Attach the --workspace-path option shared by every workspace command."""
    workspace_opts = {
        'required': False,
        'default': None,
        'help': ("The path to the workspace file, the default is "
                 "~/.tempest/workspace.yaml"),
    }
    parser.add_argument('--workspace-path', **workspace_opts)
    return parser
class TempestWorkspaceRegister(command.Command):
    """CLI command that adds a new workspace entry to the registry."""

    def get_description(self):
        return ('Registers a new tempest workspace via a given '
                '--name and --path')

    def get_parser(self, prog_name):
        reg_parser = super(TempestWorkspaceRegister, self).get_parser(prog_name)
        add_global_arguments(reg_parser)
        for opt in ('--name', '--path'):
            reg_parser.add_argument(opt, required=True)
        return reg_parser

    def take_action(self, parsed_args):
        manager = WorkspaceManager(parsed_args.workspace_path)
        self.manager = manager
        manager.register_new_workspace(parsed_args.name, parsed_args.path)
        sys.exit(0)
class TempestWorkspaceRename(command.Command):
    """CLI command that renames an existing workspace entry."""

    def get_description(self):
        return 'Renames a tempest workspace from --old-name to --new-name'

    def get_parser(self, prog_name):
        rename_parser = super(TempestWorkspaceRename, self).get_parser(prog_name)
        add_global_arguments(rename_parser)
        for opt in ('--old-name', '--new-name'):
            rename_parser.add_argument(opt, required=True)
        return rename_parser

    def take_action(self, parsed_args):
        manager = WorkspaceManager(parsed_args.workspace_path)
        self.manager = manager
        manager.rename_workspace(parsed_args.old_name, parsed_args.new_name)
        sys.exit(0)
class TempestWorkspaceMove(command.Command):
    """CLI command that re-points an existing workspace at a new path."""

    def get_description(self):
        return 'Changes the path of a given tempest workspace --name to --path'

    def get_parser(self, prog_name):
        move_parser = super(TempestWorkspaceMove, self).get_parser(prog_name)
        add_global_arguments(move_parser)
        for opt in ('--name', '--path'):
            move_parser.add_argument(opt, required=True)
        return move_parser

    def take_action(self, parsed_args):
        manager = WorkspaceManager(parsed_args.workspace_path)
        self.manager = manager
        manager.move_workspace(parsed_args.name, parsed_args.path)
        sys.exit(0)
class TempestWorkspaceRemove(command.Command):
    """CLI command that deletes a workspace entry (and optionally its files)."""

    def get_description(self):
        return 'Deletes the entry for a given tempest workspace --name'

    def get_parser(self, prog_name):
        remove_parser = super(TempestWorkspaceRemove, self).get_parser(prog_name)
        add_global_arguments(remove_parser)
        remove_parser.add_argument('--name', required=True)
        remove_parser.add_argument('--rmdir', action='store_true',
                                   help='Deletes the given workspace directory')
        return remove_parser

    def take_action(self, parsed_args):
        manager = WorkspaceManager(parsed_args.workspace_path)
        self.manager = manager
        removed_path = manager.remove_workspace_entry(parsed_args.name)
        if parsed_args.rmdir:
            manager.remove_workspace_directory(removed_path)
        sys.exit(0)
class TempestWorkspaceList(lister.Lister):
    """CLI command that lists every registered workspace as a table."""

    def get_description(self):
        return 'Outputs the name and path of all known tempest workspaces'

    def get_parser(self, prog_name):
        list_parser = super(TempestWorkspaceList, self).get_parser(prog_name)
        return add_global_arguments(list_parser)

    def take_action(self, parsed_args):
        self.manager = WorkspaceManager(parsed_args.workspace_path)
        entries = self.manager.list_workspaces().items()
        return (("Name", "Path"), ((name, path) for name, path in entries))
| apache-2.0 |
OpenCobolIDE/OpenCobolIDE | open_cobol_ide/extlibs/future/standard_library/__init__.py | 34 | 27587 | """
Python 3 reorganized the standard library (PEP 3108). This module exposes
several standard library modules to Python 2 under their new Python 3
names.
It is designed to be used as follows::
from future import standard_library
standard_library.install_aliases()
And then these normal Py3 imports work on both Py3 and Py2::
import builtins
import copyreg
import queue
import reprlib
import socketserver
import winreg # on Windows only
import test.support
    import html, html.parser, html.entities
import http, http.client, http.server
import http.cookies, http.cookiejar
import urllib.parse, urllib.request, urllib.response, urllib.error, urllib.robotparser
import xmlrpc.client, xmlrpc.server
import _thread
import _dummy_thread
import _markupbase
from itertools import filterfalse, zip_longest
from sys import intern
from collections import UserDict, UserList, UserString
from collections import OrderedDict, Counter # even on Py2.6
from subprocess import getoutput, getstatusoutput
from subprocess import check_output # even on Py2.6
(The renamed modules and functions are still available under their old
names on Python 2.)
This is a cleaner alternative to this idiom (see
http://docs.pythonsprints.com/python3_porting/py-porting.html)::
try:
import queue
except ImportError:
import Queue as queue
Limitations
-----------
We don't currently support these modules, but would like to::
import dbm
import dbm.dumb
import dbm.gnu
import collections.abc # on Py33
import pickle # should (optionally) bring in cPickle on Python 2
"""
from __future__ import absolute_import, division, print_function
import sys
import logging
import imp
import contextlib
import types
import copy
import os
# Make a dedicated logger; leave the root logger to be configured
# by the application.
flog = logging.getLogger('future_stdlib')
_formatter = logging.Formatter(logging.BASIC_FORMAT)
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
flog.addHandler(_handler)
# Default to warnings and above; callers can lower this when debugging.
flog.setLevel(logging.WARN)

from future.utils import PY2, PY3
# The modules that are defined under the same names on Py3 but with
# different contents in a significant way (e.g. submodules) are:
#   pickle (fast one)
#   dbm
#   urllib
#   test
#   email
REPLACED_MODULES = set(['test', 'urllib', 'pickle', 'dbm'])  # add email and dbm when we support it

# The following module names are not present in Python 2.x, so they cause no
# potential clashes between the old and new names:
#   http
#   html
#   tkinter
#   xmlrpc

# Keys: Py2 / real module names
# Values: Py3 / simulated module names
RENAMES = {
    # 'cStringIO': 'io',  # there's a new io module in Python 2.6
    #                       that provides StringIO and BytesIO
    # 'StringIO': 'io',  # ditto
    # 'cPickle': 'pickle',
    '__builtin__': 'builtins',
    'copy_reg': 'copyreg',
    'Queue': 'queue',
    'future.moves.socketserver': 'socketserver',
    'ConfigParser': 'configparser',
    'repr': 'reprlib',
    # 'FileDialog': 'tkinter.filedialog',
    # 'tkFileDialog': 'tkinter.filedialog',
    # 'SimpleDialog': 'tkinter.simpledialog',
    # 'tkSimpleDialog': 'tkinter.simpledialog',
    # 'tkColorChooser': 'tkinter.colorchooser',
    # 'tkCommonDialog': 'tkinter.commondialog',
    # 'Dialog': 'tkinter.dialog',
    # 'Tkdnd': 'tkinter.dnd',
    # 'tkFont': 'tkinter.font',
    # 'tkMessageBox': 'tkinter.messagebox',
    # 'ScrolledText': 'tkinter.scrolledtext',
    # 'Tkconstants': 'tkinter.constants',
    # 'Tix': 'tkinter.tix',
    # 'ttk': 'tkinter.ttk',
    # 'Tkinter': 'tkinter',
    '_winreg': 'winreg',
    'thread': '_thread',
    'dummy_thread': '_dummy_thread',
    # 'anydbm': 'dbm',   # causes infinite import loop
    # 'whichdb': 'dbm',  # causes infinite import loop
    # anydbm and whichdb are handled by fix_imports2
    # 'dbhash': 'dbm.bsd',
    # 'dumbdbm': 'dbm.dumb',
    # 'dbm': 'dbm.ndbm',
    # 'gdbm': 'dbm.gnu',
    'future.moves.xmlrpc': 'xmlrpc',
    # 'future.backports.email': 'email',  # for use by urllib
    # 'DocXMLRPCServer': 'xmlrpc.server',
    # 'SimpleXMLRPCServer': 'xmlrpc.server',
    # 'httplib': 'http.client',
    # 'htmlentitydefs' : 'html.entities',
    # 'HTMLParser' : 'html.parser',
    # 'Cookie': 'http.cookies',
    # 'cookielib': 'http.cookiejar',
    # 'BaseHTTPServer': 'http.server',
    # 'SimpleHTTPServer': 'http.server',
    # 'CGIHTTPServer': 'http.server',
    # 'future.backports.test': 'test',  # primarily for renaming test_support to support
    # 'commands': 'subprocess',
    # 'urlparse' : 'urllib.parse',
    # 'robotparser' : 'urllib.robotparser',
    # 'abc': 'collections.abc',  # for Py33
    # 'future.utils.six.moves.html': 'html',
    # 'future.utils.six.moves.http': 'http',
    'future.moves.html': 'html',
    'future.moves.http': 'http',
    # 'future.backports.urllib': 'urllib',
    # 'future.utils.six.moves.urllib': 'urllib',
    'future.moves._markupbase': '_markupbase',
}

# It is complicated and apparently brittle to mess around with the
# ``sys.modules`` cache in order to support "import urllib" meaning two
# different things (Py2.7 urllib and backported Py3.3-like urllib) in different
# contexts. So we require explicit imports for these modules.
# Sanity check: no simulated (Py3) name may also be a replaced module name.
assert len(set(RENAMES.values()) & set(REPLACED_MODULES)) == 0

# Harmless renames that we can insert.
# These modules need names from elsewhere being added to them:
#   subprocess: should provide getoutput and other fns from commands
#               module but these fns are missing: getstatus, mk2arg,
#               mkarg
#   re:         needs an ASCII constant that works compatibly with Py3
#   etc: see lib2to3/fixes/fix_imports.py

# (New module name, new object name, old module name, old object name)
MOVES = [('collections', 'UserList', 'UserList', 'UserList'),
         ('collections', 'UserDict', 'UserDict', 'UserDict'),
         ('collections', 'UserString', 'UserString', 'UserString'),
         ('itertools', 'filterfalse', 'itertools', 'ifilterfalse'),
         ('itertools', 'zip_longest', 'itertools', 'izip_longest'),
         ('sys', 'intern', '__builtin__', 'intern'),
         # The re module has no ASCII flag in Py2, but this is the default.
         # Set re.ASCII to a zero constant. stat.ST_MODE just happens to be one
         # (and it exists on Py2.6+).
         ('re', 'ASCII', 'stat', 'ST_MODE'),
         ('base64', 'encodebytes', 'base64', 'encodestring'),
         ('base64', 'decodebytes', 'base64', 'decodestring'),
         ('subprocess', 'getoutput', 'commands', 'getoutput'),
         ('subprocess', 'getstatusoutput', 'commands', 'getstatusoutput'),
         ('subprocess', 'check_output', 'future.backports.misc', 'check_output'),
         ('math', 'ceil', 'future.backports.misc', 'ceil'),
         ('collections', 'OrderedDict', 'future.backports.misc', 'OrderedDict'),
         ('collections', 'Counter', 'future.backports.misc', 'Counter'),
         ('itertools', 'count', 'future.backports.misc', 'count'),
         ('reprlib', 'recursive_repr', 'future.backports.misc', 'recursive_repr'),
         ('functools', 'cmp_to_key', 'future.backports.misc', 'cmp_to_key'),
         # This is no use, since "import urllib.request" etc. still fails:
         # ('urllib', 'error', 'future.moves.urllib', 'error'),
         # ('urllib', 'parse', 'future.moves.urllib', 'parse'),
         # ('urllib', 'request', 'future.moves.urllib', 'request'),
         # ('urllib', 'response', 'future.moves.urllib', 'response'),
         # ('urllib', 'robotparser', 'future.moves.urllib', 'robotparser'),
         ]
# A minimal example of an import hook:
# class WarnOnImport(object):
# def __init__(self, *args):
# self.module_names = args
#
# def find_module(self, fullname, path=None):
# if fullname in self.module_names:
# self.path = path
# return self
# return None
#
# def load_module(self, name):
# if name in sys.modules:
# return sys.modules[name]
# module_info = imp.find_module(name, self.path)
# module = imp.load_module(name, *module_info)
# sys.modules[name] = module
# flog.warning("Imported deprecated module %s", name)
# return module
class RenameImport(object):
    """
    A class for import hooks mapping Py3 module names etc. to the Py2 equivalents.
    """
    # Different RenameImport classes are created when importing this module from
    # different source files. This causes isinstance(hook, RenameImport) checks
    # to produce inconsistent results. We add this RENAMER attribute here so
    # remove_hooks() and install_hooks() can find instances of these classes
    # easily:
    RENAMER = True

    def __init__(self, old_to_new):
        '''
        Pass in a dictionary-like object mapping from old names to new
        names. E.g. {'ConfigParser': 'configparser', 'cPickle': 'pickle'}
        '''
        self.old_to_new = old_to_new
        both = set(old_to_new.keys()) & set(old_to_new.values())
        # The mapping must be unambiguous: no name may be both an old and a
        # new name, and no two old names may map to the same new name.
        assert (len(both) == 0 and
                len(set(old_to_new.values())) == len(old_to_new.values())), \
            'Ambiguity in renaming (handler not implemented)'
        self.new_to_old = dict((new, old) for (old, new) in old_to_new.items())

    def find_module(self, fullname, path=None):
        # PEP 302 finder: claim only top-level packages of the new (Py3)
        # names; submodules are resolved by load_module below.
        # Handles hierarchical importing: package.module.module2
        new_base_names = set([s.split('.')[0] for s in self.new_to_old])
        # Before v0.12: Was: if fullname in set(self.old_to_new) | new_base_names:
        if fullname in new_base_names:
            return self
        return None

    def load_module(self, name):
        # PEP 302 loader: resolve *name* (a Py3-style name) to the module it
        # aliases, import that, and cache it in sys.modules under *name*.
        path = None
        if name in sys.modules:
            return sys.modules[name]
        elif name in self.new_to_old:
            # New name. Look up the corresponding old (Py2) name:
            oldname = self.new_to_old[name]
            module = self._find_and_load_module(oldname)
            # module.__future_module__ = True
        else:
            module = self._find_and_load_module(name)
        # In any case, make it available under the requested (Py3) name
        sys.modules[name] = module
        return module

    def _find_and_load_module(self, name, path=None):
        """
        Finds and loads it. But if there's a . in the name, handles it
        properly.
        """
        bits = name.split('.')
        while len(bits) > 1:
            # Treat the first bit as a package
            packagename = bits.pop(0)
            package = self._find_and_load_module(packagename, path)
            try:
                path = package.__path__
            except AttributeError:
                # This could be e.g. moves.
                flog.debug('Package {0} has no __path__.'.format(package))
                if name in sys.modules:
                    return sys.modules[name]
                flog.debug('What to do here?')

            name = bits[0]
        # Leaf lookup via the deprecated imp API (this codebase predates
        # importlib being universally available).
        module_info = imp.find_module(name, path)
        return imp.load_module(name, *module_info)
class hooks(object):
    """
    Context manager that installs the Py3-renaming import hooks on entry
    and, if they were not already active beforehand, removes them on exit.

    Use like this:

    >>> from future import standard_library
    >>> with standard_library.hooks():
    ...     import http.client
    >>> import requests

    Modules imported inside the block stay accessible in the current
    namespace, while later imports (like requests) run without the hooks.
    A copy of sys.modules is saved on the instance at entry for
    bookkeeping.
    """

    def __enter__(self):
        self.old_sys_modules = copy.copy(sys.modules)
        self.hooks_were_installed = detect_hooks()
        install_hooks()
        return self

    def __exit__(self, *args):
        if not self.hooks_were_installed:
            remove_hooks()
# Sanity check for is_py2_stdlib_module(): We aren't replacing any
# builtin modules names:
if PY2:
    # None of the simulated (Py3) names may shadow a compiled-in builtin.
    assert len(set(RENAMES.values()) & set(sys.builtin_module_names)) == 0
def is_py2_stdlib_module(m):
    """
    Tries to infer whether the module m is from the Python 2 standard library.
    This may not be reliable on all systems.
    """
    if PY3:
        return False
    # Lazily compute the stdlib directory once and cache it as a function
    # attribute, derived from the file locations of known stdlib modules.
    if 'stdlib_path' not in is_py2_stdlib_module.__dict__:
        stdlib_files = [contextlib.__file__, os.__file__, copy.__file__]
        stdlib_paths = [os.path.split(f)[0] for f in stdlib_files]
        if len(set(stdlib_paths)) != 1:
            # This seems to happen on travis-ci.org. Very strange. We'll try to
            # ignore it.
            # (logging.Logger.warn is deprecated; use warning instead.)
            flog.warning('Multiple locations found for the Python standard '
                         'library: %s' % stdlib_paths)
        # Choose the first one arbitrarily
        is_py2_stdlib_module.stdlib_path = stdlib_paths[0]

    if m.__name__ in sys.builtin_module_names:
        return True

    if hasattr(m, '__file__'):
        modpath = os.path.split(m.__file__)
        if (modpath[0].startswith(is_py2_stdlib_module.stdlib_path) and
                'site-packages' not in modpath[0]):
            return True

    return False
def scrub_py2_sys_modules():
    """
    Removes any Python 2 standard library modules from ``sys.modules`` that
    would interfere with Py3-style imports using import hooks. Examples are
    modules with the same names (like urllib or email).

    (Note that currently import hooks are disabled for modules like these
    with ambiguous names anyway ...)

    Returns a dict mapping each removed module name to its module object so
    the caller can restore them later (see restore_sys_modules()).
    """
    if PY3:
        return {}
    scrubbed = {}
    # Only names that are both "replaced" and renamed can clash.
    for modulename in REPLACED_MODULES & set(RENAMES.keys()):
        # Idiom fix (E713): "x not in y" instead of "not x in y".
        if modulename not in sys.modules:
            continue

        module = sys.modules[modulename]

        if is_py2_stdlib_module(module):
            flog.debug('Deleting (Py2) {} from sys.modules'.format(modulename))
            scrubbed[modulename] = sys.modules[modulename]
            del sys.modules[modulename]
    return scrubbed
def scrub_future_sys_modules():
    """
    Deprecated.  Kept only for backward compatibility; performs no
    scrubbing and always returns an empty mapping.
    """
    return dict()
class suspend_hooks(object):
    """
    Context manager that temporarily disables the import hooks.

    >>> from future import standard_library
    >>> standard_library.install_hooks()
    >>> import http.client
    >>> # ...
    >>> with standard_library.suspend_hooks():
    >>>     import requests  # incompatible with ``future``'s standard library hooks

    If the hooks were disabled before the context, they are not installed when
    the context is left.
    """

    def __enter__(self):
        self.hooks_were_installed = detect_hooks()
        remove_hooks()
        return self

    def __exit__(self, *args):
        if self.hooks_were_installed:
            install_hooks()
def restore_sys_modules(scrubbed):
    """
    Add any previously scrubbed modules back to the sys.modules cache,
    but only if it's safe to do so.
    """
    overlap = set(sys.modules).intersection(scrubbed)
    if overlap:
        # If several, choose one arbitrarily to raise an exception about
        offender = next(iter(overlap))
        raise ImportError('future module {} clashes with Py2 module'
                          .format(offender))
    sys.modules.update(scrubbed)
def install_aliases():
    """
    Monkey-patches the standard library in Py2.6/7 to provide
    aliases for better Py3 compatibility.
    """
    if PY3:
        return
    # if hasattr(install_aliases, 'run_already'):
    #     return

    # Copy each (old module, old name) attribute onto the corresponding
    # new module under its new name, per the MOVES table above.
    for (newmodname, newobjname, oldmodname, oldobjname) in MOVES:
        __import__(newmodname)
        # We look up the module in sys.modules because __import__ just returns the
        # top-level package:
        newmod = sys.modules[newmodname]
        # newmod.__future_module__ = True

        __import__(oldmodname)
        oldmod = sys.modules[oldmodname]

        obj = getattr(oldmod, oldobjname)
        setattr(newmod, newobjname, obj)

    # Hack for urllib so it appears to have the same structure on Py2 as on Py3
    import urllib
    from future.backports.urllib import request
    from future.backports.urllib import response
    from future.backports.urllib import parse
    from future.backports.urllib import error
    from future.backports.urllib import robotparser
    urllib.request = request
    urllib.response = response
    urllib.parse = parse
    urllib.error = error
    urllib.robotparser = robotparser
    # Register the submodules in sys.modules so "import urllib.request"
    # style imports resolve to the backports.
    sys.modules['urllib.request'] = request
    sys.modules['urllib.response'] = response
    sys.modules['urllib.parse'] = parse
    sys.modules['urllib.error'] = error
    sys.modules['urllib.robotparser'] = robotparser

    # Patch the test module so it appears to have the same structure on Py2 as on Py3
    try:
        import test
    except ImportError:
        pass
    try:
        from future.moves.test import support
    except ImportError:
        pass
    else:
        test.support = support
        sys.modules['test.support'] = support

    # Patch the dbm module so it appears to have the same structure on Py2 as on Py3
    try:
        import dbm
    except ImportError:
        pass
    else:
        from future.moves.dbm import dumb
        dbm.dumb = dumb
        sys.modules['dbm.dumb'] = dumb
        try:
            from future.moves.dbm import gnu
        except ImportError:
            pass
        else:
            dbm.gnu = gnu
            sys.modules['dbm.gnu'] = gnu
        try:
            from future.moves.dbm import ndbm
        except ImportError:
            pass
        else:
            dbm.ndbm = ndbm
            sys.modules['dbm.ndbm'] = ndbm

    # install_aliases.run_already = True
def install_hooks():
    """
    Install the future.standard_library import hook into sys.meta_path.
    No-op on Python 3.  Also installs the module/name aliases via
    install_aliases().
    """
    if PY3:
        return
    install_aliases()

    flog.debug('sys.meta_path was: {0}'.format(sys.meta_path))
    flog.debug('Installing hooks ...')

    renamer = RenameImport(RENAMES)
    # Add it unless it's there already
    already_installed = detect_hooks()
    if not already_installed:
        sys.meta_path.append(renamer)
    flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path))
def enable_hooks():
    """
    Deprecated alias for install_hooks().  This will be removed by
    ``future`` v1.0.
    """
    install_hooks()
def remove_hooks(scrub_sys_modules=False):
    """
    Remove the future import hook(s) from sys.meta_path.  No-op on
    Python 3.  When *scrub_sys_modules* is true, also calls the
    (deprecated, no-op) scrub_future_sys_modules() -- kept for backward
    compatibility with v0.11.x, where scrubbing happened implicitly.
    """
    if PY3:
        return
    flog.debug('Uninstalling hooks ...')
    # Walk indices from the end so deletions don't shift unvisited items.
    for idx in range(len(sys.meta_path) - 1, -1, -1):
        if hasattr(sys.meta_path[idx], 'RENAMER'):
            del sys.meta_path[idx]
    if scrub_sys_modules:
        scrub_future_sys_modules()
def disable_hooks():
    """
    Deprecated alias for remove_hooks().  This will be removed by
    ``future`` v1.0.
    """
    remove_hooks()
def detect_hooks():
    """
    Returns True if the import hooks are installed, False if not.
    """
    flog.debug('Detecting hooks ...')
    # Use a generator with any() (short-circuits on the first renamer)
    # instead of materializing an intermediate list.
    present = any(hasattr(hook, 'RENAMER') for hook in sys.meta_path)
    if present:
        flog.debug('Detected.')
    else:
        flog.debug('Not detected.')
    return present
# As of v0.12, this no longer happens implicitly:
# if not PY3:
#     install_hooks()

# Initialize the cache used by cache_py2_modules() below.  It is stored on
# the sys module itself (presumably so a single cache is shared even if this
# module is imported under more than one name -- TODO confirm).
if not hasattr(sys, 'py2_modules'):
    sys.py2_modules = {}
def cache_py2_modules():
    """
    Currently this function is unneeded, as we are not attempting to provide import hooks
    for modules with ambiguous names: email, urllib, pickle.
    """
    # Already populated once: nothing to do.
    if len(sys.py2_modules) != 0:
        return
    # Must be called before the hooks are installed, so the cached modules
    # are the real (un-renamed) ones.
    assert not detect_hooks()
    import urllib
    sys.py2_modules['urllib'] = urllib

    import email
    sys.py2_modules['email'] = email

    import pickle
    sys.py2_modules['pickle'] = pickle

    # Not all Python installations have test module. (Anaconda doesn't, for example.)
    # try:
    #     import test
    # except ImportError:
    #     sys.py2_modules['test'] = None
    # sys.py2_modules['test'] = test

    # import dbm
    # sys.py2_modules['dbm'] = dbm
def import_(module_name, backport=False):
    """
    Pass a (potentially dotted) module name of a Python 3 standard library
    module. This function imports the module compatibly on Py2 and Py3 and
    returns the top-level module.

    Example use:
    >>> http = import_('http.client')
    >>> http = import_('http.server')
    >>> urllib = import_('urllib.request')

    Then:
    >>> conn = http.client.HTTPConnection(...)
    >>> response = urllib.request.urlopen('http://mywebsite.com')
    >>> # etc.

    Use as follows:
    >>> package_name = import_(module_name)

    On Py3, equivalent to this:

    >>> import module_name

    On Py2, equivalent to this if backport=False:

    >>> from future.moves import module_name

    or to this if backport=True:

    >>> from future.backports import module_name

    except that it also handles dotted module names such as ``http.client``
    The effect then is like this:

    >>> from future.backports import module
    >>> from future.backports.module import submodule
    >>> module.submodule = submodule

    Note that this would be a SyntaxError in Python:

    >>> from future.backports import http.client
    """
    # Python 2.6 doesn't have importlib in the stdlib, so it requires
    # the backported ``importlib`` package from PyPI as a dependency to use
    # this function:
    import importlib

    if PY3:
        return __import__(module_name)
    else:
        # client.blah = blah
        # Then http.client = client
        # etc.
        if backport:
            prefix = 'future.backports'
        else:
            prefix = 'future.moves'

        parts = prefix.split('.') + module_name.split('.')

        # First pass: import every package level of the dotted path,
        # e.g. future, future.moves, future.moves.http, ...
        modules = []
        for i, part in enumerate(parts):
            sofar = '.'.join(parts[:i+1])
            modules.append(importlib.import_module(sofar))
        # Second pass (outside-in): bind each submodule as an attribute of
        # its parent so attribute access like http.client works.
        for i, part in reversed(list(enumerate(parts))):
            if i == 0:
                break
            setattr(modules[i-1], part, modules[i])

        # Return the next-most top-level module after future.backports / future.moves:
        return modules[2]
def from_import(module_name, *symbol_names, **kwargs):
    """
    Example use:

    >>> HTTPConnection = from_import('http.client', 'HTTPConnection')
    >>> HTTPServer = from_import('http.server', 'HTTPServer')
    >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse')

    Equivalent to this on Py3:

    >>> from module_name import symbol_names[0], symbol_names[1], ...

    and this on Py2:

    >>> from future.moves.module_name import symbol_names[0], ...

    or:

    >>> from future.backports.module_name import symbol_names[0], ...

    except that it also handles dotted module names such as ``http.client``.

    Accepts a keyword-only ``backport`` flag (default False) selecting
    future.backports over future.moves on Py2.
    """
    if PY3:
        return __import__(module_name)
    else:
        # Bug fix: importlib was referenced below without ever being
        # imported, so every Py2 call raised NameError.  Import it locally,
        # matching the style of import_() above.
        import importlib

        if 'backport' in kwargs and bool(kwargs['backport']):
            prefix = 'future.backports'
        else:
            prefix = 'future.moves'
        module = importlib.import_module(prefix + '.' + module_name)
        output = [getattr(module, name) for name in symbol_names]
        # Mirror "from m import a" (single object) vs
        # "a, b = from_import(...)" (list of objects).
        if len(output) == 1:
            return output[0]
        else:
            return output
class exclude_local_folder_imports(object):
    """
    A context-manager that prevents standard library modules like configparser
    from being imported from the local python-future source folder on Py3.

    (This was needed prior to v0.16.0 because the presence of a configparser
    folder would otherwise have prevented setuptools from running on Py3. Maybe
    it's not needed any more?)
    """
    def __init__(self, *args):
        # args: one or more *top-level* stdlib module names to shield.
        assert len(args) > 0
        self.module_names = args
        # Disallow dotted module names like http.client:
        if any(['.' in m for m in self.module_names]):
            raise NotImplementedError('Dotted module names are not supported')

    def __enter__(self):
        # Snapshot the interpreter state so __exit__ can restore it.
        self.old_sys_path = copy.copy(sys.path)
        self.old_sys_modules = copy.copy(sys.modules)
        if sys.version_info[0] < 3:
            # Nothing to exclude on Py2: the shadowing problem is Py3-only.
            return
        # The presence of all these indicates we've found our source folder,
        # because `builtins` won't have been installed in site-packages by setup.py:
        FUTURE_SOURCE_SUBFOLDERS = ['future', 'past', 'libfuturize', 'libpasteurize', 'builtins']
        # Look for the future source folder (iterate over the *copy* so the
        # removal below doesn't disturb the iteration):
        for folder in self.old_sys_path:
            if all([os.path.exists(os.path.join(folder, subfolder))
                    for subfolder in FUTURE_SOURCE_SUBFOLDERS]):
                # Found it. Remove it.
                sys.path.remove(folder)
        # Ensure we import the system module:
        for m in self.module_names:
            # Delete the module and any submodules from sys.modules:
            # for key in list(sys.modules):
            #     if key == m or key.startswith(m + '.'):
            #         try:
            #             del sys.modules[key]
            #         except KeyError:
            #             pass
            try:
                # level=0 forces an absolute import.  NOTE(review): with the
                # sys.modules purge above commented out, an already-imported
                # local copy is simply returned from the cache -- presumably
                # acceptable here; confirm if re-enabling the purge.
                module = __import__(m, level=0)
            except ImportError:
                # There's a problem importing the system module. E.g. the
                # winreg module is not available except on Windows.
                pass

    def __exit__(self, *args):
        # Restore sys.path and sys.modules:
        sys.path = self.old_sys_path
        # Re-insert only entries that were present before and have since
        # disappeared; modules freshly imported inside the block are
        # deliberately left in sys.modules.
        for m in set(self.old_sys_modules.keys()) - set(sys.modules.keys()):
            sys.modules[m] = self.old_sys_modules[m]
# Top-level stdlib module names for which python-future ships Py3-style
# source folders; these are the names that may shadow the real stdlib and
# therefore need the exclude_local_folder_imports() treatment.
TOP_LEVEL_MODULES = ['builtins',
                     'copyreg',
                     'html',
                     'http',
                     'queue',
                     'reprlib',
                     'socketserver',
                     'test',
                     'tkinter',
                     'winreg',       # Windows-only; import may fail elsewhere
                     'xmlrpc',
                     '_dummy_thread',
                     '_markupbase',
                     '_thread',
                     ]
def import_top_level_modules():
    """Pre-import the system versions of all potentially shadowed modules.

    The local python-future source folder is temporarily dropped from
    ``sys.path`` (via ``exclude_local_folder_imports``) so that the genuine
    standard-library modules win the import race.
    """
    with exclude_local_folder_imports(*TOP_LEVEL_MODULES):
        for module_name in TOP_LEVEL_MODULES:
            try:
                __import__(module_name)
            except ImportError:
                # Platform-specific modules (e.g. winreg) may be absent.
                pass
| gpl-3.0 |
pizzathief/numpy | numpy/lib/type_check.py | 3 | 19811 | """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
"""
from __future__ import division, absolute_import, print_function
import functools
import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
'typename', 'asfarray', 'mintypecode', 'asscalar',
'common_type']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, isnan, zeros
from numpy.core.overrides import set_module
from numpy.core import overrides
from .ufunclike import isneginf, isposinf
# Dispatch decorator pre-bound so wrapped functions report ``numpy`` as
# their public module (NEP 18 __array_function__ protocol support).
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')

# Type-code characters ordered from largest to smallest element size:
# a *smaller* index means a larger (safer-to-cast-to) type.
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
@set_module('numpy')
def mintypecode(typechars, typeset='GDFgdf', default='d'):
    """
    Return the character for the minimum-size type to which given types can
    be safely cast.

    The returned type character must represent the smallest size dtype such
    that an array of the returned type can handle the data from an array of
    all types in `typechars` (or if `typechars` is an array, then its
    dtype.char).

    Parameters
    ----------
    typechars : list of str or array_like
        If a list of strings, each string should represent a dtype.
        If array_like, the character representation of the array dtype is used.
    typeset : str or list of str, optional
        The set of characters that the returned character is chosen from.
        The default set is 'GDFgdf'.
    default : str, optional
        The default character, this is returned if none of the characters in
        `typechars` matches a character in `typeset`.

    Returns
    -------
    typechar : str
        The character representing the minimum-size type that was found.

    See Also
    --------
    dtype, sctype2char, maximum_sctype

    Examples
    --------
    >>> np.mintypecode(['d', 'f', 'S'])
    'd'
    >>> x = np.array([1.1, 2-3.j])
    >>> np.mintypecode(x)
    'D'
    >>> np.mintypecode('abceh', default='G')
    'G'
    """
    # Normalise every entry to a single dtype character: a non-empty string
    # is taken verbatim as a type code, anything else goes through asarray.
    codes = []
    for entry in typechars:
        if isinstance(entry, str) and entry:
            codes.append(entry)
        else:
            codes.append(asarray(entry).dtype.char)
    candidates = [code for code in codes if code in typeset]
    if not candidates:
        return default
    if 'F' in candidates and 'd' in candidates:
        # Single-precision complex combined with double-precision real can
        # only be held safely by double-precision complex.
        return 'D'
    # Characters earlier in this ranking have a *larger* element size, so the
    # candidate with the smallest index can represent all the others.
    elsize_ranking = 'GDFgdfQqLlIiHhBb?'
    return min(candidates, key=elsize_ranking.index)
def _asfarray_dispatcher(a, dtype=None):
    # Only ``a`` participates in __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_asfarray_dispatcher)
def asfarray(a, dtype=_nx.float_):
    """
    Return an array converted to a float type.

    Parameters
    ----------
    a : array_like
        The input array.
    dtype : str or dtype object, optional
        Float type code to coerce input array `a`.  If `dtype` is not a
        floating-point (inexact) type, it is replaced with float64.

    Returns
    -------
    out : ndarray
        The input `a` as a float ndarray.

    Examples
    --------
    >>> np.asfarray([2, 3])
    array([2., 3.])
    >>> np.asfarray([2, 3], dtype='float')
    array([2., 3.])
    >>> np.asfarray([2, 3], dtype='int8')
    array([2., 3.])
    """
    # Non-inexact requests (ints, bools, strings...) silently fall back to
    # the default float type, as documented above.
    target = dtype if _nx.issubdtype(dtype, _nx.inexact) else _nx.float_
    return asarray(a, dtype=target)
def _real_dispatcher(val):
    return (val,)


@array_function_dispatch(_real_dispatcher)
def real(val):
    """
    Return the real part of the complex argument.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray or scalar
        The real component of the complex argument. If `val` is real, the type
        of `val` is used for the output. If `val` has complex elements, the
        returned type is float.

    See Also
    --------
    real_if_close, imag, angle

    Examples
    --------
    >>> np.real(1 + 1j)
    1.0
    """
    # ndarrays and Python numeric scalars already expose ``.real``; any
    # other array_like is coerced first (subclasses are preserved).
    if hasattr(val, 'real'):
        return val.real
    return asanyarray(val).real
def _imag_dispatcher(val):
    return (val,)


@array_function_dispatch(_imag_dispatcher)
def imag(val):
    """
    Return the imaginary part of the complex argument.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray or scalar
        The imaginary component of the complex argument. If `val` is real,
        the type of `val` is used for the output. If `val` has complex
        elements, the returned type is float.

    See Also
    --------
    real, angle, real_if_close

    Examples
    --------
    >>> np.imag(1 + 1j)
    1.0
    """
    # ndarrays and Python numeric scalars already expose ``.imag``; any
    # other array_like is coerced first (subclasses are preserved).
    if hasattr(val, 'imag'):
        return val.imag
    return asanyarray(val).imag
def _is_type_dispatcher(x):
    # Shared dispatcher for iscomplex/isreal/iscomplexobj/isrealobj.
    return (x,)


@array_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
    """
    Returns a bool array, where True if input element is complex.

    What is tested is whether the input has a non-zero imaginary part, not
    whether the input *type* is complex.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray of bools
        Output array.

    See Also
    --------
    isreal
    iscomplexobj : Return True if x is a complex type or an array of complex
                   numbers.

    Examples
    --------
    >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
    array([ True, False, False, False, False,  True])
    """
    arr = asanyarray(x)
    if issubclass(arr.dtype.type, _nx.complexfloating):
        return arr.imag != 0
    # A real-typed input cannot carry an imaginary part, so the answer is
    # an all-False array of the same shape.
    result = zeros(arr.shape, bool)
    return result[()]  # unwrap 0-d arrays to a scalar
@array_function_dispatch(_is_type_dispatcher)
def isreal(x):
    """
    Returns a bool array, where True if input element is real.

    An element of complex type with zero imaginary part still counts as
    real here; only the *value* is tested, not the type.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray, bool
        Boolean array of same shape as `x`.

    See Also
    --------
    iscomplex
    isrealobj : Return True if x is not a complex type.

    Examples
    --------
    >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j])
    array([False,  True,  True,  True,  True, False])
    """
    # An element is real exactly when its imaginary component equals zero.
    return imag(x) == 0
@array_function_dispatch(_is_type_dispatcher)
def iscomplexobj(x):
    """
    Check for a complex type or an array of complex numbers.

    The *type* of the input is checked, not the value: an input whose
    imaginary part is zero still evaluates to True if its dtype is complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    iscomplexobj : bool
        True if `x` is of a complex type or has at least one complex element.

    See Also
    --------
    isrealobj, iscomplex

    Examples
    --------
    >>> np.iscomplexobj(1)
    False
    >>> np.iscomplexobj(1+0j)
    True
    >>> np.iscomplexobj([3, 1+0j, True])
    True
    """
    # EAFP: use an existing dtype attribute if present, otherwise coerce.
    try:
        scalar_type = x.dtype.type
    except AttributeError:
        scalar_type = asarray(x).dtype.type
    return issubclass(scalar_type, _nx.complexfloating)
@array_function_dispatch(_is_type_dispatcher)
def isrealobj(x):
    """
    Return True if x is a not complex type or an array of complex numbers.

    The *type* of the input is checked, not the value: an input with a zero
    imaginary part still evaluates to False if its dtype is complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    y : bool
        False if `x` is of a complex type.

    See Also
    --------
    iscomplexobj, isreal

    Examples
    --------
    >>> np.isrealobj(1)
    True
    >>> np.isrealobj(1+0j)
    False
    >>> np.isrealobj([3, 1+0j, True])
    False
    """
    # Exact complement of iscomplexobj.
    return not iscomplexobj(x)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
    """Return (max, min) finite values representable by float type ``t``."""
    from numpy.core import getlimits
    f = getlimits.finfo(t)
    return f.max, f.min


def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
    # Only ``x`` participates in __array_function__ dispatch.
    return (x,)


@array_function_dispatch(_nan_to_num_dispatcher)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
    """
    Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    If `x` is inexact, NaN is replaced by zero or by the user defined value in
    `nan` keyword, infinity is replaced by the largest finite floating point
    values representable by ``x.dtype`` or by the user defined value in
    `posinf` keyword and -infinity is replaced by the most negative finite
    floating point values representable by ``x.dtype`` or by the user defined
    value in `neginf` keyword.

    For complex dtypes, the above is applied to each of the real and
    imaginary components of `x` separately.

    If `x` is not inexact, then no replacements are made.

    Parameters
    ----------
    x : scalar or array_like
        Input data.
    copy : bool, optional
        Whether to create a copy of `x` (True) or to replace values
        in-place (False). The in-place operation only occurs if
        casting to an array does not require a copy.
        Default is True.

        .. versionadded:: 1.13
    nan : int, float, optional
        Value to be used to fill NaN values. If no value is passed
        then NaN values will be replaced with 0.0.

        .. versionadded:: 1.17
    posinf : int, float, optional
        Value to be used to fill positive infinity values. If no value is
        passed then positive infinity values will be replaced with a very
        large number.

        .. versionadded:: 1.17
    neginf : int, float, optional
        Value to be used to fill negative infinity values. If no value is
        passed then negative infinity values will be replaced with a very
        small (or negative) number.

        .. versionadded:: 1.17

    Returns
    -------
    out : ndarray
        `x`, with the non-finite values replaced. If `copy` is False, this may
        be `x` itself.

    See Also
    --------
    isinf : Shows which elements are positive or negative infinity.
    isneginf : Shows which elements are negative infinity.
    isposinf : Shows which elements are positive infinity.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are finite (not NaN, not infinity)

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.nan_to_num(np.inf)
    1.7976931348623157e+308
    >>> np.nan_to_num(-np.inf)
    -1.7976931348623157e+308
    >>> np.nan_to_num(np.nan)
    0.0
    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
    >>> np.nan_to_num(x)
    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
           -1.28000000e+002,  1.28000000e+002])
    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
           -1.2800000e+02,  1.2800000e+02])
    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
    >>> np.nan_to_num(y)
    array([  1.79769313e+308 +0.00000000e+000j, # may vary
             0.00000000e+000 +0.00000000e+000j,
             0.00000000e+000 +1.79769313e+308j])
    >>> np.nan_to_num(y, nan=111111, posinf=222222)
    array([222222.+111111.j, 111111.     +0.j, 111111.+222222.j])
    """
    x = _nx.array(x, subok=True, copy=copy)
    xtype = x.dtype.type
    isscalar = (x.ndim == 0)
    if not issubclass(xtype, _nx.inexact):
        # Integer/bool/etc. arrays cannot hold nan or inf: nothing to do.
        return x[()] if isscalar else x
    iscomplex = issubclass(xtype, _nx.complexfloating)
    # For complex input, patch the real and imaginary views in place;
    # both views share memory with ``x``, so copyto() updates it directly.
    dest = (x.real, x.imag) if iscomplex else (x,)
    maxf, minf = _getmaxmin(x.real.dtype)
    if posinf is not None:
        maxf = posinf
    if neginf is not None:
        minf = neginf
    for d in dest:
        idx_nan = isnan(d)
        idx_posinf = isposinf(d)
        idx_neginf = isneginf(d)
        _nx.copyto(d, nan, where=idx_nan)
        _nx.copyto(d, maxf, where=idx_posinf)
        _nx.copyto(d, minf, where=idx_neginf)
    return x[()] if isscalar else x
#-----------------------------------------------------------------------------
def _real_if_close_dispatcher(a, tol=None):
    # Only ``a`` participates in __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
    """
    If complex input returns a real array if complex parts are close to zero.

    "Close to zero" is defined as `tol` * (machine epsilon of the type for
    `a`).

    Parameters
    ----------
    a : array_like
        Input array.
    tol : float
        Tolerance in machine epsilons for the complex part of the elements
        in the array.

    Returns
    -------
    out : ndarray
        If `a` is real, the type of `a` is used for the output.  If `a`
        has complex elements, the returned type is float.

    See Also
    --------
    real, imag, angle

    Notes
    -----
    Machine epsilon varies from machine to machine and between data types
    but Python floats on most platforms have a machine epsilon equal to
    2.2204460492503131e-16.  You can use 'np.finfo(float).eps' to print
    out the machine epsilon for floats.

    Examples
    --------
    >>> np.finfo(float).eps
    2.2204460492503131e-16 # may vary
    >>> np.real_if_close([2.1 + 4e-14j], tol=1000)
    array([2.1])
    >>> np.real_if_close([2.1 + 4e-13j], tol=1000)
    array([2.1+4.e-13j])
    """
    a = asanyarray(a)
    if not issubclass(a.dtype.type, _nx.complexfloating):
        # Already real: nothing to strip.
        return a
    if tol > 1:
        # ``tol`` > 1 is interpreted as a multiple of the machine epsilon
        # of a's dtype; values <= 1 are used directly as an absolute bound.
        from numpy.core import getlimits
        tol = getlimits.finfo(a.dtype.type).eps * tol
    if _nx.all(_nx.absolute(a.imag) < tol):
        a = a.real
    return a
def _asscalar_dispatcher(a):
    # Deprecated 2018-10-10, NumPy 1.16.  The warning is emitted from the
    # dispatcher so that it fires even when __array_function__ dispatch
    # hands the actual call to an override.
    warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
                  'a.item() instead', DeprecationWarning, stacklevel=3)
    return (a,)


@array_function_dispatch(_asscalar_dispatcher)
def asscalar(a):
    """
    Convert an array of size 1 to its scalar equivalent.

    .. deprecated:: 1.16

        Deprecated, use `numpy.ndarray.item()` instead.

    Parameters
    ----------
    a : ndarray
        Input array of size 1.

    Returns
    -------
    out : scalar
        Scalar representation of `a`.  The output data type is the same
        type returned by the input's `item` method.

    Examples
    --------
    >>> np.asscalar(np.array([24]))
    24
    """
    return a.item()
#-----------------------------------------------------------------------------
# Human-readable description for each single-character dtype code.
_namefromtype = {'S1': 'character',
                 '?': 'bool',
                 'b': 'signed char',
                 'B': 'unsigned char',
                 'h': 'short',
                 'H': 'unsigned short',
                 'i': 'integer',
                 'I': 'unsigned integer',
                 'l': 'long integer',
                 'L': 'unsigned long integer',
                 'q': 'long long integer',
                 'Q': 'unsigned long long integer',
                 'f': 'single precision',
                 'd': 'double precision',
                 'g': 'long precision',
                 'F': 'complex single precision',
                 'D': 'complex double precision',
                 'G': 'complex long double precision',
                 'S': 'string',
                 'U': 'unicode',
                 'V': 'void',
                 'O': 'object'
                 }


@set_module('numpy')
def typename(char):
    """
    Return a description for the given data type code.

    Parameters
    ----------
    char : str
        Data type code.

    Returns
    -------
    out : str
        Description of the input data type code.

    See Also
    --------
    dtype, typecodes

    Examples
    --------
    >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
    ...              'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
    >>> for typechar in typechars:
    ...     print(typechar, ' : ', np.typename(typechar))
    ...
    S1  :  character
    ?  :  bool
    B  :  unsigned char
    D  :  complex double precision
    G  :  complex long double precision
    F  :  complex single precision
    I  :  unsigned integer
    H  :  unsigned short
    L  :  unsigned long integer
    O  :  object
    Q  :  unsigned long long integer
    S  :  string
    U  :  unicode
    V  :  void
    b  :  signed char
    d  :  double precision
    g  :  long precision
    f  :  single precision
    i  :  integer
    h  :  short
    l  :  long integer
    q  :  long long integer
    """
    # Unknown codes raise KeyError, matching historical behaviour.
    return _namefromtype[char]
#-----------------------------------------------------------------------------
#determine the "minimum common type" for a group of arrays.
# Lookup tables for common_type: row 0 holds the real types, row 1 the
# complex types, both indexed by increasing precision.
array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
              [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]
# Precision rank of each scalar type (index into the rows above).
array_precision = {_nx.half: 0,
                   _nx.single: 1,
                   _nx.double: 2,
                   _nx.longdouble: 3,
                   _nx.csingle: 1,
                   _nx.cdouble: 2,
                   _nx.clongdouble: 3}


def _common_type_dispatcher(*arrays):
    return arrays


@array_function_dispatch(_common_type_dispatcher)
def common_type(*arrays):
    """
    Return a scalar type which is common to the input arrays.

    The return type will always be an inexact (i.e. floating point) scalar
    type, even if all the arrays are integer arrays.  If one of the inputs
    is an integer array, the minimum precision type that is returned is a
    64-bit floating point dtype.

    All input arrays except int64 and uint64 can be safely cast to the
    returned dtype without loss of information.

    Parameters
    ----------
    array1, array2, ... : ndarrays
        Input arrays.

    Returns
    -------
    out : data type code
        Data type code.

    See Also
    --------
    dtype, mintypecode

    Examples
    --------
    >>> np.common_type(np.arange(2, dtype=np.float32))
    <class 'numpy.float32'>
    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
    <class 'numpy.float64'>
    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
    <class 'numpy.complex128'>
    """
    any_complex = False
    max_precision = 0
    for array in arrays:
        scalar_type = array.dtype.type
        if iscomplexobj(array):
            any_complex = True
        if issubclass(scalar_type, _nx.integer):
            # Integers are promoted to double precision.
            rank = 2  # array_precision[_nx.double]
        else:
            rank = array_precision.get(scalar_type, None)
            if rank is None:
                raise TypeError("can't get common type for non-numeric array")
        max_precision = max(max_precision, rank)
    row = 1 if any_complex else 0
    return array_type[row][max_precision]
| bsd-3-clause |
tquilian/exelearningTest | exe/engine/verdaderofalsofpdidevice.py | 11 | 18058 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
FPD - True/False Activity
A true false idevice is one built up from question and options
"""
import logging
from exe.engine.persist import Persistable
from exe.engine.idevice import Idevice
from exe.engine.translate import lateTranslate
from exe.engine.field import TextAreaField
import re
log = logging.getLogger(__name__)
# ===========================================================================
class TrueFalseQuestion(Persistable):
    """
    A TrueFalse iDevice is built up of questions.  Each question holds three
    rich-text fields (question text, feedback, hint) plus the ``isCorrect``
    flag, and can be rendered as an XHTML element.
    """
    def __init__(self, idevice, question="", isCorrect=False, feedback="", hint=""):
        """
        Initialize a question belonging to ``idevice``.

        Each TextAreaField keeps a back-reference to the owning iDevice so
        that embedded resources (e.g. images) can be resolved later.
        """
        self.idevice = idevice
        self.questionTextArea = TextAreaField(x_(u'Question:'),
                                              self.idevice.questionInstruc, question)
        self.questionTextArea.idevice = idevice
        # True when "True" is the correct response for this statement:
        self.isCorrect = isCorrect
        self.feedbackTextArea = TextAreaField(x_(u'Feedback'),
                                              self.idevice.feedbackInstruc, feedback)
        self.feedbackTextArea.idevice = idevice
        self.hintTextArea = TextAreaField(x_(u'Hint'),
                                          self.idevice.hintInstruc, hint)
        self.hintTextArea.idevice = idevice

    def getResourcesField(self, this_resource):
        """
        implement the specific resource finding mechanism for this iDevice:
        return the TextAreaField that embeds ``this_resource`` as an image,
        or None if no field of this question owns it.
        """
        # be warned that before upgrading, this iDevice field could not exist:
        if hasattr(self, 'questionTextArea')\
        and hasattr(self.questionTextArea, 'images'):
            for this_image in self.questionTextArea.images:
                if hasattr(this_image, '_imageResource') \
                and this_resource == this_image._imageResource:
                    return self.questionTextArea
        # be warned that before upgrading, this iDevice field could not exist:
        if hasattr(self, 'feedbackTextArea')\
        and hasattr(self.feedbackTextArea, 'images'):
            for this_image in self.feedbackTextArea.images:
                if hasattr(this_image, '_imageResource') \
                and this_resource == this_image._imageResource:
                    return self.feedbackTextArea
        # be warned that before upgrading, this iDevice field could not exist:
        if hasattr(self, 'hintTextArea')\
        and hasattr(self.hintTextArea, 'images'):
            for this_image in self.hintTextArea.images:
                if hasattr(this_image, '_imageResource') \
                and this_resource == this_image._imageResource:
                    return self.hintTextArea
        return None

    def getRichTextFields(self):
        """
        Like getResourcesField(), a general helper to allow nodes to search
        through all of their fields without having to know the specifics of each
        iDevice type.  Returns whichever of the three fields exist.
        """
        fields_list = []
        if hasattr(self, 'questionTextArea'):
            fields_list.append(self.questionTextArea)
        if hasattr(self, 'feedbackTextArea'):
            fields_list.append(self.feedbackTextArea)
        if hasattr(self, 'hintTextArea'):
            fields_list.append(self.hintTextArea)
        return fields_list

    def upgrade_setIdevice(self, idevice):
        """
        While some of this might typically be done in an automatic upgrade
        method called from in increased persistence version, the problem
        with that approach is that the idevice was not previously stored,
        and cannot easily be gotten at that stage of operation.

        Rather than making such an upgrade method more messy than necessary,
        this method allows the parent TrueFalseIdevice to merely set
        itself on each of its TrueFalseQuestions during its own upgrade.

        Helps upgrade to somewhere before version 0.25 (post-v0.24),
        taking the old unicode string fields,
        and converting them into a image-enabled TextAreaFields:
        """
        self.idevice = idevice
        # NOTE(review): self.question / self.feedback / self.hint are the
        # pre-upgrade plain-unicode attributes restored from old persisted
        # packages -- they only exist on instances loaded from such data.
        self.questionTextArea = TextAreaField(x_(u'Question:'),
                                              self.idevice.questionInstruc, self.question)
        self.questionTextArea.idevice = self.idevice
        self.feedbackTextArea = TextAreaField(x_(u'Feedback'),
                                              self.idevice.feedbackInstruc, self.feedback)
        self.feedbackTextArea.idevice = self.idevice
        self.hintTextArea = TextAreaField(x_(u'Hint'),
                                          self.idevice.hintInstruc, self.hint)
        self.hintTextArea.idevice = self.idevice
# ===========================================================================
class VerdaderofalsofpdIdevice(Idevice):
"""
A TrueFalse Idevice is one built up from question and options
"""
persistenceVersion = 12
def __init__(self):
"""
Initialize
"""
Idevice.__init__(self,
x_(u"FPD - True/False Activity"),
x_(u"University of Auckland"),
x_(u"""True/false questions present a statement where
the learner must decide if the statement is true. This type of question works
well for factual information and information that lends itself to either/or
responses."""), u"", u"autoevaluacionfpd")
# self.emphasis = Idevice.SomeEmphasis
self.emphasis = "_autoevaluacionfpd"
self._hintInstruc = x_(u"""A hint may be provided to assist the
learner in answering the question.""")
self.questions = []
self._questionInstruc = x_(u"""Type the question stem. The question
should be clear and unambiguous. Avoid negative premises as these can tend to
be ambiguous.""")
self._keyInstruc = ""
self._feedbackInstruc = x_(u"""Enter any feedback you wish to provide
to the learner. This field may be left blank. if this field is left blank
default feedback will be provided.""")
self.questions.append(TrueFalseQuestion(self))
self.systemResources += ["common.js", "panel-amusements.png", "stock-stop.png"]
self.instructionsForLearners = TextAreaField(
x_(u'Instructions'),
x_(u"""Provide instruction on how the True/False Question should be
completed."""),
u'')
self.instructionsForLearners.idevice = self
# Properties
hintInstruc = lateTranslate('hintInstruc')
questionInstruc = lateTranslate('questionInstruc')
keyInstruc = lateTranslate('keyInstruc')
feedbackInstruc = lateTranslate('feedbackInstruc')
def addQuestion(self):
"""
Add a new question to this iDevice.
"""
self.questions.append(TrueFalseQuestion(self))
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
# be warned that before upgrading, this iDevice field could not exist:
if hasattr(self, 'instructionsForLearners')\
and hasattr(self.instructionsForLearners, 'images'):
for this_image in self.instructionsForLearners.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.instructionsForLearners
for this_question in self.questions:
this_field = this_question.getResourcesField(this_resource)
if this_field is not None:
return this_field
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
if hasattr(self, 'instructionsForLearners'):
fields_list.append(self.instructionsForLearners)
for this_question in self.questions:
fields_list.extend(this_question.getRichTextFields())
return fields_list
def burstHTML(self, i):
"""
takes a BeautifulSoup fragment (i) and bursts its contents to
import this idevice from a CommonCartridge export
"""
# True-False Idevice:
title = i.find(name='span', attrs={'class' : 'iDeviceTitle' })
self.title = title.renderContents().decode('utf-8')
inner = i.find(name='div', attrs={'class' : 'iDevice_inner' })
instruct = inner.find(name='div', attrs={'class' : 'block' ,
'style' : 'display:block' })
self.instructionsForLearners.content_wo_resourcePaths = \
instruct.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.instructionsForLearners.content_w_resourcePaths = \
self.instructionsForLearners.MassageResourceDirsIntoContent( \
self.instructionsForLearners.content_wo_resourcePaths)
self.instructionsForLearners.content = \
self.instructionsForLearners.content_w_resourcePaths
# copied and modified from Multi-Select, and others :-) :
tf_questions = inner.findAll(name='div', attrs={'class' : 'question'})
if len(tf_questions) < 1:
# need to remove the default 1st question
del self.questions[0]
for question_num in range(len(tf_questions)):
if question_num > 0:
# only created with the first question, add others:
self.addQuestion()
question = tf_questions[question_num]
questions = question.findAll(name='div', attrs={'class' : 'block',
'id' : re.compile('^taquestion') })
if len(questions) == 1:
# ELSE: should warn of unexpected result!
inner_question = questions[0]
self.questions[question_num].questionTextArea.content_wo_resourcePaths \
= inner_question.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.questions[question_num].questionTextArea.content_w_resourcePaths \
= self.questions[question_num].questionTextArea.MassageResourceDirsIntoContent( \
self.questions[question_num].questionTextArea.content_wo_resourcePaths)
self.questions[question_num].questionTextArea.content = \
self.questions[question_num].questionTextArea.content_w_resourcePaths
answer_true = question.find(name='div',
attrs={'id' : re.compile('^s0b') })
answer_false = question.find(name='div',
attrs={'id' : re.compile('^s1b') })
# true-false only has 1 feedback per question:
feedbacks = question.findAll(name='div',
attrs={'id' : re.compile('^sfb') })
# true-false only has 1 hint per question:
hints = question.findAll(name='div',
attrs={'id' : re.compile('^tahint') })
# and finally, see if this is a correct answer:
even_score = int(answer_true.attrMap['even_steven'])
if not (even_score % 2):
# i.e., if it IS even, then this is correct:
self.questions[question_num].isCorrect = True
if len(hints) >= 1:
inner_hint = hints[0]
self.questions[question_num].hintTextArea.content_wo_resourcePaths \
= inner_hint.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.questions[question_num].hintTextArea.content_w_resourcePaths \
= self.questions[question_num].hintTextArea.MassageResourceDirsIntoContent( \
self.questions[question_num].hintTextArea.content_wo_resourcePaths)
self.questions[question_num].hintTextArea.content = \
self.questions[question_num].hintTextArea.content_w_resourcePaths
else:
# no user-defined feedback, just using the default:
self.questions[question_num].hintTextArea.content = ""
self.questions[question_num].hintTextArea.content_w_resourcePaths \
= ""
self.questions[question_num].hintTextArea.content_wo_resourcePaths \
= ""
if len(feedbacks) >= 1:
inner_feedback = feedbacks[0]
self.questions[question_num].feedbackTextArea.content_wo_resourcePaths \
= inner_feedback.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.questions[question_num].feedbackTextArea.content_w_resourcePaths \
= self.questions[question_num].feedbackTextArea.MassageResourceDirsIntoContent( \
self.questions[question_num].feedbackTextArea.content_wo_resourcePaths)
self.questions[question_num].feedbackTextArea.content = \
self.questions[question_num].feedbackTextArea.content_w_resourcePaths
else:
# no user-defined feedback, just using the default:
self.questions[question_num].feedbackTextArea.content = ""
self.questions[question_num].feedbackTextArea.content_w_resourcePaths \
= ""
self.questions[question_num].feedbackTextArea.content_wo_resourcePaths \
= ""
def upgradeToVersion1(self):
"""
Upgrades the node from version 0 to 1.
Old packages will loose their icons, but they will load.
"""
log.debug(u"Upgrading iDevice")
self.icon = u"autoevaluacionfpd"
def upgradeToVersion2(self):
"""
Upgrades the node from 1 (v0.5) to 2 (v0.6).
Old packages will loose their icons, but they will load.
"""
log.debug(u"Upgrading iDevice")
# self.emphasis = Idevice.SomeEmphasis
self.emphasis = "_autoevaluacionfpd"
def upgradeToVersion3(self):
"""
Upgrades the node from 1 (v0.6) to 2 (v0.7).
Change icon from 'multichoice' to 'question'
"""
log.debug(u"Upgrading iDevice icon")
self.icon = "autoevaluacionfpd"
def upgradeToVersion4(self):
"""
Upgrades v0.6 to v0.7.
"""
self.lastIdevice = False
def upgradeToVersion5(self):
"""
Upgrades exe to v0.10
"""
self._upgradeIdeviceToVersion1()
self._hintInstruc = self.__dict__['hintInstruc']
self._questionInstruc = self.__dict__['questionInstruc']
self._keyInstruc = self.__dict__['keyInstruc']
def upgradeToVersion6(self):
"""
Upgrades exe to v0.11
"""
self._feedbackInstruc = x_(u"""Type in the feedback that you want the
student to see when selecting the particular question. If you don't complete
this box, eXe will automatically provide default feedback as follows:
"Correct answer" as indicated by the selection for the correct answer; or
"Wrong answer" for the other alternatives.""")
def upgradeToVersion7(self):
    """
    Upgrades to v0.12: register the extra javascript/image resources this
    iDevice needs.
    """
    self._upgradeIdeviceToVersion2()
    self.systemResources.extend(["common.js", "libot_drag.js",
                                 "panel-amusements.png", "stock-stop.png"])
def upgradeToVersion8(self):
    """
    Upgrades to v0.15
    """
    # Add the instructions field introduced in v0.15.
    # NOTE(review): the default text talks about filling in missing words,
    # which reads like it was copied from the cloze iDevice rather than a
    # True/False question -- confirm the intended default text.
    self.instructionsForLearners = TextAreaField(
        x_(u'Instructions'),
        x_(u"""Provide instruction on how the True/False Question should be
completed."""),
        x_(u'Read the paragraph below and '
           'fill in the missing words.'))
    self.instructionsForLearners.idevice = self
def upgradeToVersion9(self):
    """
    Upgrades to somewhere before version 0.25 (post-v0.24).
    Re-attach this idevice to every question so their old unicode string
    fields are converted into image-enabled TextAreaFields.
    """
    for q in self.questions:
        q.upgrade_setIdevice(self)
def upgradeToVersion10(self):
    """Drop the obsolete drag helper script from the resource list."""
    try:
        self.systemResources.remove("libot_drag.js")
    except ValueError:
        # Not present -- nothing to remove.
        pass
def upgradeToVersion11(self):
    """
    Delete the icon from system resources.
    """
    self._upgradeIdeviceToVersion3()
def upgradeToVersion12(self):
    """Rename the old Spanish FPD title to its English equivalent."""
    old_title = u"FPD - Actividad de Verdadero/Falso"
    new_title = u"FPD - True/False Activity"
    if self._title == old_title:
        self._title = new_title
# ===========================================================================
| gpl-2.0 |
Odingod/mne-python | mne/time_frequency/tests/test_tfr.py | 5 | 8846 | import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
import mne
from mne import io, Epochs, read_events, pick_types, create_info, EpochsArray
from mne.utils import _TempDir, run_tests_if_main, slow_test, requires_h5py
from mne.time_frequency import single_trial_power
from mne.time_frequency.tfr import cwt_morlet, morlet, tfr_morlet
from mne.time_frequency.tfr import _dpss_wavelet, tfr_multitaper
from mne.time_frequency.tfr import AverageTFR, read_tfrs, write_tfrs
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-eve.fif')
def test_morlet():
    """Test morlet with and without zero mean"""
    wav_zero_mean = morlet(1000, [10], 2., zero_mean=True)
    wav_plain = morlet(1000, [10], 2., zero_mean=False)
    # A zero-mean wavelet averages out to ~0; the plain one keeps a DC offset.
    assert_true(np.abs(np.mean(np.real(wav_zero_mean[0]))) < 1e-5)
    assert_true(np.abs(np.mean(np.real(wav_plain[0]))) > 1e-3)
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    # Keep only two channels so the test stays fast.
    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute the TFR of the evoked (averaged) data.
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    # ITC is meaningless for a single evoked trace, so asking for it fails.
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test in-place subtract
    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    # Channel-type containment checks: only gradiometers were picked.
    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    # ITC values must lie strictly between 0 and 1.
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # Same checks without the FFT path.
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))

    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)
    # NOTE(review): np.mean without an axis reduces to a scalar, which is then
    # compared (broadcast) against the full power array -- axis=0 (average
    # over trials) looks like the intent here; confirm.
    assert_array_almost_equal(np.mean(single_power), power.data)

    # pick_channels / drop_channels with every-other-channel selections
    # must agree with each other.
    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)
def test_dpsswavelet():
    """Test DPSS wavelet"""
    freqs = np.arange(5, 25, 3)
    tapers = _dpss_wavelet(1000, freqs=freqs, n_cycles=freqs / 2.,
                           time_bandwidth=4.0, zero_mean=True)
    assert_true(len(tapers) == 3)  # 3 tapers expected

    # Check that zero mean is true for the first taper's first wavelet.
    assert_true(np.abs(np.mean(np.real(tapers[0][0]))) < 1e-5)
    # One wavelet per requested frequency.
    assert_true(len(tapers[0]) == len(freqs))
@slow_test
def test_tfr_multitaper():
    """Test tfr_multitaper"""
    sfreq = 200.0
    ch_names = ['SIM0001', 'SIM0002', 'SIM0003']
    ch_types = ['grad', 'grad', 'grad']
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    n_times = int(sfreq)  # Second long epochs
    n_epochs = 3
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
    t = np.arange(n_times, dtype=np.float) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
    # Same windowed burst on every channel, plus independent noise.
    dat = noise + signal

    reject = dict(grad=4000.)
    # NOTE(review): np.empty gives a float array; MNE events are usually
    # integer (sample, 0, id) triplets -- confirm dtype is acceptable here.
    events = np.empty((n_epochs, 3))
    first_event_sample = 100
    event_id = dict(sin50hz=1)
    for k in range(n_epochs):
        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
                         reject=reject)

    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                time_bandwidth=4.0) if False else tfr_multitaper(
        epochs, freqs=np.arange(5, 100, 3, dtype=np.float),
        n_cycles=np.arange(5, 100, 3, dtype=np.float) / 2., time_bandwidth=4.0)
def test_crop():
    """Test TFR cropping"""
    sample = np.zeros((3, 2, 3))
    sample_times = np.array([.1, .2, .3])
    sample_freqs = np.array([.10, .20])
    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                           ['mag', 'mag', 'mag'])
    tfr = AverageTFR(info, data=sample, times=sample_times,
                     freqs=sample_freqs,
                     nave=20, comment='test', method='crazy-tfr')
    # Cropping to [0.2, 0.3] keeps only the last two time samples.
    tfr.crop(0.2, 0.3)
    assert_array_equal(tfr.times, [0.2, 0.3])
    assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
    """Test TFR IO capacities"""
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-tfr.h5')
    data = np.zeros((3, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])

    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                           ['mag', 'mag', 'mag'])
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
                     nave=20, comment='test', method='crazy-tfr')
    tfr.save(fname)
    tfr2 = read_tfrs(fname, condition='test')

    # Round-trip must preserve data, axes and metadata.
    assert_array_equal(tfr.data, tfr2.data)
    assert_array_equal(tfr.times, tfr2.times)
    assert_array_equal(tfr.freqs, tfr2.freqs)
    assert_equal(tfr.comment, tfr2.comment)
    assert_equal(tfr.nave, tfr2.nave)

    # Saving over an existing file without overwrite=True must fail.
    assert_raises(IOError, tfr.save, fname)

    tfr.comment = None
    tfr.save(fname, overwrite=True)
    # With no comment, conditions are addressed by index.
    assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
    tfr.comment = 'test-A'
    tfr2.comment = 'test-B'

    # Multi-condition file: write two TFRs and read them back selectively.
    fname = op.join(tempdir, 'test2-tfr.h5')
    write_tfrs(fname, [tfr, tfr2])
    tfr3 = read_tfrs(fname, condition='test-A')
    assert_equal(tfr.comment, tfr3.comment)

    # condition=None returns all stored conditions.
    tfrs = read_tfrs(fname, condition=None)
    assert_equal(len(tfrs), 2)
    tfr4 = tfrs[1]
    assert_equal(tfr2.comment, tfr4.comment)

    # Unknown condition names are rejected.
    assert_raises(ValueError, read_tfrs, fname, condition='nonono')
# Execute this module's tests when it is run as a script (mne test helper).
run_tests_if_main()
| bsd-3-clause |
Perferom/android_external_chromium_org | third_party/protobuf/python/google/protobuf/text_format.py | 162 | 22004 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
__author__ = 'kenton@google.com (Kenton Varda)'
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
# Public API of this module.
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
            'PrintFieldValue', 'Merge' ]

# Range checkers indexed by 2 * is_long + is_signed (see ParseInteger):
# 0 = uint32, 1 = int32, 2 = uint64, 3 = int64.
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
                     type_checkers.Int32ValueChecker(),
                     type_checkers.Uint64ValueChecker(),
                     type_checkers.Int64ValueChecker())

# Alternative spellings of special float values accepted by ParseFloat.
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
class ParseError(Exception):
  """Thrown in case of ASCII (text-format) parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
  """Return the text-format rendering of *message* as a string.

  Args:
    message: The protocol buffer message to render.
    as_utf8: Emit non-ASCII characters raw instead of octal-escaped.
    as_one_line: Render everything on a single line (trailing space stripped).
  """
  buf = cStringIO.StringIO()
  PrintMessage(message, buf, as_utf8=as_utf8, as_one_line=as_one_line)
  text = buf.getvalue()
  buf.close()
  if as_one_line:
    return text.rstrip()
  return text
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
  """Write the text form of every set field of *message* to *out*."""
  for field, value in message.ListFields():
    if field.label != descriptor.FieldDescriptor.LABEL_REPEATED:
      PrintField(field, value, out, indent, as_utf8, as_one_line)
    else:
      # Repeated fields print one name/value pair per element.
      for item in value:
        PrintField(field, item, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
  """Print a single field name/value pair.  For repeated fields, the value
  should be a single element."""

  out.write(' ' * indent);
  if field.is_extension:
    out.write('[')
    # MessageSet extensions print the message type's full name rather than
    # the synthetic field name.
    if (field.containing_type.GetOptions().message_set_wire_format and
        field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
        field.message_type == field.extension_scope and
        field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
      out.write(field.message_type.full_name)
    else:
      out.write(field.full_name)
    out.write(']')
  elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
    # For groups, use the capitalized name.
    out.write(field.message_type.name)
  else:
    out.write(field.name)

  if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # The colon is optional in this case, but our cross-language golden files
    # don't include it.
    out.write(': ')

  PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
  if as_one_line:
    out.write(' ')
  else:
    out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
                    as_utf8=False, as_one_line=False):
  """Print a single field value (not including name).  For repeated fields,
  the value should be a single element."""

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Nested messages are wrapped in braces; multi-line form indents by 2.
    if as_one_line:
      out.write(' { ')
      PrintMessage(value, out, indent, as_utf8, as_one_line)
      out.write('}')
    else:
      out.write(' {\n')
      PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
      out.write(' ' * indent + '}')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      out.write(enum_value.name)
    else:
      # Unknown enum number: fall back to printing the raw integer.
      out.write(str(value))
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    out.write('\"')
    # NOTE: Python 2 only -- `unicode` does not exist on Python 3.
    if type(value) is unicode:
      out.write(_CEscape(value.encode('utf-8'), as_utf8))
    else:
      out.write(_CEscape(value, as_utf8))
    out.write('\"')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    if value:
      out.write("true")
    else:
      out.write("false")
  else:
    out.write(str(value))
def Merge(text, message):
  """Merges an ASCII representation of a protocol message into a message.

  Args:
    text: Message ASCII representation.
    message: A protocol buffer message to merge into.

  Raises:
    ParseError: On ASCII parsing problems.
  """
  tok = _Tokenizer(text)
  while not tok.AtEnd():
    _MergeField(tok, message)
def _MergeField(tokenizer, message):
  """Merges a single protocol message field into a message.

  Args:
    tokenizer: A tokenizer to parse the field name and values.
    message: A protocol message to record the data.

  Raises:
    ParseError: In case of ASCII parsing problems.
  """
  message_descriptor = message.DESCRIPTOR
  if tokenizer.TryConsume('['):
    # Extension field, written as "[qualified.extension.name]".
    name = [tokenizer.ConsumeIdentifier()]
    while tokenizer.TryConsume('.'):
      name.append(tokenizer.ConsumeIdentifier())
    name = '.'.join(name)

    if not message_descriptor.is_extendable:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" does not have extensions.' %
          message_descriptor.full_name)
    field = message.Extensions._FindExtensionByName(name)
    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" not registered.' % name)
    elif message_descriptor != field.containing_type:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" does not extend message type "%s".' % (
              name, message_descriptor.full_name))
    tokenizer.Consume(']')
  else:
    name = tokenizer.ConsumeIdentifier()
    field = message_descriptor.fields_by_name.get(name, None)

    # Group names are expected to be capitalized as they appear in the
    # .proto file, which actually matches their type names, not their field
    # names.
    if not field:
      field = message_descriptor.fields_by_name.get(name.lower(), None)
      if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
        field = None

    if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
        field.message_type.name != name):
      field = None

    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" has no field named "%s".' % (
              message_descriptor.full_name, name))

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Sub-messages may be delimited by '< >' or '{ }'; the colon is optional.
    tokenizer.TryConsume(':')

    if tokenizer.TryConsume('<'):
      end_token = '>'
    else:
      tokenizer.Consume('{')
      end_token = '}'

    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      if field.is_extension:
        sub_message = message.Extensions[field].add()
      else:
        sub_message = getattr(message, field.name).add()
    else:
      if field.is_extension:
        sub_message = message.Extensions[field]
      else:
        sub_message = getattr(message, field.name)
      # Mark the singular sub-message as present even if it stays empty.
      sub_message.SetInParent()

    while not tokenizer.TryConsume(end_token):
      if tokenizer.AtEnd():
        raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
      _MergeField(tokenizer, sub_message)
  else:
    _MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
  """Merges a single protocol message scalar field into a message.

  Args:
    tokenizer: A tokenizer to parse the field value.
    message: A protocol message to record the data.
    field: The descriptor of the field to be merged.

  Raises:
    ParseError: In case of ASCII parsing problems.
    RuntimeError: On runtime errors.
  """
  tokenizer.Consume(':')
  value = None

  # Dispatch on the declared wire type; each Consume* range-checks the token.
  if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
                    descriptor.FieldDescriptor.TYPE_SINT32,
                    descriptor.FieldDescriptor.TYPE_SFIXED32):
    value = tokenizer.ConsumeInt32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
                      descriptor.FieldDescriptor.TYPE_SINT64,
                      descriptor.FieldDescriptor.TYPE_SFIXED64):
    value = tokenizer.ConsumeInt64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
                      descriptor.FieldDescriptor.TYPE_FIXED32):
    value = tokenizer.ConsumeUint32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
                      descriptor.FieldDescriptor.TYPE_FIXED64):
    value = tokenizer.ConsumeUint64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
                      descriptor.FieldDescriptor.TYPE_DOUBLE):
    value = tokenizer.ConsumeFloat()
  elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
    value = tokenizer.ConsumeBool()
  elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
    value = tokenizer.ConsumeString()
  elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
    value = tokenizer.ConsumeByteString()
  elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
    value = tokenizer.ConsumeEnum(field)
  else:
    raise RuntimeError('Unknown field type %d' % field.type)

  if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    if field.is_extension:
      message.Extensions[field].append(value)
    else:
      getattr(message, field.name).append(value)
  else:
    if field.is_extension:
      message.Extensions[field] = value
    else:
      setattr(message, field.name, value)
class _Tokenizer(object):
  """Protocol buffer ASCII representation tokenizer.

  This class handles the lower level string parsing by splitting it into
  meaningful tokens.

  It was directly ported from the Java protocol buffer API.
  """

  # Runs of whitespace and '#'-to-end-of-line comments, skipped between tokens.
  _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
  _TOKEN = re.compile(
      '[a-zA-Z_][0-9a-zA-Z_+-]*|'           # an identifier
      '[0-9+-][0-9a-zA-Z_.+-]*|'            # a number
      '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|'  # a double-quoted string
      '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)')  # a single-quoted string
  _IDENTIFIER = re.compile('\w+')

  def __init__(self, text_message):
    self._text_message = text_message

    self._position = 0
    self._line = -1            # current line index (-1 until _PopLine runs)
    self._column = 0           # column of the next unconsumed character
    self._token_start = None
    self.token = ''            # current token; '' means end of input
    self._lines = deque(text_message.split('\n'))
    self._current_line = ''
    self._previous_line = 0    # position of the previous token, for errors
    self._previous_column = 0
    self._SkipWhitespace()
    self.NextToken()

  def AtEnd(self):
    """Checks the end of the text was reached.

    Returns:
      True iff the end was reached.
    """
    return self.token == ''

  def _PopLine(self):
    # Advance to the next line whenever the current one is exhausted.
    while len(self._current_line) <= self._column:
      if not self._lines:
        self._current_line = ''
        return
      self._line += 1
      self._column = 0
      self._current_line = self._lines.popleft()

  def _SkipWhitespace(self):
    # Skip whitespace/comments, pulling in new lines as needed.
    while True:
      self._PopLine()
      match = self._WHITESPACE.match(self._current_line, self._column)
      if not match:
        break
      length = len(match.group(0))
      self._column += length

  def TryConsume(self, token):
    """Tries to consume a given piece of text.

    Args:
      token: Text to consume.

    Returns:
      True iff the text was consumed.
    """
    if self.token == token:
      self.NextToken()
      return True
    return False

  def Consume(self, token):
    """Consumes a piece of text.

    Args:
      token: Text to consume.

    Raises:
      ParseError: If the text couldn't be consumed.
    """
    if not self.TryConsume(token):
      raise self._ParseError('Expected "%s".' % token)

  def ConsumeIdentifier(self):
    """Consumes protocol message field identifier.

    Returns:
      Identifier string.

    Raises:
      ParseError: If an identifier couldn't be consumed.
    """
    result = self.token
    if not self._IDENTIFIER.match(result):
      raise self._ParseError('Expected identifier.')
    self.NextToken()
    return result

  def ConsumeInt32(self):
    """Consumes a signed 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeUint32(self):
    """Consumes an unsigned 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeInt64(self):
    """Consumes a signed 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeUint64(self):
    """Consumes an unsigned 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeFloat(self):
    """Consumes an floating point number.

    Returns:
      The number parsed.

    Raises:
      ParseError: If a floating point number couldn't be consumed.
    """
    try:
      result = ParseFloat(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeBool(self):
    """Consumes a boolean value.

    Returns:
      The bool parsed.

    Raises:
      ParseError: If a boolean value couldn't be consumed.
    """
    try:
      result = ParseBool(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeString(self):
    """Consumes a string value.

    Returns:
      The string parsed.

    Raises:
      ParseError: If a string value couldn't be consumed.
    """
    bytes = self.ConsumeByteString()
    try:
      # NOTE: Python 2 only -- `unicode` does not exist on Python 3.
      return unicode(bytes, 'utf-8')
    except UnicodeDecodeError, e:
      raise self._StringParseError(e)

  def ConsumeByteString(self):
    """Consumes a byte array value.

    Returns:
      The array parsed (as a string).

    Raises:
      ParseError: If a byte array value couldn't be consumed.
    """
    # Adjacent string literals are concatenated, as in C.
    list = [self._ConsumeSingleByteString()]
    while len(self.token) > 0 and self.token[0] in ('\'', '"'):
      list.append(self._ConsumeSingleByteString())
    return "".join(list)

  def _ConsumeSingleByteString(self):
    """Consume one token of a string literal.

    String literals (whether bytes or text) can come in multiple adjacent
    tokens which are automatically concatenated, like in C or Python.  This
    method only consumes one token.
    """
    text = self.token
    if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
    if len(text) < 2 or text[-1] != text[0]:
      raise self._ParseError('String missing ending quote.')
    try:
      result = _CUnescape(text[1:-1])
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeEnum(self, field):
    """Consumes an enum value token (by name or number) for *field*."""
    try:
      result = ParseEnum(field, self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ParseErrorPreviousToken(self, message):
    """Creates and *returns* a ParseError for the previously read token.

    Args:
      message: A message to set for the exception.

    Returns:
      A ParseError instance.
    """
    return ParseError('%d:%d : %s' % (
        self._previous_line + 1, self._previous_column + 1, message))

  def _ParseError(self, message):
    """Creates and *returns* a ParseError for the current token."""
    return ParseError('%d:%d : %s' % (
        self._line + 1, self._column + 1, message))

  def _StringParseError(self, e):
    return self._ParseError('Couldn\'t parse string: ' + str(e))

  def NextToken(self):
    """Reads the next meaningful token."""
    self._previous_line = self._line
    self._previous_column = self._column

    self._column += len(self.token)
    self._SkipWhitespace()

    if not self._lines and len(self._current_line) <= self._column:
      self.token = ''
      return

    match = self._TOKEN.match(self._current_line, self._column)
    if match:
      token = match.group(0)
      self.token = token
    else:
      # Unrecognized character: surface it as a one-character token so the
      # caller's Consume() fails with a sensible error location.
      self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
# Matches a hex escape of one or two digits; C++ allows any length, but
# Python's 'string_escape' codec handles exactly two, hence the pre-pass.
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')


def _CUnescape(text):
  """Reverse _CEscape: decode C-style escape sequences in *text*.

  NOTE: relies on the Python 2-only 'string_escape' codec; this helper
  will not run unchanged on Python 3.
  """
  def ReplaceHex(m):
    return chr(int(m.group(0)[2:], 16))
  # This is required because the 'string_escape' encoding doesn't
  # allow single-digit hex escapes (like '\xf').
  result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
  return result.decode('string_escape')
def ParseInteger(text, is_signed=False, is_long=False):
  """Parses an integer.

  Args:
    text: The text to parse.
    is_signed: True if a signed integer must be parsed.
    is_long: True if a long integer must be parsed.

  Returns:
    The integer value.

  Raises:
    ValueError: Thrown Iff the text is not a valid integer.
  """
  # Base 0 lets int() honour 0x.../0... prefixes; re-raise with context.
  try:
    parsed = int(text, 0)
  except ValueError:
    raise ValueError('Couldn\'t parse integer: %s' % text)

  # Range-check with the matching checker; index is 2*is_long + is_signed.
  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
  checker.CheckValue(parsed)
  return parsed
def ParseFloat(text):
  """Parse a floating point number.

  Args:
    text: Text to parse.

  Returns:
    The number parsed.

  Raises:
    ValueError: If a floating point number couldn't be parsed.
  """
  # Fast path: Python-compatible syntax.
  try:
    return float(text)
  except ValueError:
    pass
  # Alternative spellings of infinity/NaN accepted by the C++ parser.
  if _FLOAT_INFINITY.match(text):
    return float('-inf') if text[0] == '-' else float('inf')
  if _FLOAT_NAN.match(text):
    return float('nan')
  # Finally, try the '1.0f' style with a trailing 'f' suffix.
  try:
    return float(text.rstrip('f'))
  except ValueError:
    raise ValueError('Couldn\'t parse float: %s' % text)
def ParseBool(text):
  """Parse a boolean value.

  Args:
    text: Text to parse.

  Returns:
    Boolean values parsed

  Raises:
    ValueError: If text is not a valid boolean.
  """
  if text in ('true', 't', '1'):
    return True
  if text in ('false', 'f', '0'):
    return False
  raise ValueError('Expected "true" or "false".')
def ParseEnum(field, value):
  """Parse an enum value.

  The value can be specified by a number (the enum value), or by
  a string literal (the enum name).

  Args:
    field: Enum field descriptor.
    value: String value.

  Returns:
    Enum value number.

  Raises:
    ValueError: If the enum value could not be parsed.
  """
  enum_type = field.enum_type
  try:
    number = int(value, 0)
  except ValueError:
    # Not numeric: look the token up as an identifier.
    found = enum_type.values_by_name.get(value, None)
    if found is None:
      raise ValueError(
          'Enum type "%s" has no value named %s.' % (
              enum_type.full_name, value))
  else:
    # Numeric value: look it up by number.
    found = enum_type.values_by_number.get(number, None)
    if found is None:
      raise ValueError(
          'Enum type "%s" has no value with number %d.' % (
              enum_type.full_name, number))
  return found.number
| bsd-3-clause |
hasadna/open-shot | qa/tasks.py | 1 | 3325 | import httplib
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from facepy import GraphAPI
from facepy.exceptions import OAuthError
from celery import task
from celery.utils.log import get_task_logger
from social_auth.models import UserSocialAuth
from actstream.models import Follow
from oshot.utils import get_root_url
logger = get_task_logger(__name__)
def get_graph_api(user):
    """Return a GraphAPI client using *user*'s stored Facebook access token,
    or None when the user has no social-auth record."""
    try:
        token = UserSocialAuth.objects.get(user=user).extra_data['access_token']
        return GraphAPI(token)
    except ObjectDoesNotExist:
        return None
def get_full_url(path):
    """Prepend the current site's domain (plain http) to *path*."""
    domain = Site.objects.get_current().domain
    return 'http://%s%s' % (domain, path)
@task(max_retries=3, default_retry_delay=10)
def publish_question_to_facebook(question):
graph = get_graph_api(question.author)
if graph:
question_url = get_full_url(question.get_absolute_url())
try:
graph.post(path="me/localshot:ask", question=question_url)
except Exception, exc:
logger.warn("failed to publish question to facebook %s" % unicode(question))
publish_question_to_facebook.retry(exc=exc)
@task(max_retries=3, default_retry_delay=10)
def publish_upvote_to_facebook(upvote):
graph = get_graph_api(upvote.user)
if graph:
question_url = get_full_url(upvote.question.get_absolute_url())
try:
graph.post(path="me/localshot:join", question=question_url)
except Exception, exc:
logger.warn("failed to publish upvote to facebook")
publish_upvote_to_facebook.retry(exc=exc)
@task()
def publish_answer(answer, send_email=True):
    """Publish a new answer: post it to the author's Facebook timeline and,
    when *send_email* is True, notify the question's editors and followers
    by email."""
    logger.info("publishing answer %s" % unicode(answer))
    question = answer.question
    # publish to facebook
    graph = get_graph_api(answer.author)
    if graph:
        answer_url = get_full_url(answer.get_absolute_url())
        try:
            graph.post(path="me/localshot:answer", question=answer_url)
        except Exception, exc:
            # Best-effort: a Facebook failure must not block the email path.
            logger.warn("-- Failed to publish answer to facebook")
    if send_email:
        # send an email to interested users
        # NOTE(review): `Membership` is not among this module's visible
        # imports -- this looks like it would raise NameError at runtime.
        # Also `entity__in` is handed a single object rather than a list;
        # confirm both against the models package.
        editors = Membership.objects.filter(entity__in=question.entity,
                is_editor=True).values_list('profile__user__email', flat=True)
        content_type = ContentType.objects.get_for_model(question)
        followers = Follow.objects.filter(content_type=content_type,
                object_id=question.id).values_list('user__email', flat=True)
        html_content = render_to_string("email/new_answer.html",
                {'answer': answer,
                 'ROOT_URL': get_root_url(),
                })
        text_content = 'Sorry, we only support html based email'
        # Recipients go in bcc so editors/followers don't see each other.
        msg = EmailMultiAlternatives(_("A new answer for your question"),
                                     text_content,
                                     settings.DEFAULT_FROM_EMAIL,
                                     bcc=list(editors)+list(followers))
        msg.attach_alternative(html_content, "text/html")
        msg.send()
| bsd-3-clause |
qwcode/pip | pip/_vendor/distlib/resources.py | 5 | 10337 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir
logger = logging.getLogger(__name__)
class Cache(object):
    """
    A cache for resources which must exist as real files in the file
    system, e.g. shared libraries.
    """
    def __init__(self, base=None):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located. If
                     not specified, this will be the ``resource-cache``
                     directory under whatever :func:`get_cache_base` returns.
        """
        if base is None:
            base = os.path.join(get_cache_base(), 'resource-cache')
        # Use 'isdir' rather than 'exists' so that a plain file squatting on
        # the cache path raises instead of being silently accepted.
        if not os.path.isdir(base):
            os.makedirs(base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def is_stale(self, resource, path):
        """
        Is the cache stale for the given resource?

        :param resource: The :class:`Resource` being cached.
        :param path: The path of the resource in the cache.
        :return: True if the cache is stale.
        """
        # Cache invalidation is a hard problem :-)
        return True

    def get(self, resource):
        """
        Get a resource into the cache,

        :param resource: A :class:`Resource` instance.
        :return: The pathname of the resource in the cache.
        """
        prefix, path = resource.finder.get_cache_info(resource)
        if prefix is None:
            # Already a real file on disk -- use it directly.
            result = path
        else:
            result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
            parent = os.path.dirname(result)
            if not os.path.isdir(parent):
                os.makedirs(parent)
            if not os.path.exists(result):
                stale = True
            else:
                stale = self.is_stale(resource, path)
            if stale:
                # Materialise the resource's bytes at the cache location.
                with open(result, 'wb') as f:
                    f.write(resource.bytes)
        return result

    def clear(self):
        """
        Clear the cache, returning the entries which could not be removed.
        """
        not_removed = []
        for name in os.listdir(self.base):
            entry = os.path.join(self.base, name)
            try:
                if os.path.islink(entry) or os.path.isfile(entry):
                    os.remove(entry)
                elif os.path.isdir(entry):
                    shutil.rmtree(entry)
            except Exception:
                not_removed.append(entry)
        return not_removed
cache = Cache()
class ResourceBase(object):
    """Common state shared by resources and resource containers."""

    def __init__(self, finder, name):
        # The finder that located this resource, and its '/'-separated name.
        self.finder, self.name = finder, name
class Resource(ResourceBase):
    """
    A class representing an in-package resource, such as a data file. This is
    not normally instantiated by user code, but rather by a
    :class:`ResourceFinder` which manages the resource.
    """
    is_container = False        # Backwards compatibility

    def as_stream(self):
        """
        Get the resource as a stream.

        This is not a property to make it obvious that it returns a new stream
        each time.
        """
        return self.finder.get_stream(self)

    @cached_property
    def file_path(self):
        # A file-system path for the resource, materialised via the
        # module-level cache; computed once and memoised.
        return cache.get(self)

    @cached_property
    def bytes(self):
        # Raw content of the resource, read once and memoised.
        return self.finder.get_bytes(self)

    @cached_property
    def size(self):
        # Size of the resource in bytes, computed once and memoised.
        return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
    """A resource which contains other resources (e.g. a directory)."""
    is_container = True     # Backwards compatibility

    @cached_property
    def resources(self):
        # Names of the immediate child resources, computed once and memoised.
        return self.finder.get_resources(self)
class ResourceFinder(object):
    """
    Resource finder for file system resources.
    """

    # Directory test used by find()/is_container(); subclasses override.
    _is_directory = staticmethod(os.path.isdir)

    def __init__(self, module):
        self.module = module
        self.loader = getattr(module, '__loader__', None)
        self.base = os.path.dirname(getattr(module, '__file__', ''))

    def _make_path(self, resource_name):
        # Resource names always use '/' separators; translate into a real
        # file-system path rooted at the module's directory.
        segments = [self.base] + resource_name.split('/')
        return os.path.realpath(os.path.join(*segments))

    def _find(self, path):
        return os.path.exists(path)

    def get_cache_info(self, resource):
        # No cache prefix: the resource is directly usable on disk.
        return None, resource.path

    def find(self, resource_name):
        """Return a Resource/ResourceContainer for *resource_name*, or None."""
        path = self._make_path(resource_name)
        if not self._find(path):
            return None
        if self._is_directory(path):
            found = ResourceContainer(self, resource_name)
        else:
            found = Resource(self, resource_name)
        found.path = path
        return found

    def get_stream(self, resource):
        return open(resource.path, 'rb')

    def get_bytes(self, resource):
        with open(resource.path, 'rb') as stream:
            return stream.read()

    def get_size(self, resource):
        return os.path.getsize(resource.path)

    def get_resources(self, resource):
        # Child names, excluding bytecode artefacts.
        skipped_suffixes = ('.pyc', '.pyo')
        return set(entry for entry in os.listdir(resource.path)
                   if entry != '__pycache__'
                   and not entry.endswith(skipped_suffixes))

    def is_container(self, resource):
        return self._is_directory(resource.path)
class ZipResourceFinder(ResourceFinder):
    """
    Resource finder for resources in .zip files.
    """
    def __init__(self, module):
        super(ZipResourceFinder, self).__init__(module)
        archive = self.loader.archive
        # Length of '<archive path>/': used to strip the archive portion from
        # absolute resource paths, leaving the intra-archive path.
        self.prefix_len = 1 + len(archive)
        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
        if hasattr(self.loader, '_files'):
            self._files = self.loader._files
        else:
            self._files = zipimport._zip_directory_cache[archive]
        # Sorted entry names enable bisection for directory-prefix tests.
        self.index = sorted(self._files)

    def _find(self, path):
        # True if 'path' is a file entry, or a directory prefix of some entry.
        path = path[self.prefix_len:]
        if path in self._files:
            result = True
        else:
            # Not a file entry: probe for any entry under 'path' + separator.
            # NOTE(review): comparisons use os.sep, but zip archives store
            # '/' separators -- confirm behaviour on Windows.
            if path and path[-1] != os.sep:
                path = path + os.sep
            i = bisect.bisect(self.index, path)
            try:
                result = self.index[i].startswith(path)
            except IndexError:
                result = False
        if not result:
            logger.debug('_find failed: %r %r', path, self.loader.prefix)
        else:
            logger.debug('_find worked: %r %r', path, self.loader.prefix)
        return result

    def get_cache_info(self, resource):
        # Resources in a zip can't be opened directly; the archive path is
        # the cache prefix and the intra-archive path locates the entry.
        prefix = self.loader.archive
        path = resource.path[1 + len(prefix):]
        return prefix, path

    def get_bytes(self, resource):
        return self.loader.get_data(resource.path)

    def get_stream(self, resource):
        return io.BytesIO(self.get_bytes(resource))

    def get_size(self, resource):
        path = resource.path[self.prefix_len:]
        # Index 3 of the zipimport directory entry -- presumably the
        # uncompressed size; verify against the zipimport internals.
        return self._files[path][3]

    def get_resources(self, resource):
        # Collect the immediate children of the directory entry by scanning
        # the sorted index from the first entry sharing the prefix.
        path = resource.path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        plen = len(path)
        result = set()
        i = bisect.bisect(self.index, path)
        while i < len(self.index):
            if not self.index[i].startswith(path):
                break
            s = self.index[i][plen:]
            result.add(s.split(os.sep, 1)[0])    # only immediate children
            i += 1
        return result

    def _is_directory(self, path):
        # A 'directory' exists iff some archive entry lies under it.
        path = path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        i = bisect.bisect(self.index, path)
        try:
            result = self.index[i].startswith(path)
        except IndexError:
            result = False
        return result
# Maps loader types to the finder class able to handle them. type(None)
# covers modules that have no __loader__ attribute at all.
_finder_registry = {
    type(None): ResourceFinder,
    zipimport.zipimporter: ZipResourceFinder
}

try:
    import _frozen_importlib
    _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
    _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
    # Interpreters without _frozen_importlib (or without these loader
    # classes) simply rely on the default registry entries above.
    pass

def register_finder(loader, finder_maker):
    """Register *finder_maker* as the finder factory for *loader*'s type."""
    _finder_registry[type(loader)] = finder_maker

# Cache of package name -> finder instance, populated lazily by finder().
_finder_cache = {}
def finder(package):
    """
    Return a resource finder for a package.

    :param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package.
    :raises DistlibException: if *package* is a plain module, or no finder
        is registered for its loader type.
    """
    try:
        # Finders are cached per package name.
        return _finder_cache[package]
    except KeyError:
        pass
    if package not in sys.modules:
        __import__(package)
    module = sys.modules[package]
    if getattr(module, '__path__', None) is None:
        # Only packages (things with __path__) can contain resources.
        raise DistlibException('You cannot get a finder for a module, '
                               'only for a package')
    loader = getattr(module, '__loader__', None)
    finder_maker = _finder_registry.get(type(loader))
    if finder_maker is None:
        raise DistlibException('Unable to locate finder for %r' % package)
    result = finder_maker(module)
    _finder_cache[package] = result
    return result
# Reusable stand-in module handed to finder factories for bare paths.
_dummy_module = types.ModuleType(str('__dummy__'))

def finder_for_path(path):
    """
    Return a resource finder for a path, which should represent a container.

    :param path: The path.
    :return: A :class:`ResourceFinder` instance for the path, or ``None``
        if no finder is registered for the path's importer type.
    """
    # Running get_importer invokes any path hooks and primes
    # sys.path_importer_cache for this path.
    pkgutil.get_importer(path)
    loader = sys.path_importer_cache.get(path)
    finder_maker = _finder_registry.get(type(loader))
    if not finder_maker:
        return None
    # Dress the path up as a module so the regular finder machinery applies.
    module = _dummy_module
    module.__file__ = os.path.join(path, '')
    module.__loader__ = loader
    return finder_maker(module)
| mit |
ovnicraft/odoo | addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py | 337 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
    """Wizard that creates a res.partner from the selected job applications."""
    _name = 'hr.recruitment.partner.create'
    _description = 'Create Partner from job application'
    _columns = {
        # NOTE(review): this flag is read via self.read() in make_order but
        # never acted upon -- confirm whether closing is implemented elsewhere.
        'close': fields.boolean('Close job request'),
    }

    def view_init(self, cr, uid, fields_list, context=None):
        # Guard executed when the wizard opens: refuse to proceed if any
        # selected application already has a partner linked to it.
        case_obj = self.pool.get('hr.applicant')
        if context is None:
            context = {}
        for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
            if case.partner_id:
                raise osv.except_osv(_('Error!'),
                    _('A contact is already defined on this job request.'))
        pass

    def make_order(self, cr, uid, ids, context=None):
        """
        Create a res.partner for each selected applicant, link it back to
        the application, and return an act_window opening the partner form.

        When several applications are selected, only the partner created
        for the last one is opened.
        """
        mod_obj = self.pool.get('ir.model.data')
        partner_obj = self.pool.get('res.partner')
        case_obj = self.pool.get('hr.applicant')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        # Resolve the standard partner search view for the returned action.
        result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
        res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
        for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
            # Abort if a partner with the same name already exists.
            partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
            if partner_id:
                raise osv.except_osv(_('Error!'),_('A contact is already existing with the same name.'))
            partner_id = partner_obj.create(cr, uid, {
                'name': case.partner_name or case.name,
                'user_id': case.user_id.id,
                'comment': case.description,
                'phone': case.partner_phone,
                'mobile': case.partner_mobile,
                'email': case.email_from
            }, context=context)
            # Link the new partner back to the application.
            case_obj.write(cr, uid, [case.id], {
                'partner_id': partner_id,
            }, context=context)
        return {
            'domain': "[]",
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'res.partner',
            'res_id': int(partner_id),
            'view_id': False,
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id']
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
inuitwallet/plunge_android | client/jsonrpc/serviceHandler.py | 61 | 3239 |
"""
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from jsonrpc import loads, dumps, JSONEncodeException
def ServiceMethod(fn):
    """Decorator marking *fn* as callable through the JSON-RPC service."""
    setattr(fn, 'IsServiceMethod', True)
    return fn
class ServiceException(Exception):
    """Base class for errors raised while handling a service request."""
    pass

class ServiceRequestNotTranslatable(ServiceException):
    """Raised when the incoming payload cannot be decoded as JSON."""
    pass

class BadServiceRequest(ServiceException):
    """Raised when a decoded request lacks 'id', 'method' or 'params'."""
    pass

class ServiceMethodNotFound(ServiceException):
    """Raised when the requested method is not exposed by the service."""
    def __init__(self, name):
        # The method name that could not be resolved.
        self.methodName=name
class ServiceHandler(object):
    """
    Dispatches JSON-RPC request strings to methods of a wrapped service
    object and renders the JSON response (result or error).
    """

    def __init__(self, service):
        # The object whose @ServiceMethod-decorated methods are exposed.
        self.service=service

    def handleRequest(self, json):
        """
        Handle one JSON-RPC request string and return the JSON response
        string. Errors are captured and encoded in the response rather
        than propagated to the caller.
        """
        err=None
        result = None
        id_=''

        try:
            req = self.translateRequest(json)
        except ServiceRequestNotTranslatable, e:
            err = e
            req={'id':id_}

        if err==None:
            try:
                # A well-formed request carries an id, method name and params.
                id_ = req['id']
                methName = req['method']
                args = req['params']
            except:
                err = BadServiceRequest(json)

        if err == None:
            try:
                meth = self.findServiceEndpoint(methName)
            except Exception, e:
                err = e

        if err == None:
            try:
                # Any exception from the endpoint becomes a JSON-RPC error.
                result = self.invokeServiceEndpoint(meth, args)
            except Exception, e:
                err = e

        resultdata = self.translateResult(result, err, id_)
        return resultdata

    def translateRequest(self, data):
        """Decode the request; raise ServiceRequestNotTranslatable on failure."""
        try:
            req = loads(data)
        except:
            raise ServiceRequestNotTranslatable(data)
        return req

    def findServiceEndpoint(self, name):
        """
        Return the service method named *name*; raise ServiceMethodNotFound
        unless it exists and is marked with @ServiceMethod.
        """
        try:
            meth = getattr(self.service, name)
            if getattr(meth, "IsServiceMethod"):
                return meth
            else:
                raise ServiceMethodNotFound(name)
        except AttributeError:
            # Either the method or the IsServiceMethod marker is missing.
            raise ServiceMethodNotFound(name)

    def invokeServiceEndpoint(self, meth, args):
        # Request params are applied positionally.
        return meth(*args)

    def translateResult(self, rslt, err, id_):
        """
        Serialise (result, error, id) into a JSON response string, degrading
        to a generic error when the result itself is not serialisable.
        """
        if err != None:
            err = {"name": err.__class__.__name__, "message":err.message}
            rslt = None
        try:
            data = dumps({"result":rslt,"id":id_,"error":err})
        except JSONEncodeException, e:
            err = {"name": "JSONEncodeException", "message":"Result Object Not Serializable"}
            data = dumps({"result":None, "id":id_,"error":err})
        return data
Newsrecommender/newsrecommender | ArticleRecommendationProject/Recommendation/Collab_Content_Based.py | 1 | 5856 | import yaml
import pandas as pd
import numpy as np
import sys
import os
from math import sqrt
import matplotlib
import matplotlib.pyplot as plot
import networkx as nx
def get_script_directory():
    """
    Return the directory containing the running script.

    In interactive mode ``sys.argv[0]`` may resolve to a directory, in
    which case that resolved path itself is returned.
    """
    script_path = os.path.realpath(sys.argv[0])
    if os.path.isdir(script_path):
        return script_path
    return os.path.dirname(script_path)
def similarity_score(Article1, Article2):
    """
    Return a similarity score between two articles derived from the
    Euclidean distance over their commonly-rated items in ``dataset``.

    Returns 0 when the articles share no rated items.
    """
    common = [item for item in dataset[Article1] if item in dataset[Article2]]
    if not common:
        # No overlap means there is no basis for comparison.
        return 0
    squared_distance = sum(
        pow(dataset[Article1][item] - dataset[Article2][item], 2)
        for item in common
    )
    # Map distance [0, inf) onto similarity (0, 1].
    return 1/(1+sqrt(squared_distance))
def pearson_correlation(Article1, Article2):
    """
    Return the Pearson correlation of the ratings that two articles have
    in common in ``dataset``.

    Returns 0 when there is no overlap or the denominator degenerates
    (zero variance on either side).
    """
    common = [item for item in dataset[Article1] if item in dataset[Article2]]
    n = len(common)
    if n == 0:
        return 0

    ratings1 = [dataset[Article1][item] for item in common]
    ratings2 = [dataset[Article2][item] for item in common]

    # Sums, sums of squares and the cross-product over the common items.
    sum1 = sum(ratings1)
    sum2 = sum(ratings2)
    sum1_sq = sum(pow(r, 2) for r in ratings1)
    sum2_sq = sum(pow(r, 2) for r in ratings2)
    product_sum = sum(a * b for a, b in zip(ratings1, ratings2))

    numerator_value = product_sum - (sum1*sum2/n)
    denominator_value = sqrt((sum1_sq - pow(sum1, 2)/n) * (sum2_sq - pow(sum2, 2)/n))
    if denominator_value == 0:
        return 0
    return numerator_value/denominator_value
def find_most_similar_objects(Article1, number_of_users):
    """
    Return the id of the single most similar article to *Article1*.

    Despite the name, only the best match among the top *number_of_users*
    scores is returned (the historical interface this script relies on).
    """
    scores = [(pearson_correlation(Article1, other), other)
              for other in dataset if other != Article1]
    # Highest correlation first; ties broken by article id.
    scores.sort(reverse=True)
    top = scores[:number_of_users]
    return top[0][1]
def get_recommendations(objects, no_of_recommendations):
    """
    Build the recommendation for every article in *objects*.

    :return: two parallel lists -- the input article ids and, for each,
             the id of its most similar article.
    """
    input_articles = list(objects)
    recommended_articles = [
        find_most_similar_objects(article, no_of_recommendations)
        for article in objects
    ]
    return input_articles, recommended_articles
# Resolve the script's own directory and make it the working directory so
# that config.yml is found regardless of where the script was launched from.
path = get_script_directory()
print ('Script is located at {}'.format(path))
os.chdir(path)

# Load configuration.
print("Reading configuration")
with open("config.yml", 'r') as ymlfile:
    # safe_load instead of load: yaml.load() without an explicit Loader is
    # unsafe on untrusted input and raises TypeError on PyYAML >= 6.0; a
    # plain configuration file only needs the safe subset of YAML anyway.
    cfg = yaml.safe_load(ymlfile)

# Input/output locations and the index column, all taken from config.yml.
user_ratings_files_path = cfg['project_test_conf']['ratings_file_path']
user_ratings_csv_filename = cfg['project_test_conf']['ratings_file_name']
articles_files_path = cfg['project_test_conf']['articles_file_path']
articles_csv_filename = cfg['project_test_conf']['articles_file_name']
ratings_index = cfg['project_test_conf']['ratings_index_column']
output_file_path = cfg['project_test_conf']['output_path']
output_file_name = cfg['project_test_conf']['output_file_name']
ratings_file = os.path.join(user_ratings_files_path, user_ratings_csv_filename)
articles_file = os.path.join(articles_files_path, articles_csv_filename)
Output_Recommendations = os.path.join(output_file_path, output_file_name)
print("Configuration loaded successfully")

# Read the ratings matrix and the article metadata.
print ('Reading ratings from file {}'.format(ratings_file))
user_ratings = pd.read_csv(ratings_file, index_col=ratings_index)
articles_db = pd.read_csv(articles_file, index_col=ratings_index)
objects_list = list(user_ratings.index)

# The correlation helpers expect a nested dict {article: {user: rating}},
# hence the transpose before to_dict().
user_ratings_T = user_ratings.transpose()
dataset = user_ratings_T.to_dict()

# Compute the most similar article for every article.
print('Calculations in progress...')
Article, recommended_article = get_recommendations(objects_list, 5)
print('Calculations completed.')

# Write input/recommendation id pairs, with their titles, to the output CSV.
print('Creating output file')
recommended_article_title = []
for content in recommended_article:
    recommended_article_title.append(articles_db.Title[content])
input_article_title = []
for content in Article:
    input_article_title.append(articles_db.Title[content])
df = pd.DataFrame()
df['Article'] = Article
df['Recommendation'] = recommended_article
df['News'] = input_article_title
df['Recommended_News'] = recommended_article_title
df = df.set_index('Article', drop=True, append=False, inplace=False, verify_integrity=False)
df.to_csv(Output_Recommendations)
print('Output file created.')
print('Check output files at {}'.format(Output_Recommendations))
| mit |
frederick-masterton/django | django/core/management/commands/testserver.py | 17 | 2017 | from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
    """Management command: run a dev server against a fresh test database."""
    option_list = BaseCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--addrport', action='store', dest='addrport',
            type='string', default='',
            help='port number or ipaddr:port to run the server on'),
        make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use a IPv6 address.'),
    )
    help = 'Runs a development server with data from the given fixture(s).'
    args = '[fixture ...]'

    # NOTE(review): system checks are skipped -- presumably because the test
    # database does not exist yet when checks would run; confirm.
    requires_system_checks = False

    def handle(self, *fixture_labels, **options):
        """
        Create a test database, load the given fixtures into it, then run
        the development server against that database.
        """
        from django.core.management import call_command
        from django.db import connection

        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        addrport = options.get('addrport')

        # Create a test database.
        db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive)

        # Import the fixture data into the test database.
        call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})

        # Run the development server. Turn off auto-reloading because it causes
        # a strange error -- it causes this handle() method to be called
        # multiple times.
        shutdown_message = '\nServer stopped.\nNote that the test database, %r, has not been deleted. You can explore it on your own.' % db_name
        use_threading = connection.features.test_db_allows_multiple_connections
        call_command(
            'runserver',
            addrport=addrport,
            shutdown_message=shutdown_message,
            use_reloader=False,
            use_ipv6=options['use_ipv6'],
            use_threading=use_threading
        )
| bsd-3-clause |
phammin1/QaManagement | QaManagement/env/Lib/site-packages/django/shortcuts.py | 135 | 7957 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
                       context_instance=_context_instance_undefined,
                       content_type=None, status=None, dirs=_dirs_undefined,
                       dictionary=_dictionary_undefined, using=None):
    """
    Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.
    """
    # The module-level sentinels distinguish "argument not passed" from an
    # explicit None, so deprecated arguments are detected reliably.
    if (context_instance is _context_instance_undefined
            and dirs is _dirs_undefined
            and dictionary is _dictionary_undefined):
        # No deprecated arguments were passed - use the new code path
        content = loader.render_to_string(template_name, context, using=using)

    else:
        # Some deprecated arguments were passed - use the legacy code path
        content = loader.render_to_string(
            template_name, context, context_instance, dirs, dictionary,
            using=using)

    return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
           context_instance=_context_instance_undefined,
           content_type=None, status=None, current_app=_current_app_undefined,
           dirs=_dirs_undefined, dictionary=_dictionary_undefined,
           using=None):
    """
    Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.
    Uses a RequestContext by default.
    """
    # The module-level sentinels distinguish "argument not passed" from an
    # explicit None, so deprecated arguments are detected reliably.
    if (context_instance is _context_instance_undefined
            and current_app is _current_app_undefined
            and dirs is _dirs_undefined
            and dictionary is _dictionary_undefined):
        # No deprecated arguments were passed - use the new code path
        # In Django 1.10, request should become a positional argument.
        content = loader.render_to_string(
            template_name, context, request=request, using=using)

    else:
        # Some deprecated arguments were passed - use the legacy code path
        if context_instance is not _context_instance_undefined:
            if current_app is not _current_app_undefined:
                raise ValueError('If you provide a context_instance you must '
                                 'set its current_app before calling render()')
        else:
            context_instance = RequestContext(request)
            if current_app is not _current_app_undefined:
                warnings.warn(
                    "The current_app argument of render is deprecated. "
                    "Set the current_app attribute of request instead.",
                    RemovedInDjango110Warning, stacklevel=2)
                request.current_app = current_app
                # Directly set the private attribute to avoid triggering the
                # warning in RequestContext.__init__.
                context_instance._current_app = current_app

        content = loader.render_to_string(
            template_name, context, context_instance, dirs, dictionary,
            using=using)

    return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
    """
    Returns an HttpResponseRedirect to the appropriate URL for the arguments
    passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urlresolvers.reverse()` will
          be used to reverse-resolve the name.

        * A URL, which will be used as-is for the redirect location.

    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect
    """
    # Pop 'permanent' before resolving so it is not forwarded to resolve_url.
    permanent = kwargs.pop('permanent', False)
    response_class = (HttpResponsePermanentRedirect if permanent
                      else HttpResponseRedirect)
    return response_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
    """
    Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
    get_object_or_404 and get_list_or_404 more DRY.

    Raises a ValueError if klass is not a Model, Manager, or QuerySet.
    """
    # Guard-clause style: return as soon as the type is recognised.
    if isinstance(klass, QuerySet):
        return klass
    if isinstance(klass, Manager):
        return klass.all()
    if isinstance(klass, ModelBase):
        return klass._default_manager.all()

    # Unknown type: report a readable name in the error message.
    if isinstance(klass, type):
        klass__name = klass.__name__
    else:
        klass__name = klass.__class__.__name__
    raise ValueError("Object is of type '%s', but must be a Django Model, "
                     "Manager, or QuerySet" % klass__name)
def get_object_or_404(klass, *args, **kwargs):
    """
    Uses get() to return an object, or raises a Http404 exception if the object
    does not exist.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the get() query.

    Note: Like with get(), an MultipleObjectsReturned will be raised if more than one
    object is found.
    """
    queryset = _get_queryset(klass)
    model = queryset.model
    try:
        return queryset.get(*args, **kwargs)
    except model.DoesNotExist:
        raise Http404('No %s matches the given query.' % model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Uses filter() to return a list of objects, or raise a Http404 exception if
    the list is empty.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the filter() query.
    """
    queryset = _get_queryset(klass)
    matches = list(queryset.filter(*args, **kwargs))
    if matches:
        return matches
    raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def resolve_url(to, *args, **kwargs):
    """
    Return a URL appropriate for the arguments passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urlresolvers.reverse()` will
          be used to reverse-resolve the name.

        * A URL, which will be returned as-is.
    """
    # If it's a model, use get_absolute_url()
    if hasattr(to, 'get_absolute_url'):
        return to.get_absolute_url()

    if isinstance(to, Promise):
        # Expand the lazy instance, as it can cause issues when it is passed
        # further to some Python functions like urlparse.
        to = force_text(to)

    if isinstance(to, six.string_types):
        # Handle relative URLs
        if to.startswith(('./', '../')):
            return to

    # Next try a reverse URL resolution.
    try:
        return urlresolvers.reverse(to, args=args, kwargs=kwargs)
    except urlresolvers.NoReverseMatch:
        # If this is a callable, re-raise.
        if callable(to):
            raise
        # If this doesn't "feel" like a URL, re-raise.
        if '/' not in to and '.' not in to:
            raise

    # Finally, fall back and assume it's a URL
    return to
| mit |
camptocamp/odoo | addons/l10n_bo/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/sessions/middleware.py | 323 | 1888 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    """Attaches a session to each request and persists it on the response."""

    def process_request(self, request):
        # Instantiate the configured session engine with the session key from
        # the request cookie (a missing key starts a fresh, unsaved session).
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # No session attached to the request (process_request did not
            # run); pass the response through untouched.
            pass
        else:
            if accessed:
                # Tell caches that the response varies on the Cookie header.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Browser-session cookie: no explicit expiry.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                request.session.save()
                response.set_cookie(settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None,
                        httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| apache-2.0 |
akretion/connector | connector/queue/model.py | 1 | 16341 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import logging
from datetime import datetime, timedelta
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from .job import STATES, DONE, PENDING, OpenERPJobStorage
from .worker import WORKER_TIMEOUT
from ..session import ConnectorSession
from .worker import watcher
_logger = logging.getLogger(__name__)
class QueueJob(orm.Model):
    """ Job status and result.

    One record per job stored by ``OpenERPJobStorage``: the pickled
    callable, its scheduling metadata and its lifecycle state. Records
    are created/updated by the job storage, not by users directly.
    """
    _name = 'queue.job'
    _description = 'Queue Job'
    # mail.thread: lets us post a message and notify followers when a job
    # fails; ir.needaction_mixin: feeds the "needs action" counter in the
    # UI (see _needaction_domain_get below).
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    # Lifecycle timestamps are tracked in explicit date_* columns instead
    # of the standard create/write log fields.
    _log_access = False
    _order = 'date_created DESC, date_done DESC'
    # Jobs done for longer than this number of days are deleted by
    # ``autovacuum()`` (called from a cron).
    _removal_interval = 30 # days
    _columns = {
        # Worker currently owning the job; cleared when the worker dies.
        'worker_id': fields.many2one('queue.worker', string='Worker',
                                     ondelete='set null', readonly=True),
        # UUID generated by the job framework; used to reload the Job
        # object from storage (see open_related_action).
        'uuid': fields.char('UUID', readonly=True, select=True, required=True),
        'user_id': fields.many2one('res.users', string='User ID', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'name': fields.char('Description', readonly=True),
        # Human-readable representation of the task (function + args).
        'func_string': fields.char('Task', readonly=True),
        # Pickled callable + arguments; the actual payload of the job.
        'func': fields.binary('Pickled Function', readonly=True, required=True),
        'state': fields.selection(STATES,
                                  string='State',
                                  readonly=True,
                                  required=True),
        'priority': fields.integer('Priority'),
        'exc_info': fields.text('Exception Info', readonly=True),
        'result': fields.text('Result', readonly=True),
        'date_created': fields.datetime('Created Date', readonly=True),
        'date_started': fields.datetime('Start Date', readonly=True),
        'date_enqueued': fields.datetime('Enqueue Time', readonly=True),
        'date_done': fields.datetime('Date Done', readonly=True),
        # Earliest execution time; used in the assignment ORDER BY.
        'eta': fields.datetime('Execute only after'),
        'active': fields.boolean('Active'),
        'model_name': fields.char('Model', readonly=True),
        'retry': fields.integer('Current try'),
        'max_retries': fields.integer(
            'Max. retries',
            help="The job will fail if the number of tries reach the "
                 "max. retries.\n"
                 "Retries are infinite when empty."),
    }
    _defaults = {
        'active': True,
    }

    def open_related_action(self, cr, uid, ids, context=None):
        """ Open the related action associated to the job.

        Reloads the Job object from storage by its UUID and returns the
        action dict provided by ``job.related_action()``; raises if the
        job defines no related action.
        """
        if hasattr(ids, '__iter__'):
            assert len(ids) == 1, "1 ID expected, got %s" % ids
            ids = ids[0]
        session = ConnectorSession(cr, uid, context=context)
        storage = OpenERPJobStorage(session)
        job = self.browse(cr, uid, ids, context=context)
        job = storage.load(job.uuid)
        action = job.related_action(session)
        if action is None:
            raise orm.except_orm(
                _('Error'),
                _('No action available for this job'))
        return action

    def _change_job_state(self, cr, uid, ids, state, result=None, context=None):
        """ Change the state of the `Job` object itself so it
        will change the other fields (date, result, ...)

        Only DONE and PENDING are supported: the Job object's
        ``set_done``/``set_pending`` update the derived fields and the
        storage writes them back to this model.
        """
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        session = ConnectorSession(cr, uid, context=context)
        storage = OpenERPJobStorage(session)
        for job in self.browse(cr, uid, ids, context=context):
            job = storage.load(job.uuid)
            if state == DONE:
                job.set_done(result=result)
            elif state == PENDING:
                job.set_pending(result=result)
            else:
                raise ValueError('State not supported: %s' % state)
            storage.store(job)

    def button_done(self, cr, uid, ids, context=None):
        """ Mark the job(s) as done manually, recording who did it. """
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        result = _('Manually set to done by %s') % user.name
        self._change_job_state(cr, uid, ids, DONE,
                               result=result, context=context)
        return True

    def requeue(self, cr, uid, ids, context=None):
        """ Put the job(s) back in the pending state for re-execution. """
        self._change_job_state(cr, uid, ids, PENDING, context=context)
        return True

    def write(self, cr, uid, ids, vals, context=None):
        # When a job transitions to 'failed', notify the connector
        # managers by posting a message on the job's mail thread.
        res = super(QueueJob, self).write(cr, uid, ids, vals, context=context)
        if vals.get('state') == 'failed':
            if not hasattr(ids, '__iter__'):
                ids = [ids]
            # subscribe the users now to avoid to subscribe them
            # at every job creation
            self._subscribe_users(cr, uid, ids, context=context)
            for job_id in ids:
                msg = self._message_failed_job(cr, uid, job_id,
                                               context=context)
                if msg:
                    self.message_post(cr, uid, job_id, body=msg,
                                      subtype='connector.mt_job_failed',
                                      context=context)
        return res

    def _subscribe_users(self, cr, uid, ids, context=None):
        """ Subscribe all users having the 'Connector Manager' group """
        group_ref = self.pool.get('ir.model.data').get_object_reference(
            cr, uid, 'connector', 'group_connector_manager')
        if not group_ref:
            return
        group_id = group_ref[1]
        # Restrict to managers of the jobs' companies when set, so users
        # are not notified about other companies' failures.
        jobs = self.read(cr, uid, ids, ['company_id'], context=context)
        company_ids = [val['company_id'][0] for val in jobs
                       if val['company_id']]
        domain = [('groups_id', '=', group_id)]
        if company_ids:
            domain.append(('company_ids', 'child_of', company_ids))
        user_ids = self.pool.get('res.users').search(
            cr, uid, domain, context=context)
        self.message_subscribe_users(cr, uid, ids,
                                     user_ids=user_ids,
                                     context=context)

    def _message_failed_job(self, cr, uid, id, context=None):
        """ Return a message which will be posted on the job when it is failed.
        It can be inherited to allow more precise messages based on the
        exception informations.
        If nothing is returned, no message will be posted.
        """
        return _("Something bad happened during the execution of the job. "
                 "More details in the 'Exception Information' section.")

    def _needaction_domain_get(self, cr, uid, context=None):
        """ Returns the domain to filter records that require an action
        :return: domain or False is no action
        """
        return [('state', '=', 'failed')]

    def autovacuum(self, cr, uid, context=None):
        """ Delete all jobs (active or not) done since more than
        ``_removal_interval`` days.
        Called from a cron.
        """
        if context is None:
            context = {}
        # active_test=False so inactive (archived) jobs are purged too.
        context = dict(context, active_test=False)
        deadline = datetime.now() - timedelta(days=self._removal_interval)
        deadline_fmt = deadline.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        job_ids = self.search(cr, uid,
                              [('date_done', '<=', deadline_fmt)],
                              context=context)
        self.unlink(cr, uid, job_ids, context=context)
        return True
class QueueWorker(orm.Model):
    """ Worker.

    One record per live worker process. Workers register themselves via
    ``_notify_alive`` and are garbage-collected by ``_purge_dead_workers``
    when they have not checked in for ``worker_timeout`` seconds.
    """
    _name = 'queue.worker'
    _description = 'Queue Worker'
    _log_access = False
    _rec_name = 'uuid'
    # Seconds without an alive notification before a worker is
    # considered dead (shared constant with the worker process itself).
    worker_timeout = WORKER_TIMEOUT
    _columns = {
        'uuid': fields.char('UUID', readonly=True, select=True, required=True),
        'pid': fields.char('PID', readonly=True),
        'date_start': fields.datetime('Start Date', readonly=True),
        'date_alive': fields.datetime('Last Alive Check', readonly=True),
        'job_ids': fields.one2many('queue.job', 'worker_id',
                                   string='Jobs', readonly=True),
    }

    def _notify_alive(self, cr, uid, worker, context=None):
        """ Register the worker if unknown, else refresh its alive date. """
        worker_ids = self.search(cr, uid,
                                 [('uuid', '=', worker.uuid)],
                                 context=context)
        now_fmt = datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if not worker_ids:
            self.create(cr, uid,
                        {'uuid': worker.uuid,
                         'pid': os.getpid(),
                         'date_start': now_fmt,
                         'date_alive': now_fmt},
                        context=context)
        else:
            self.write(cr, uid, worker_ids,
                       {'date_alive': now_fmt}, context=context)

    def _purge_dead_workers(self, cr, uid, context=None):
        """ Delete workers whose last alive check is older than the timeout.

        Their jobs' worker_id is cleared by the 'set null' ondelete rule,
        so the jobs become assignable again.
        """
        deadline = datetime.now() - timedelta(seconds=self.worker_timeout)
        deadline_fmt = deadline.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        dead_ids = self.search(cr, uid,
                               [('date_alive', '<', deadline_fmt)],
                               context=context)
        dead_workers = self.read(cr, uid, dead_ids, ['uuid'], context=context)
        for worker in dead_workers:
            _logger.debug('Worker %s is dead', worker['uuid'])
        # Best effort: a concurrent transaction may hold a lock on the
        # same rows; in that case we simply retry on the next pass.
        try:
            self.unlink(cr, uid, dead_ids, context=context)
        except Exception:
            _logger.debug("Failed attempt to unlink a dead worker, likely due "
                          "to another transaction in progress.")

    def _worker_id(self, cr, uid, context=None):
        """ Return the database id of this process' worker record. """
        worker = watcher.worker_for_db(cr.dbname)
        assert worker
        worker_ids = self.search(cr, uid, [('uuid', '=', worker.uuid)],
                                 context=context)
        assert len(worker_ids) == 1, ("%s worker found in database instead "
                                      "of 1" % len(worker_ids))
        return worker_ids[0]

    def assign_then_enqueue(self, cr, uid, max_jobs=None, context=None):
        """ Assign all the jobs not already assigned to a worker.
        Then enqueue all the jobs having a worker but not enqueued.
        Each operation is atomic.
        .. warning:: commit transaction
           ``cr.commit()`` is called, so please always call
           this method in your own transaction, not in the main
           OpenERP's transaction
        :param max_jobs: maximal limit of jobs to assign on a worker
        :type max_jobs: int
        """
        self.assign_jobs(cr, uid, max_jobs=max_jobs, context=context)
        cr.commit()
        self.enqueue_jobs(cr, uid, context=context)
        cr.commit()
        return True

    def assign_jobs(self, cr, uid, max_jobs=None, context=None):
        """ Assign ``n`` jobs to the worker of the current process
        ``n`` is ``max_jobs`` or unlimited if ``max_jobs`` is None
        :param max_jobs: maximal limit of jobs to assign on a worker
        :type max_jobs: int
        """
        worker = watcher.worker_for_db(cr.dbname)
        if worker:
            self._assign_jobs(cr, uid, max_jobs=max_jobs, context=context)
        else:
            _logger.debug('No worker started for process %s', os.getpid())
        return True

    def enqueue_jobs(self, cr, uid, context=None):
        """ Enqueue all the jobs assigned to the worker of the current
        process
        """
        worker = watcher.worker_for_db(cr.dbname)
        if worker:
            self._enqueue_jobs(cr, uid, context=context)
        else:
            _logger.debug('No worker started for process %s', os.getpid())
        return True

    def _assign_jobs(self, cr, uid, max_jobs=None, context=None):
        # Unassigned, runnable jobs first (eta/priority/creation order);
        # FOR UPDATE NOWAIT locks the rows so concurrent workers cannot
        # grab the same jobs.
        sql = ("SELECT id FROM queue_job "
               "WHERE worker_id IS NULL "
               "AND state not in ('failed', 'done') "
               "AND active = true "
               "ORDER BY eta NULLS LAST, priority, date_created ")
        if max_jobs is not None:
            sql += ' LIMIT %d' % max_jobs
        sql += ' FOR UPDATE NOWAIT'
        # use a SAVEPOINT to be able to rollback this part of the
        # transaction without failing the whole transaction if the LOCK
        # cannot be acquired
        worker = watcher.worker_for_db(cr.dbname)
        cr.execute("SAVEPOINT queue_assign_jobs")
        try:
            cr.execute(sql, log_exceptions=False)
        except Exception:
            # Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
            # so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
            # state. The assign will be done the next time.
            cr.execute("ROLLBACK TO queue_assign_jobs")
            _logger.debug("Failed attempt to assign jobs, likely due to "
                          "another transaction in progress. "
                          "Trace of the failed assignment of jobs on worker "
                          "%s attempt: ", worker.uuid, exc_info=True)
            return
        job_rows = cr.fetchall()
        if not job_rows:
            _logger.debug('No job to assign to worker %s', worker.uuid)
            return
        job_ids = [id for id, in job_rows]
        worker_id = self._worker_id(cr, uid, context=context)
        _logger.debug('Assign %d jobs to worker %s', len(job_ids),
                      worker.uuid)
        # ready to be enqueued in the worker
        try:
            self.pool.get('queue.job').write(cr, uid, job_ids,
                                             {'state': 'pending',
                                              'worker_id': worker_id},
                                             context=context)
        except Exception:
            pass # will be assigned to another worker

    def _enqueue_jobs(self, cr, uid, context=None):
        """ Add to the queue of the worker all the jobs not
        yet queued but already assigned."""
        job_obj = self.pool.get('queue.job')
        db_worker_id = self._worker_id(cr, uid, context=context)
        job_ids = job_obj.search(cr, uid,
                                 [('worker_id', '=', db_worker_id),
                                  ('state', '=', 'pending')],
                                 context=context)
        worker = watcher.worker_for_db(cr.dbname)
        jobs = job_obj.read(cr, uid, job_ids, ['uuid'], context=context)
        for job in jobs:
            worker.enqueue_job_uuid(job['uuid'])
class requeue_job(orm.TransientModel):
    """Wizard letting the user requeue a selection of jobs from the UI."""
    _name = 'queue.requeue.job'
    _description = 'Wizard to requeue a selection of jobs'

    def _get_job_ids(self, cr, uid, context=None):
        """Default the wizard's jobs to the records selected in the list
        view (available through the context), or False when the wizard is
        opened from somewhere else.
        """
        context = context if context is not None else {}
        if context.get('active_model') != 'queue.job':
            return False
        return context.get('active_ids') or False

    _columns = {
        'job_ids': fields.many2many('queue.job', string='Jobs'),
    }
    _defaults = {
        'job_ids': _get_job_ids,
    }

    def requeue(self, cr, uid, ids, context=None):
        """Requeue the selected jobs, then close the wizard window."""
        if isinstance(ids, (tuple, list)):
            assert len(ids) == 1, "One ID expected"
            ids = ids[0]
        wizard = self.browse(cr, uid, ids, context=context)
        selected_ids = [job.id for job in wizard.job_ids]
        self.pool.get('queue.job').requeue(cr, uid, selected_ids,
                                           context=context)
        return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
huangkuan/hack | lib/oauth2client/contrib/gce.py | 25 | 7200 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
import json
import logging
import warnings
import httplib2
from six.moves import http_client
from six.moves import urllib
from oauth2client._helpers import _from_bytes
from oauth2client import util
from oauth2client.client import HttpAccessTokenRefreshError
from oauth2client.client import AssertionCredentials
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
logger = logging.getLogger(__name__)
# URI Template for the endpoint that returns access_tokens.
_METADATA_ROOT = ('http://metadata.google.internal/computeMetadata/v1/'
'instance/service-accounts/default/')
META = _METADATA_ROOT + 'token'
_DEFAULT_EMAIL_METADATA = _METADATA_ROOT + 'email'
_SCOPES_WARNING = """\
You have requested explicit scopes to be used with a GCE service account.
Using this argument will have no effect on the actual scopes for tokens
requested. These scopes are set at VM instance creation time and
can't be overridden in the request.
"""
def _get_service_account_email(http_request=None):
    """Ask the GCE metadata server for the default service account email.

    Args:
        http_request: callable, (Optional) a callable matching the
            signature of httplib2.Http.request used to reach the
            metadata service; a fresh httplib2 client is created when
            omitted.

    Returns:
        tuple, ``(None, email)`` on success, or ``(response, content)``
        from the failed metadata request otherwise.
    """
    if http_request is None:
        http_request = httplib2.Http().request
    response, content = http_request(
        _DEFAULT_EMAIL_METADATA, headers={'Metadata-Flavor': 'Google'})
    if response.status != http_client.OK:
        return response, content
    return None, _from_bytes(content)
class AppAssertionCredentials(AssertionCredentials):
    """Credentials for the service account of a Compute Engine instance.

    Implements the two-legged OAuth 2.0 assertion flow for the account
    bound to the current VM: access tokens come straight from the
    instance metadata server, so no user interaction, stored refresh
    token, or private key is involved.
    """

    @util.positional(2)
    def __init__(self, scope='', **kwargs):
        """Constructor.

        Args:
            scope: string or iterable of strings, accepted only for
                backwards compatibility. Scopes are fixed when the VM is
                created and cannot be changed per request, so this value
                is never sent to the server.
        """
        if scope:
            warnings.warn(_SCOPES_WARNING)
        # Kept solely for backwards compatibility; never consulted.
        self.scope = util.scopes_to_string(scope)
        self.kwargs = kwargs
        # The parent signature still takes an assertion type even though
        # it is no longer used.
        super(AppAssertionCredentials, self).__init__(None)
        self._service_account_email = None

    @classmethod
    def from_json(cls, json_data):
        parsed = json.loads(_from_bytes(json_data))
        return AppAssertionCredentials(parsed['scope'])

    def _refresh(self, http_request):
        """Refresh ``access_token`` from the metadata server.

        Args:
            http_request: callable matching the signature of
                httplib2.Http.request, used to perform the refresh.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        response, content = http_request(
            META, headers={'Metadata-Flavor': 'Google'})
        content = _from_bytes(content)
        if response.status != http_client.OK:
            if response.status == http_client.NOT_FOUND:
                content += (' This can occur if a VM was created'
                            ' with no service account or scopes.')
            raise HttpAccessTokenRefreshError(content, status=response.status)
        try:
            token_content = json.loads(content)
        except Exception as e:
            raise HttpAccessTokenRefreshError(str(e),
                                              status=response.status)
        self.access_token = token_content['access_token']

    @property
    def serialization_data(self):
        # There is no private material to serialize for a GCE account.
        raise NotImplementedError(
            'Cannot serialize credentials for GCE service accounts.')

    def create_scoped_required(self):
        return False

    def create_scoped(self, scopes):
        return AppAssertionCredentials(scopes, **self.kwargs)

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Provided only to satisfy the common credentials interface; the
        signing key of a GCE service account is not exposed to the VM.

        Args:
            blob: bytes, Message to be signed.

        Raises:
            NotImplementedError, always.
        """
        raise NotImplementedError(
            'Compute Engine service accounts cannot sign blobs')

    @property
    def service_account_email(self):
        """Email of the default service account, fetched lazily.

        Returns:
            string, The email associated with the Google Compute Engine
            service account.

        Raises:
            AttributeError, if the email can not be retrieved from the
            Google Compute Engine metadata service.
        """
        if self._service_account_email is None:
            failure, email = _get_service_account_email()
            if failure is not None:
                raise AttributeError('Failed to retrieve the email from the '
                                     'Google Compute Engine metadata service',
                                     failure, email)
            self._service_account_email = email
        return self._service_account_email
| apache-2.0 |
cl0ne/vital-records-registry | registry/registry/settings.py | 1 | 3274 | """
Django settings for registry project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): placeholder value — presumably overridden by
# settings_local (imported at the bottom of this file); confirm before
# deploying.
SECRET_KEY = '1234567890-change-it'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Empty list blocks all hosts when DEBUG is False; expected to be set
# in settings_local for a real deployment.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # project app first so its templates/static can shadow the defaults
    'vital_records.apps.VitalRecordsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'registry.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'registry.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Custom user model provided by the vital_records app.
AUTH_USER_MODEL = 'vital_records.RegistryUser'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Machine-local overrides; kept last so its names win over the defaults
# defined above.
from registry.settings_local import *
| gpl-3.0 |
pratikmallya/hue | desktop/core/ext-py/boto-2.38.0/boto/glacier/writer.py | 153 | 9668 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <robie@justgohome.co.uk>
# Tree hash implementation from Aaron Brady bradya@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import hashlib
from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex
# This import is provided for backwards compatibility. This function is
# now in boto.glacier.utils, but any existing code can still import
# this directly from this module.
from boto.glacier.utils import compute_hashes_from_fileobj
_ONE_MEGABYTE = 1024 * 1024
class _Partitioner(object):
"""Convert variable-size writes into part-sized writes
Call write(data) with variable sized data as needed to write all data. Call
flush() after all data is written.
This instance will call send_fn(part_data) as needed in part_size pieces,
except for the final part which may be shorter than part_size. Make sure to
call flush() to ensure that a short final part results in a final send_fn
call.
"""
def __init__(self, part_size, send_fn):
self.part_size = part_size
self.send_fn = send_fn
self._buffer = []
self._buffer_size = 0
def write(self, data):
if data == b'':
return
self._buffer.append(data)
self._buffer_size += len(data)
while self._buffer_size > self.part_size:
self._send_part()
def _send_part(self):
data = b''.join(self._buffer)
# Put back any data remaining over the part size into the
# buffer
if len(data) > self.part_size:
self._buffer = [data[self.part_size:]]
self._buffer_size = len(self._buffer[0])
else:
self._buffer = []
self._buffer_size = 0
# The part we will send
part = data[:self.part_size]
self.send_fn(part)
def flush(self):
if self._buffer_size > 0:
self._send_part()
class _Uploader(object):
"""Upload to a Glacier upload_id.
Call upload_part for each part (in any order) and then close to complete
the upload.
"""
def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
self.vault = vault
self.upload_id = upload_id
self.part_size = part_size
self.chunk_size = chunk_size
self.archive_id = None
self._uploaded_size = 0
self._tree_hashes = []
self.closed = False
def _insert_tree_hash(self, index, raw_tree_hash):
list_length = len(self._tree_hashes)
if index >= list_length:
self._tree_hashes.extend([None] * (list_length - index + 1))
self._tree_hashes[index] = raw_tree_hash
def upload_part(self, part_index, part_data):
"""Upload a part to Glacier.
:param part_index: part number where 0 is the first part
:param part_data: data to upload corresponding to this part
"""
if self.closed:
raise ValueError("I/O operation on closed file")
# Create a request and sign it
part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
self._insert_tree_hash(part_index, part_tree_hash)
hex_tree_hash = bytes_to_hex(part_tree_hash)
linear_hash = hashlib.sha256(part_data).hexdigest()
start = self.part_size * part_index
content_range = (start,
(start + len(part_data)) - 1)
response = self.vault.layer1.upload_part(self.vault.name,
self.upload_id,
linear_hash,
hex_tree_hash,
content_range, part_data)
response.read()
self._uploaded_size += len(part_data)
def skip_part(self, part_index, part_tree_hash, part_length):
"""Skip uploading of a part.
The final close call needs to calculate the tree hash and total size
of all uploaded data, so this is the mechanism for resume
functionality to provide it without actually uploading the data again.
:param part_index: part number where 0 is the first part
:param part_tree_hash: binary tree_hash of part being skipped
:param part_length: length of part being skipped
"""
if self.closed:
raise ValueError("I/O operation on closed file")
self._insert_tree_hash(part_index, part_tree_hash)
self._uploaded_size += part_length
def close(self):
if self.closed:
return
if None in self._tree_hashes:
raise RuntimeError("Some parts were not uploaded.")
# Complete the multiplart glacier upload
hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
response = self.vault.layer1.complete_multipart_upload(
self.vault.name, self.upload_id, hex_tree_hash,
self._uploaded_size)
self.archive_id = response['ArchiveId']
self.closed = True
def generate_parts_from_fobj(fobj, part_size):
    """Yield successive chunks of at most ``part_size`` read from ``fobj``.

    The file object must be opened in binary mode; chunks are yielded
    exactly as read. BUGFIX: the previous version called
    ``data.encode('utf-8')`` on every chunk, which raises on the bytes
    returned by a binary file object and would change the byte offsets
    and tree hashes that Glacier computes over the raw part data.
    """
    data = fobj.read(part_size)
    while data:
        yield data
        data = fobj.read(part_size)
def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
                       chunk_size=_ONE_MEGABYTE):
    """Resume upload of a file already part-uploaded to Glacier.

    An upload with nothing part-uploaded yet is a valid degenerate case:
    simply pass an empty dict as ``part_hash_map``.

    :param vault: boto.glacier.vault.Vault object.
    :param upload_id: existing Glacier upload id of upload being resumed.
    :param part_size: part size of existing upload.
    :param fobj: file object containing local data to resume. This must read
        from the start of the entire upload, not just from the point being
        resumed. Use fobj.seek(0) to achieve this if necessary.
    :param part_hash_map: {part_index: part_tree_hash, ...} of data already
        uploaded. Each supplied part_tree_hash is verified; the part is
        re-uploaded on any mismatch.
    :param chunk_size: chunk size of tree hash calculation. This must be
        1 MiB for Amazon.
    """
    uploader = _Uploader(vault, upload_id, part_size, chunk_size)
    for index, data in enumerate(generate_parts_from_fobj(fobj, part_size)):
        local_hash = tree_hash(chunk_hashes(data, chunk_size))
        already_uploaded = (index in part_hash_map and
                            part_hash_map[index] == local_hash)
        if already_uploaded:
            uploader.skip_part(index, local_hash, len(data))
        else:
            uploader.upload_part(index, data)
    uploader.close()
    return uploader.archive_id
class Writer(object):
    """File-like object for writing an archive to Amazon Glacier.

    Data passed to write() is buffered into fixed-size parts and pushed
    through the multi-part upload API; close() flushes the last (short)
    part and completes the upload.
    """

    def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
        self.uploader = _Uploader(vault, upload_id, part_size, chunk_size)
        self.partitioner = _Partitioner(part_size, self._upload_part)
        self.closed = False
        self.next_part_index = 0

    def write(self, data):
        """Buffer ``data`` for upload; raises ValueError once closed."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self.partitioner.write(data)

    def _upload_part(self, part_data):
        # Partitioner callback: send the completed part, then advance the
        # index (only after a successful upload).
        self.uploader.upload_part(self.next_part_index, part_data)
        self.next_part_index += 1

    def close(self):
        """Flush buffered data and complete the upload (idempotent)."""
        if not self.closed:
            self.partitioner.flush()
            self.uploader.close()
            self.closed = True

    def get_archive_id(self):
        """Close the writer if needed and return the new archive's id."""
        self.close()
        return self.uploader.archive_id

    @property
    def current_tree_hash(self):
        """
        Returns the current tree hash for the data that's been written
        **so far**.

        Only once the writing is complete is the final tree hash returned.
        """
        return tree_hash(self.uploader._tree_hashes)

    @property
    def current_uploaded_size(self):
        """
        Returns the current uploaded size for the data that's been written
        **so far**.

        Only once the writing is complete is the final uploaded size returned.
        """
        return self.uploader._uploaded_size

    @property
    def upload_id(self):
        return self.uploader.upload_id

    @property
    def vault(self):
        return self.uploader.vault
| apache-2.0 |
nvoron23/socialite | jython/Lib/ConfigParser.py | 105 | 23116 | """Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
    """Root of the exception hierarchy raised by this configuration parser."""

    def __init__(self, msg=''):
        # The text is also kept on an attribute so that subclasses can
        # extend it after construction (see ParsingError.append).
        self.message = msg
        Exception.__init__(self, msg)

    def __repr__(self):
        return self.message

    # str() and repr() both display the bare message text.
    __str__ = __repr__
class NoSectionError(Error):
    """Raised when a requested section does not exist."""

    def __init__(self, section):
        message = 'No section: %r' % (section,)
        Error.__init__(self, message)
        self.section = section
class DuplicateSectionError(Error):
    """Raised when add_section() is asked to create an existing section."""

    def __init__(self, section):
        message = "Section %r already exists" % section
        Error.__init__(self, message)
        self.section = section
class NoOptionError(Error):
    """A requested option was not found."""

    def __init__(self, option, section):
        message = "No option %r in section: %r" % (option, section)
        Error.__init__(self, message)
        self.option = option
        self.section = section
class InterpolationError(Error):
    """Parent class of all errors raised while expanding %(name)s references."""

    def __init__(self, option, section, msg):
        Error.__init__(self, msg)
        # Remember where the failing interpolation originated.
        self.option = option
        self.section = section
class InterpolationMissingOptionError(InterpolationError):
    """A string substitution required a setting which was not available."""

    def __init__(self, option, section, rawval, reference):
        # Lay the details out one per line so the traceback stays readable.
        details = (section, option, reference, rawval)
        msg = ("Bad value substitution:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\tkey : %s\n"
               "\trawval : %s\n" % details)
        InterpolationError.__init__(self, option, section, msg)
        self.reference = reference
class InterpolationSyntaxError(InterpolationError):
    """Raised when the text being substituted into does not follow the
    required %-interpolation syntax."""
class InterpolationDepthError(InterpolationError):
    """Raised when substitutions are nested too deeply."""

    def __init__(self, option, section, rawval):
        details = (section, option, rawval)
        msg = ("Value interpolation too deeply recursive:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\trawval : %s\n" % details)
        InterpolationError.__init__(self, option, section, msg)
class ParsingError(Error):
    """Raised when a configuration file does not follow legal syntax."""

    def __init__(self, filename):
        Error.__init__(self, 'File contains parsing errors: %s' % filename)
        self.filename = filename
        self.errors = []

    def append(self, lineno, line):
        # Accumulate every bogus line; the combined message is raised once
        # after the whole file has been scanned.
        self.errors.append((lineno, line))
        self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
    """Raised when a key-value pair is found before any section header."""

    def __init__(self, filename, lineno, line):
        # Deliberately bypasses ParsingError.__init__: this error carries
        # its own one-shot message rather than an accumulating error list.
        msg = ('File contains no section headers.\nfile: %s, line: %d\n%r' %
               (filename, lineno, line))
        Error.__init__(self, msg)
        self.filename = filename
        self.lineno = lineno
        self.line = line
class RawConfigParser:
    # Basic .ini parser: stores sections and options verbatim, with no
    # %(name)s value interpolation (that is added by ConfigParser below).
    # NOTE: this is Python 2 code (`basestring`, `raise E, msg` syntax).

    def __init__(self, defaults=None):
        # Maps section name -> {option: value}; [DEFAULT] is kept separately.
        self._sections = {}
        self._defaults = {}
        if defaults:
            for key, value in defaults.items():
                # Option keys are normalized through optionxform (lowercase).
                self._defaults[self.optionxform(key)] = value

    def defaults(self):
        # Return the live [DEFAULT] mapping (not a copy).
        return self._defaults

    def sections(self):
        """Return a list of section names, excluding [DEFAULT]"""
        # self._sections will never have [DEFAULT] in it
        return self._sections.keys()

    def add_section(self, section):
        """Create a new section in the configuration.

        Raise DuplicateSectionError if a section by the specified name
        already exists.
        """
        if section in self._sections:
            raise DuplicateSectionError(section)
        self._sections[section] = {}

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.

        The DEFAULT section is not acknowledged.
        """
        return section in self._sections

    def options(self, section):
        """Return a list of option names for the given section name.

        Includes options inherited from [DEFAULT]; raises NoSectionError
        if the section does not exist.
        """
        try:
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        opts.update(self._defaults)
        # '__name__' is an internal bookkeeping key (see _read), not an option.
        if '__name__' in opts:
            del opts['__name__']
        return opts.keys()

    def read(self, filenames):
        """Read and parse a filename or a list of filenames.

        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read.  A single
        filename may also be given.

        Return list of successfully read files.
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                fp = open(filename)
            except IOError:
                # Missing/unreadable files are skipped by design.
                continue
            self._read(fp, filename)
            fp.close()
            read_ok.append(filename)
        return read_ok

    def readfp(self, fp, filename=None):
        """Like read() but the argument must be a file-like object.

        The `fp' argument must have a `readline' method.  Optional
        second argument is the `filename', which if not given, is
        taken from fp.name.  If fp has no `name' attribute, `<???>' is
        used.
        """
        if filename is None:
            try:
                filename = fp.name
            except AttributeError:
                filename = '<???>'
        self._read(fp, filename)

    def get(self, section, option):
        # Return the raw string value of `option`, falling back to
        # [DEFAULT]; raises NoSectionError/NoOptionError as appropriate.
        opt = self.optionxform(option)
        if section not in self._sections:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            if opt in self._defaults:
                return self._defaults[opt]
            else:
                raise NoOptionError(option, section)
        elif opt in self._sections[section]:
            return self._sections[section][opt]
        elif opt in self._defaults:
            return self._defaults[opt]
        else:
            raise NoOptionError(option, section)

    def items(self, section):
        # Return (name, value) pairs for the section merged over [DEFAULT].
        try:
            d2 = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            d2 = {}
        d = self._defaults.copy()
        d.update(d2)
        if "__name__" in d:
            del d["__name__"]
        return d.items()

    def _get(self, section, conv, option):
        # Shared helper for the typed getters below.
        return conv(self.get(section, option))

    def getint(self, section, option):
        return self._get(section, int, option)

    def getfloat(self, section, option):
        return self._get(section, float, option)

    # Accepted spellings for boolean option values (compared lowercase).
    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                       '0': False, 'no': False, 'false': False, 'off': False}

    def getboolean(self, section, option):
        v = self.get(section, option)
        if v.lower() not in self._boolean_states:
            raise ValueError, 'Not a boolean: %s' % v
        return self._boolean_states[v.lower()]

    def optionxform(self, optionstr):
        # Normalization applied to every option name; override to change it.
        return optionstr.lower()

    def has_option(self, section, option):
        """Check for the existence of a given option in a given section."""
        if not section or section == DEFAULTSECT:
            option = self.optionxform(option)
            return option in self._defaults
        elif section not in self._sections:
            return False
        else:
            option = self.optionxform(option)
            return (option in self._sections[section]
                    or option in self._defaults)

    def set(self, section, option, value):
        """Set an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        sectdict[self.optionxform(option)] = value

    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in self._defaults.items():
                # Embedded newlines become continuation lines (tab-indented).
                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in self._sections[section].items():
                if key != "__name__":
                    fp.write("%s = %s\n" %
                             (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")

    def remove_option(self, section, option):
        """Remove an option.

        Returns True if the option previously existed.
        """
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        option = self.optionxform(option)
        existed = option in sectdict
        if existed:
            del sectdict[option]
        return existed

    def remove_section(self, section):
        """Remove a file section.

        Returns True if the section previously existed.
        """
        existed = section in self._sections
        if existed:
            del self._sections[section]
        return existed

    #
    # Regular expressions for parsing section headers and options.
    #
    SECTCRE = re.compile(
        r'\['                                 # [
        r'(?P<header>[^]]+)'                  # very permissive!
        r'\]'                                 # ]
        )
    OPTCRE = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
                                              # followed by separator
                                              # (either : or =), followed
                                              # by any # space/tab
        r'(?P<value>.*)$'                     # everything up to eol
        )

    def _read(self, fp, fpname):
        """Parse a sectioned setup file.

        The sections in setup file contains a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else are ignored.
        """
        cursect = None                            # None, or a dictionary
        optname = None
        lineno = 0
        e = None                                  # None, or an exception
        while True:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if line.strip() == '' or line[0] in '#;':
                continue
            # Lines starting with "rem"/"Rem" are also treated as comments.
            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
                # no leading whitespace
                continue
            # continuation line?
            if line[0].isspace() and cursect is not None and optname:
                value = line.strip()
                if value:
                    # Append to the previous option's value with a newline.
                    cursect[optname] = "%s\n%s" % (cursect[optname], value)
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        # Re-entering a known section merges into it.
                        cursect = self._sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self._defaults
                    else:
                        cursect = {'__name__': sectname}
                        self._sections[sectname] = cursect
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self.OPTCRE.match(line)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if vi in ('=', ':') and ';' in optval:
                            # ';' is a comment delimiter only if it follows
                            # a spacing character
                            pos = optval.find(';')
                            if pos != -1 and optval[pos-1].isspace():
                                optval = optval[:pos]
                        optval = optval.strip()
                        # allow empty values
                        if optval == '""':
                            optval = ''
                        optname = self.optionxform(optname.rstrip())
                        cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred.  set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, repr(line))
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
class ConfigParser(RawConfigParser):
    # Extends RawConfigParser with classic %(name)s value interpolation.

    def get(self, section, option, raw=False, vars=None):
        """Get an option value for a given section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true.  Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        # Build the lookup map: defaults, overlaid by the section, overlaid
        # by the caller-supplied vars.
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            raise NoOptionError(option, section)
        if raw:
            return value
        else:
            return self._interpolate(section, option, value, d)

    def items(self, section, raw=False, vars=None):
        """Return a list of tuples with (name, value) for each option
        in the section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true.  Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        options = d.keys()
        # '__name__' is internal bookkeeping, never exposed as an option.
        if "__name__" in options:
            options.remove("__name__")
        if raw:
            return [(option, d[option])
                    for option in options]
        else:
            return [(option, self._interpolate(section, option, d[option], d))
                    for option in options]

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        value = rawval
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if "%(" in value:
                value = self._KEYCRE.sub(self._interpolation_replace, value)
                try:
                    value = value % vars
                except KeyError, e:
                    # NOTE: Python 2 only -- indexing the exception (e[0])
                    # retrieves the missing key name.
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e[0])
            else:
                break
        # A "%(" still remaining after MAX_INTERPOLATION_DEPTH passes means
        # the references never bottomed out.
        if "%(" in value:
            raise InterpolationDepthError(option, section, rawval)
        return value

    # Matches a %(name)s reference (group 1) or any single other character.
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def _interpolation_replace(self, match):
        # Normalize referenced option names through optionxform before the
        # `value % vars` expansion in _interpolate.
        s = match.group(1)
        if s is None:
            return match.group()
        else:
            return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
    # Stricter variant of ConfigParser: interpolation is done by a hand
    # written scanner instead of the `%` operator, and set() only accepts
    # string values.

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        L = []
        self._interpolate_some(option, L, rawval, section, vars, 1)
        return ''.join(L)

    # Matches a %(name)s reference at the start of a string.
    _interpvar_match = re.compile(r"%\(([^)]+)\)s").match

    def _interpolate_some(self, option, accum, rest, section, map, depth):
        # Recursively expand `rest`, appending literal text and resolved
        # references onto `accum`; `depth` guards against reference cycles.
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("%")
            if p < 0:
                # No more references: the remainder is literal text.
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                # "%%" is an escaped literal percent sign.
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._interpvar_match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = self.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rest, var)
                if "%" in v:
                    # The substituted value itself contains references.
                    self._interpolate_some(option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', found: %r" % (rest,))

    def set(self, section, option, value):
        """Set an option.  Extend ConfigParser.set: check for string values."""
        # NOTE: Python 2 `basestring` (covers str and unicode).
        if not isinstance(value, basestring):
            raise TypeError("option values must be strings")
        ConfigParser.set(self, section, option, value)
| apache-2.0 |
coxmediagroup/googleads-python-lib | examples/dfp/v201411/proposal_line_item_service/update_proposal_line_items.py | 4 | 2576 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a proposal line item's notes.
To determine which proposal line items exist,
run get_all_proposal_line_items.py.
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
# Set id of the proposal line item to update before running this example;
# run get_all_proposal_line_items.py to list valid ids.
PROPOSAL_LINE_ITEM_ID = 'INSERT_PROPOSAL_LINE_ITEM_ID_HERE'
def main(client, proposal_line_item_id):
# Initialize appropriate service.
proposal_line_item_service = client.GetService(
'ProposalLineItemService', version='v201411')
# Create statement to select a proposal line item.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': proposal_line_item_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values, 1)
# Get proposal line items by statement.
response = proposal_line_item_service.getProposalLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Update each the proposal line item's notes field.
proposal_line_item = response['results'][0]
proposal_line_item['notes'] = 'Proposal line item ready for submission.'
# Update proposal line items remotely.
proposal_line_items = proposal_line_item_service.updateProposalLineItems(
[proposal_line_item])
# Display results.
if proposal_line_items:
for proposal_line_item in proposal_line_items:
print ('Line item with id \'%s\', belonging to proposal id \'%s\' and,'
' named \'%s\' was updated.' % (
proposal_line_item['id'], proposal_line_item['proposalId'],
proposal_line_item['name']))
else:
print 'No proposal line items were updated.'
else:
print 'No proposal line items found to update.'
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): LoadFromStorage presumably reads credentials from the
  # standard googleads.yaml file -- confirm against the googleads docs.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, PROPOSAL_LINE_ITEM_ID)
| apache-2.0 |
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping | src/mem/slicc/ast/TypeFieldStateAST.py | 48 | 2745 | # Copyright (c) 2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.TypeFieldAST import TypeFieldAST
from slicc.symbols import Event, State
class TypeFieldStateAST(TypeFieldAST):
    """AST node for one state declared inside a SLICC State enumeration."""

    def __init__(self, slicc, field_id, perm_ast, pairs_ast):
        super(TypeFieldStateAST, self).__init__(slicc, pairs_ast)
        self.field_id = field_id
        self.perm_ast = perm_ast
        # Every state declaration must carry an explicit access permission.
        if perm_ast.type_ast.ident != "AccessPermission":
            self.error("AccessPermission enum value must be specified")
        self.pairs_ast = pairs_ast

    def __repr__(self):
        return "[TypeFieldState: %r]" % self.field_id

    def generate(self, type):
        if str(type) != "State":
            self.error("State Declaration must be of type State.")

        # Register this state as an enum member of the State type.
        if not type.addEnum(self.field_id, self.pairs_ast.pairs):
            self.error("Duplicate enumeration: %s:%s" % (type, self.field_id))

        # Record the state on the enclosing state machine.
        machine = self.symtab.state_machine
        if not machine:
            self.error("State declaration not part of a machine.")
        state = State(self.symtab, self.field_id, self.location, self.pairs)
        machine.addState(state)
        type.statePermPairAdd(state, self.perm_ast.value)
| bsd-3-clause |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/distutils/tests/test_dir_util.py | 16 | 4273 | """Tests for distutils.dir_util."""
import unittest
import os
import stat
import shutil
import sys
from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree,
ensure_relative)
from distutils import log
from distutils.tests import support
from test.support import run_unittest
class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
    # Tests for distutils.dir_util.  setUp() monkeypatches distutils.log.info
    # with _log so each test can assert on exactly what was logged.

    def _log(self, msg, *args):
        # Record the fully formatted message, mirroring log.info's behavior.
        if len(args) > 0:
            self._logs.append(msg % args)
        else:
            self._logs.append(msg)

    def setUp(self):
        super(DirUtilTestCase, self).setUp()
        self._logs = []
        tmp_dir = self.mkdtemp()
        # Layout: tmp/deep/here (nested target) and tmp/deep2 (copy target).
        self.root_target = os.path.join(tmp_dir, 'deep')
        self.target = os.path.join(self.root_target, 'here')
        self.target2 = os.path.join(tmp_dir, 'deep2')
        # Swap in the recording logger; restored in tearDown.
        self.old_log = log.info
        log.info = self._log

    def tearDown(self):
        log.info = self.old_log
        super(DirUtilTestCase, self).tearDown()

    def test_mkpath_remove_tree_verbosity(self):
        # verbose=0 must log nothing; verbose=1 logs one line per action.
        mkpath(self.target, verbose=0)
        wanted = []
        self.assertEqual(self._logs, wanted)
        remove_tree(self.root_target, verbose=0)

        mkpath(self.target, verbose=1)
        wanted = ['creating %s' % self.root_target,
                  'creating %s' % self.target]
        self.assertEqual(self._logs, wanted)
        self._logs = []

        remove_tree(self.root_target, verbose=1)
        wanted = ["removing '%s' (and everything under it)" % self.root_target]
        self.assertEqual(self._logs, wanted)

    @unittest.skipIf(sys.platform.startswith('win'),
                     "This test is only appropriate for POSIX-like systems.")
    def test_mkpath_with_custom_mode(self):
        # Get and set the current umask value for testing mode bits.
        umask = os.umask(0o002)
        os.umask(umask)
        mkpath(self.target, 0o700)
        # The requested mode is applied modulo the process umask.
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask)
        mkpath(self.target2, 0o555)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask)

    def test_create_tree_verbosity(self):
        # create_tree only logs creation of the base directory, not the files.
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
        self.assertEqual(self._logs, [])
        remove_tree(self.root_target, verbose=0)

        wanted = ['creating %s' % self.root_target]
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
        self.assertEqual(self._logs, wanted)

        remove_tree(self.root_target, verbose=0)

    def test_copy_tree_verbosity(self):
        mkpath(self.target, verbose=0)

        # Copying an empty tree logs nothing.
        copy_tree(self.target, self.target2, verbose=0)
        self.assertEqual(self._logs, [])

        remove_tree(self.root_target, verbose=0)

        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        with open(a_file, 'w') as f:
            f.write('some content')

        # With one file present, exactly one "copying" line is expected.
        wanted = ['copying %s -> %s' % (a_file, self.target2)]
        copy_tree(self.target, self.target2, verbose=1)
        self.assertEqual(self._logs, wanted)

        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)

    def test_copy_tree_skips_nfs_temp_files(self):
        mkpath(self.target, verbose=0)

        a_file = os.path.join(self.target, 'ok.txt')
        # .nfsXXX files are NFS "silly rename" leftovers; copy_tree must
        # not copy them.
        nfs_file = os.path.join(self.target, '.nfs123abc')
        for f in a_file, nfs_file:
            with open(f, 'w') as fh:
                fh.write('some content')

        copy_tree(self.target, self.target2)
        self.assertEqual(os.listdir(self.target2), ['ok.txt'])

        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)

    def test_ensure_relative(self):
        # ensure_relative strips the leading separator (or keeps the drive
        # letter on Windows) so the path can be joined under another root.
        if os.sep == '/':
            self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
            self.assertEqual(ensure_relative('some/path'), 'some/path')
        else:   # \\
            self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo')
            self.assertEqual(ensure_relative('home\\foo'), 'home\\foo')
def test_suite():
    """Return a TestSuite containing all DirUtilTestCase tests.

    Uses unittest.TestLoader directly: unittest.makeSuite() is deprecated
    (and removed in Python 3.13) and was only a thin wrapper around
    loadTestsFromTestCase(), so the returned suite is identical.
    """
    return unittest.TestLoader().loadTestsFromTestCase(DirUtilTestCase)
if __name__ == "__main__":
    # Allow running this test module directly: python test_dir_util.py
    run_unittest(test_suite())
| apache-2.0 |
tqtran7/horizon | openstack_dashboard/dashboards/identity/groups/forms.py | 62 | 2820 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateGroupForm(forms.SelfHandlingForm):
    """Dashboard form that creates a new keystone group."""

    name = forms.CharField(label=_("Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(
                                  attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)

    def handle(self, request, data):
        """Create the group; return True on success, False on failure."""
        group_name = data['name']
        try:
            LOG.info('Creating group with name "%s"' % group_name)
            # Scope the new group to the domain selected in the session,
            # if any.
            domain_context = request.session.get('domain_context', None)
            api.keystone.group_create(
                request,
                domain_id=domain_context,
                name=group_name,
                description=data['description'])
            messages.success(request,
                             _('Group "%s" was successfully created.')
                             % group_name)
        except Exception:
            # Surface a generic error to the user via the horizon handler.
            exceptions.handle(request, _('Unable to create group.'))
            return False
        return True
class UpdateGroupForm(forms.SelfHandlingForm):
    """Dashboard form that renames/redescribes an existing keystone group."""

    group_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(label=_("Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(
                                  attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)

    def handle(self, request, data):
        """Apply the update; return True on success, False on failure."""
        # The hidden group_id field identifies which group to modify; it is
        # popped so only name/description remain as user-editable data.
        group_id = data.pop('group_id')
        try:
            api.keystone.group_update(request,
                                      group_id=group_id,
                                      name=data['name'],
                                      description=data['description'])
            messages.success(request,
                             _('Group has been updated successfully.'))
        except Exception:
            # Surface a generic error to the user via the horizon handler.
            exceptions.handle(request, _('Unable to update the group.'))
            return False
        return True
| apache-2.0 |
yuecong/dd-agent | checks.d/lighttpd.py | 35 | 6352 | # stdlib
import re
import urlparse
# 3rd party
import requests
# project
from checks import AgentCheck
from util import headers
VERSION_REGEX = re.compile(r".*/(\d)")
class Lighttpd(AgentCheck):
    """Tracks basic connection/requests/workers metrics

    See http://redmine.lighttpd.net/projects/1/wiki/Docs_ModStatus for Lighttpd details
    See http://redmine.lighttpd.net/projects/lighttpd2/wiki/Mod_status for Lighttpd2 details
    """
    # Service check emitted for status-page reachability.
    SERVICE_CHECK_NAME = 'lighttpd.can_connect'

    # Query-string suffix selecting the machine-readable status format,
    # keyed by major server version parsed from the Server header.
    URL_SUFFIX_PER_VERSION = {
        1: '?auto',
        2: '?format=plain',
        'Unknown': '?auto'
    }

    # Status-page field name -> metric name.  A field may appear in more
    # than one table and will then be submitted under each metric type.
    GAUGES = {
        'IdleServers': 'lighttpd.performance.idle_server',
        'BusyServers': 'lighttpd.performance.busy_servers',
        'Uptime': 'lighttpd.performance.uptime',
        'Total kBytes': 'lighttpd.net.bytes',
        'Total Accesses': 'lighttpd.net.hits',
        'memory_usage': 'lighttpd.performance.memory_usage',
        'requests_avg': 'lighttpd.net.requests_avg',
        'traffic_out_avg': 'lighttpd.net.bytes_out_avg',
        'traffic_in_avg': 'lighttpd.net.bytes_in_avg',
        'connections_avg': 'lighttpd.net.connections_avg',
        'connection_state_start': 'lighttpd.connections.state_start',
        'connection_state_read_header': 'lighttpd.connections.state_read_header',
        'connection_state_handle_request': 'lighttpd.connections.state_handle_request',
        'connection_state_write_response': 'lighttpd.connections.state_write_response',
        'connection_state_keep_alive': 'lighttpd.connections.state_keep_alive',
        'requests_avg_5sec': 'lighttpd.net.requests_avg_5sec',
        'traffic_out_avg_5sec': 'lighttpd.net.bytes_out_avg_5sec',
        'traffic_in_avg_5sec': 'lighttpd.net.bytes_in_avg_5sec',
        'connections_avg_5sec': 'lighttpd.net.connections_avg_5sec',
    }

    COUNTERS = {
        'requests_abs': 'lighttpd.net.requests_total',
        'traffic_out_abs': 'lighttpd.net.bytes_out',
        'traffic_in_abs': 'lighttpd.net.bytes_in',
        'connections_abs': 'lighttpd.net.connections_total',
        'status_1xx': 'lighttpd.response.status_1xx',
        'status_2xx': 'lighttpd.response.status_2xx',
        'status_3xx': 'lighttpd.response.status_3xx',
        'status_4xx': 'lighttpd.response.status_4xx',
        'status_5xx': 'lighttpd.response.status_5xx',
    }

    RATES = {
        'Total kBytes': 'lighttpd.net.bytes_per_s',
        'Total Accesses': 'lighttpd.net.request_per_s'
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Maps configured status URL -> URL with the version-specific suffix
        # appended, learned on a first fetch that yielded no metrics.
        self.assumed_url = {}

    def check(self, instance):
        # Fetch the status page, report a can_connect service check, then
        # parse "name: value" lines into gauges/rates/counters.
        if 'lighttpd_status_url' not in instance:
            raise Exception("Missing 'lighttpd_status_url' variable in Lighttpd config")

        # Prefer a previously learned suffixed URL over the raw config value.
        url = self.assumed_url.get(instance['lighttpd_status_url'], instance['lighttpd_status_url'])

        tags = instance.get('tags', [])

        self.log.debug("Connecting to %s" % url)

        auth = None
        if 'user' in instance and 'password' in instance:
            auth = (instance['user'], instance['password'])

        # Submit a service check for status page availability.
        parsed_url = urlparse.urlparse(url)
        lighttpd_url = parsed_url.hostname
        lighttpd_port = parsed_url.port or 80
        service_check_tags = ['host:%s' % lighttpd_url, 'port:%s' % lighttpd_port]
        try:
            # `headers` here is the helper imported from util (builds the
            # agent's HTTP headers), not the response headers below.
            r = requests.get(url, auth=auth, headers=headers(self.agentConfig))
            r.raise_for_status()
        except Exception:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=service_check_tags)
            raise
        else:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                               tags=service_check_tags)

        headers_resp = r.headers
        server_version = self._get_server_version(headers_resp)
        response = r.content

        metric_count = 0
        # Loop through and extract the numerical values
        for line in response.split('\n'):
            values = line.split(': ')
            if len(values) == 2:  # match
                metric, value = values
                try:
                    value = float(value)
                except ValueError:
                    continue

                # Special case: kBytes => bytes
                if metric == 'Total kBytes':
                    value = value * 1024

                # Send metric as a gauge, if applicable
                if metric in self.GAUGES:
                    metric_count += 1
                    metric_name = self.GAUGES[metric]
                    self.gauge(metric_name, value, tags=tags)

                # Send metric as a rate, if applicable
                if metric in self.RATES:
                    metric_count += 1
                    metric_name = self.RATES[metric]
                    self.rate(metric_name, value, tags=tags)

                # Send metric as a counter, if applicable
                if metric in self.COUNTERS:
                    metric_count += 1
                    metric_name = self.COUNTERS[metric]
                    self.increment(metric_name, value, tags=tags)

        if metric_count == 0:
            # Nothing parsed: the URL probably lacks the machine-readable
            # suffix.  Append it once, remember the fixed URL, and retry.
            # The assumed_url bookkeeping ensures this recursion happens at
            # most once per configured URL.
            url_suffix = self.URL_SUFFIX_PER_VERSION[server_version]
            if self.assumed_url.get(instance['lighttpd_status_url']) is None and url[-len(url_suffix):] != url_suffix:
                self.assumed_url[instance['lighttpd_status_url']] = '%s%s' % (url, url_suffix)
                self.warning("Assuming url was not correct. Trying to add %s suffix to the url" % url_suffix)
                self.check(instance)
            else:
                raise Exception("No metrics were fetched for this instance. Make sure "
                                "that %s is the proper url." % instance['lighttpd_status_url'])

    def _get_server_version(self, headers):
        # Parse the major version out of the HTTP Server response header,
        # e.g. "lighttpd/1.4.35" -> 1.  Note: `headers` shadows the util
        # helper of the same name inside this method.
        server_version = headers.get("server", "")
        match = VERSION_REGEX.match(server_version)
        if match is None:
            self.log.debug("Lighttpd server version is Unknown")
            return "Unknown"
        version = int(match.group(1))
        self.log.debug("Lighttpd server version is %s" % version)
        return version
| bsd-3-clause |
aferr/TemporalPartitioningMemCtl | src/python/m5/main.py | 16 | 12839 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
# Names exported by this module.
__all__ = [ 'options', 'arguments', 'main' ]
# optparse expands '%prog' to the program name in these strings.
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
# One-line notice printed at startup; the full text is behind --copyright.
brief_copyright=\
    "gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
    """Build gem5's command-line parser and parse sys.argv.

    Registers all simulator options, then lets a user-supplied
    'options.py' (located via the config module) override defaults.
    Returns an (options, arguments) pair.  Python 2 only (execfile).
    """
    import config
    from options import OptionParser
    options = OptionParser(usage=usage, version=version,
                           description=brief_copyright)
    option = options.add_option
    group = options.set_group
    # Help options
    option('-B', "--build-info", action="store_true", default=False,
        help="Show build information")
    option('-C', "--copyright", action="store_true", default=False,
        help="Show full copyright information")
    option('-R', "--readme", action="store_true", default=False,
        help="Show the readme")
    # Options for configuring the base simulator
    option('-d', "--outdir", metavar="DIR", default="m5out",
        help="Set the output directory to DIR [Default: %default]")
    option('-r', "--redirect-stdout", action="store_true", default=False,
        help="Redirect stdout (& stderr, without -e) to file")
    option('-e', "--redirect-stderr", action="store_true", default=False,
        help="Redirect stderr to file")
    option("--stdout-file", metavar="FILE", default="simout",
        help="Filename for -r redirection [Default: %default]")
    option("--stderr-file", metavar="FILE", default="simerr",
        help="Filename for -e redirection [Default: %default]")
    option('-i', "--interactive", action="store_true", default=False,
        help="Invoke the interactive interpreter after running the script")
    option("--pdb", action="store_true", default=False,
        help="Invoke the python debugger before running the script")
    option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
        help="Prepend PATH to the system path when invoking the script")
    option('-q', "--quiet", action="count", default=0,
        help="Reduce verbosity")
    option('-v', "--verbose", action="count", default=0,
        help="Increase verbosity")
    # Statistics options
    group("Statistics Options")
    option("--stats-file", metavar="FILE", default="stats.txt",
        help="Sets the output file for statistics [Default: %default]")
    # Configuration Options
    group("Configuration Options")
    option("--dump-config", metavar="FILE", default="config.ini",
        help="Dump configuration output file [Default: %default]")
    option("--json-config", metavar="FILE", default="config.json",
        help="Create JSON output of the configuration [Default: %default]")
    option("--dot-config", metavar="FILE", default="config.dot",
        help="Create DOT & pdf outputs of the configuration [Default: %default]")
    # Debugging options
    group("Debugging Options")
    option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
        help="Cycle to create a breakpoint")
    option("--debug-help", action='store_true',
        help="Print help on trace flags")
    option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
        help="Sets the flags for tracing (-FLAG disables a flag)")
    option("--remote-gdb-port", type='int', default=7000,
        help="Remote gdb base port (set to 0 to disable listening)")
    # Tracing options
    group("Trace Options")
    option("--trace-start", metavar="TIME", type='int',
        help="Start tracing at TIME (must be in ticks)")
    option("--trace-file", metavar="FILE", default="cout",
        help="Sets the output file for tracing [Default: %default]")
    option("--trace-ignore", metavar="EXPR", action='append', split=':',
        help="Ignore EXPR sim objects")
    # Help options
    group("Help Options")
    option("--list-sim-objects", action='store_true', default=False,
        help="List all built-in SimObjects, their params and default values")
    # load the options.py config file to allow people to set their own
    # default options
    options_file = config.get('options.py')
    if options_file:
        # options.py runs with 'options' in scope so it can tweak defaults.
        scope = { 'options' : options }
        execfile(options_file, scope)
    arguments = options.parse_args()
    return options,arguments
def interact(scope):
    """Open an interactive console over *scope*, preferring IPython.

    Falls back to the stdlib InteractiveConsole when IPython is not
    importable.
    """
    # Clear argv so IPython does not try to interpret gem5's options.
    sys.argv = []
    greeting = "gem5 Interactive Console"
    try:
        from IPython.Shell import IPShellEmbed
        embedded = IPShellEmbed(banner=greeting, user_ns=scope)
        embedded()
    except ImportError:
        code.InteractiveConsole(scope).interact(greeting)
def main(*args):
    """Simulator entry point.

    Parses (or accepts pre-parsed) options, handles the purely
    informational flags, configures output redirection, stats, debug
    and trace state, then executes the user's simulation script in a
    fresh module scope named '__m5_main__'.  Python 2 only.
    """
    import m5
    import core
    import debug
    import defines
    import event
    import info
    import stats
    import trace
    from util import fatal
    # Callable either with no args (parse sys.argv ourselves) or with a
    # pre-parsed (options, arguments) pair.
    if len(args) == 0:
        options, arguments = parse_options()
    elif len(args) == 2:
        options, arguments = args
    else:
        raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)
    m5.options = options
    def check_tracing():
        # Fatal error if this binary was compiled without tracing support.
        if defines.TRACING_ON:
            return
        fatal("Tracing is not enabled. Compile with TRACING_ON")
    if not os.path.isdir(options.outdir):
        os.makedirs(options.outdir)
    # These filenames are used only if the redirect_std* options are set
    stdout_file = os.path.join(options.outdir, options.stdout_file)
    stderr_file = os.path.join(options.outdir, options.stderr_file)
    # Print redirection notices here before doing any redirection
    if options.redirect_stdout and not options.redirect_stderr:
        print "Redirecting stdout and stderr to", stdout_file
    else:
        if options.redirect_stdout:
            print "Redirecting stdout to", stdout_file
        if options.redirect_stderr:
            print "Redirecting stderr to", stderr_file
    # Now redirect stdout/stderr as desired
    # With -r but no -e, stderr follows stdout into the same file (see dup2
    # below), which is why the combined message above is printed.
    if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stdout.fileno())
        if not options.redirect_stderr:
            os.dup2(redir_fd, sys.stderr.fileno())
    if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stderr.fileno())
    # 'done' tracks whether any informational option was handled; if so we
    # exit without running a simulation.
    done = False
    if options.build_info:
        done = True
        print 'Build information:'
        print
        print 'compiled %s' % defines.compileDate;
        print 'build options:'
        keys = defines.buildEnv.keys()
        keys.sort()
        for key in keys:
            val = defines.buildEnv[key]
            print ' %s = %s' % (key, val)
        print
    if options.copyright:
        done = True
        print info.COPYING
        print
    if options.readme:
        done = True
        print 'Readme:'
        print
        print info.README
        print
    if options.debug_help:
        done = True
        check_tracing()
        debug.help()
    if options.list_sim_objects:
        import SimObject
        done = True
        print "SimObjects:"
        objects = SimObject.allClasses.keys()
        objects.sort()
        for name in objects:
            obj = SimObject.allClasses[name]
            print " %s" % obj
            params = obj._params.keys()
            params.sort()
            for pname in params:
                param = obj._params[pname]
                default = getattr(param, 'default', '')
                print " %s" % pname
                if default:
                    print " default: %s" % default
                print " desc: %s" % param.desc
                print
            print
    if done:
        sys.exit(0)
    # setting verbose and quiet at the same time doesn't make sense
    if options.verbose > 0 and options.quiet > 0:
        options.usage(2)
    verbose = options.verbose - options.quiet
    # NOTE(review): 'verbose' is computed above but the check below tests
    # options.verbose, which (count option, default 0) is always >= 0, so
    # the banner always prints; this likely meant 'if verbose >= 0'.
    if options.verbose >= 0:
        print "gem5 Simulator System. http://gem5.org"
        print brief_copyright
        print
        print "gem5 compiled %s" % defines.compileDate;
        print "gem5 started %s" % \
            datetime.datetime.now().strftime("%b %e %Y %X")
        print "gem5 executing on %s" % socket.gethostname()
        print "command line:",
        for argv in sys.argv:
            print argv,
        print
    # check to make sure we can find the listed script
    if not arguments or not os.path.isfile(arguments[0]):
        if arguments and not os.path.isfile(arguments[0]):
            print "Script %s not found" % arguments[0]
        options.usage(2)
    # tell C++ about output directory
    core.setOutputDir(options.outdir)
    # update the system path with elements from the -p option
    sys.path[0:0] = options.path
    # set stats options
    stats.initText(options.stats_file)
    # set debugging options
    debug.setRemoteGDBPort(options.remote_gdb_port)
    # NOTE(review): assumes options.debug_break defaults to an iterable
    # (gem5's 'append' options default to []) -- verify in options.py.
    for when in options.debug_break:
        debug.schedBreakCycle(int(when))
    if options.debug_flags:
        check_tracing()
        on_flags = []
        off_flags = []
        for flag in options.debug_flags:
            # A leading '-' disables the flag instead of enabling it.
            off = False
            if flag.startswith('-'):
                flag = flag[1:]
                off = True
            if flag not in debug.flags:
                print >>sys.stderr, "invalid debug flag '%s'" % flag
                sys.exit(1)
            if off:
                debug.flags[flag].disable()
            else:
                debug.flags[flag].enable()
    if options.trace_start:
        # Delay trace enabling until the requested tick via an event.
        check_tracing()
        e = event.create(trace.enable, event.Event.Trace_Enable_Pri)
        event.mainq.schedule(e, options.trace_start)
    else:
        trace.enable()
    trace.output(options.trace_file)
    for ignore in options.trace_ignore:
        check_tracing()
        trace.ignore(ignore)
    # Make the script's own arguments/path visible, then compile it once so
    # tracebacks carry the real filename.
    sys.argv = arguments
    sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path
    filename = sys.argv[0]
    filedata = file(filename, 'r').read()
    filecode = compile(filedata, filename, 'exec')
    scope = { '__file__' : filename,
              '__name__' : '__m5_main__' }
    # we want readline if we're doing anything interactive
    if options.interactive or options.pdb:
        exec "import readline" in scope
    # if pdb was requested, execfile the thing under pdb, otherwise,
    # just do the execfile normally
    if options.pdb:
        import pdb
        import traceback
        pdb = pdb.Pdb()
        try:
            pdb.run(filecode, scope)
        except SystemExit:
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            # Walk to the innermost frame for post-mortem debugging.
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            t = sys.exc_info()[2]
            while t.tb_next is not None:
                t = t.tb_next
            pdb.interaction(t.tb_frame,t)
    else:
        exec filecode in scope
    # once the script is done
    if options.interactive:
        interact(scope)
if __name__ == '__main__':
    # Standalone smoke test: parse the command line and pretty-print the
    # resulting option/argument structures without running a simulation.
    from pprint import pprint
    options, arguments = parse_options()
    print 'opts:'
    pprint(options, indent=4)
    print
    print 'args:'
    pprint(arguments, indent=4)
| bsd-3-clause |
WriterOfAlicrow/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/searchcontext.py | 251 | 2153 | """WebDriver element location functionality."""
class SearchContext(object):
    """Mixin providing the WebDriver element-location helpers.

    Concrete subclasses supply ``execute`` (the command transport) and
    ``mode`` ('strict' or 'compatibility'), which selects the locator
    wire format.
    """

    def find_element_by_css(self, selector):
        """Return the first element matching a CSS selector."""
        return self._find_element('css selector', selector)

    def find_elements_by_css(self, selector):
        """Return every element matching a CSS selector."""
        return self._find_elements('css selector', selector)

    def find_element_by_link_text(self, text):
        """Return the first link whose text equals *text*."""
        return self._find_element('link text', text)

    def find_elements_by_link_text(self, text):
        """Return every link whose text equals *text*."""
        return self._find_elements('link text', text)

    def find_element_by_partial_link_text(self, text):
        """Return the first link whose text contains *text*."""
        return self._find_element('partial link text', text)

    def find_elements_by_partial_link_text(self, text):
        """Return every link whose text contains *text*."""
        return self._find_elements('partial link text', text)

    def find_element_by_xpath(self, xpath):
        """Return the first element matching an XPath expression."""
        return self._find_element('xpath', xpath)

    def find_elements_by_xpath(self, xpath):
        """Return every element matching an XPath expression."""
        return self._find_elements('xpath', xpath)

    def _find_element(self, strategy, value):
        locator = self._get_locator(strategy, value)
        return self.execute('POST', '/element', 'findElement', locator)

    def _find_elements(self, strategy, value):
        locator = self._get_locator(strategy, value)
        return self.execute('POST', '/elements', 'findElements', locator)

    def _get_locator(self, strategy, value):
        # 'strict' speaks the W3C-style dialect ('strategy' key), while
        # 'compatibility' speaks the Selenium JSON-wire dialect ('using'
        # key).  Any other mode yields None, as before.
        key = {'strict': 'strategy', 'compatibility': 'using'}.get(self.mode)
        if key is not None:
            return {key: strategy, 'value': value}
| mpl-2.0 |
cuongnv23/ansible | lib/ansible/modules/network/cloudengine/ce_vxlan_global.py | 27 | 19113 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_global
version_added: "2.4"
short_description: Manages global attributes of VXLAN and bridge domain on HUAWEI CloudEngine devices.
description:
- Manages global attributes of VXLAN and bridge domain on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
bridge_domain_id:
description:
- Specifies a bridge domain ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
tunnel_mode_vxlan:
description:
- Set the tunnel mode to VXLAN when configuring the VXLAN feature.
required: false
choices: ['enable', 'disable']
default: null
nvo3_prevent_loops:
description:
- Loop prevention of VXLAN traffic in non-enhanced mode.
When the device works in non-enhanced mode,
inter-card forwarding of VXLAN traffic may result in loops.
required: false
choices: ['enable', 'disable']
default: null
nvo3_acl_extend:
description:
- Enabling or disabling the VXLAN ACL extension function.
required: false
choices: ['enable', 'disable']
default: null
nvo3_gw_enhanced:
description:
- Configuring the Layer 3 VXLAN Gateway to Work in Non-loopback Mode.
required: false
choices: ['l2', 'l3']
default: null
nvo3_service_extend:
description:
- Enabling or disabling the VXLAN service extension function.
required: false
choices: ['enable', 'disable']
default: null
nvo3_eth_trunk_hash:
description:
- Eth-Trunk from load balancing VXLAN packets in optimized mode.
required: false
choices: ['enable','disable']
default: null
nvo3_ecmp_hash:
description:
- Load balancing of VXLAN packets through ECMP in optimized mode.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan global module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Create bridge domain and set tunnel mode to VXLAN
ce_vxlan_global:
bridge_domain_id: 100
nvo3_acl_extend: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"bridge_domain_id": "100", "nvo3_acl_extend": "enable", state="present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"bridge_domain": {"80", "90"}, "nvo3_acl_extend": "disable"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"bridge_domain_id": {"80", "90", "100"}, "nvo3_acl_extend": "enable"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["bridge-domain 100",
"ip tunnel mode vxlan"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, get_nc_config
from ansible.module_utils.ce import ce_argument_spec
CE_NC_GET_BRIDGE_DOMAIN = """
<filter type="subtree">
<evc xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bds>
<bd>
<bdId></bdId>
</bd>
</bds>
</evc>
</filter>
"""
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when *test_cfg* appears verbatim inside *cmp_cfg*.

    Either argument being empty/None yields False.
    """
    if cmp_cfg and test_cfg:
        return test_cfg in cmp_cfg
    return False
def get_nvo3_gw_enhanced(cmp_cfg):
    """Extract the Layer 3 VXLAN gateway enhanced mode from config text.

    Scans *cmp_cfg* for 'assign forward nvo3-gateway enhanced l2|l3' and
    returns 'l2' or 'l3', or None when the command is not present.
    """
    # Fix: the original character class [2|3] also matched a literal '|'
    # (so garbage like 'enhanced l|' would be returned); [23] matches
    # exactly 'l2' or 'l3'.
    found = re.findall(
        r"assign forward nvo3-gateway enhanced (l[23])", cmp_cfg)
    return found[0] if found else None
class VxlanGlobal(object):
    """
    Manages global attributes of VXLAN and bridge domain.

    Workflow (see work()): read current device state, compute the CLI
    commands needed to reach the requested state, apply them, then
    report proposed/existing/end_state/updates back to Ansible.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # module input info
        self.tunnel_mode_vxlan = self.module.params['tunnel_mode_vxlan']
        self.nvo3_prevent_loops = self.module.params['nvo3_prevent_loops']
        self.nvo3_acl_extend = self.module.params['nvo3_acl_extend']
        self.nvo3_gw_enhanced = self.module.params['nvo3_gw_enhanced']
        self.nvo3_service_extend = self.module.params['nvo3_service_extend']
        self.nvo3_eth_trunk_hash = self.module.params['nvo3_eth_trunk_hash']
        self.nvo3_ecmp_hash = self.module.params['nvo3_ecmp_hash']
        self.bridge_domain_id = self.module.params['bridge_domain_id']
        self.state = self.module.params['state']
        # state
        self.config = ""  # current device CLI config (filtered)
        self.bd_info = list()  # bridge-domain IDs currently configured
        self.changed = False
        self.updates_cmd = list()  # commands reported to the user
        self.commands = list()     # commands actually sent to the device
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """init module"""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def cli_load_config(self, commands):
        """load config by cli (skipped in check mode)"""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def get_current_config(self):
        """get current configuration relevant to vxlan/assign commands"""
        flags = list()
        exp = " include-default | include vxlan|assign | exclude undo"
        flags.append(exp)
        return get_config(self.module, flags)

    def cli_add_command(self, command, undo=False):
        """add command to self.update_cmd and self.commands

        'quit'/'return' are sent to the device but not reported as
        updates; undo=True prefixes 'undo ' (except for quit/return).
        """
        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command
        self.commands.append(cmd)  # set to device
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)  # show updates result

    def get_bd_list(self):
        """get bridge domain list via NETCONF"""
        bd_info = list()
        conf_str = CE_NC_GET_BRIDGE_DOMAIN
        xml_str = get_nc_config(self.module, conf_str)
        if "<data/>" in xml_str:
            return bd_info
        # strip namespaces so plain path lookups work below
        xml_str = xml_str.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        # get bridge domain info
        root = ElementTree.fromstring(xml_str)
        bds = root.findall("data/evc/bds/bd/bdId")
        if not bds:
            return bd_info
        for bridge_domain in bds:
            if bridge_domain.tag == "bdId":
                bd_info.append(bridge_domain.text)
        return bd_info

    def config_bridge_domain(self):
        """create/delete the requested bridge domain"""
        if not self.bridge_domain_id:
            return
        cmd = "bridge-domain %s" % self.bridge_domain_id
        exist = self.bridge_domain_id in self.bd_info
        if self.state == "present":
            if not exist:
                self.cli_add_command(cmd)
                self.cli_add_command("quit")
        else:
            if exist:
                self.cli_add_command(cmd, undo=True)

    def config_tunnel_mode(self):
        """config tunnel mode vxlan"""
        # ip tunnel mode vxlan
        if self.tunnel_mode_vxlan:
            cmd = "ip tunnel mode vxlan"
            exist = is_config_exist(self.config, cmd)
            if self.tunnel_mode_vxlan == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)

    def config_assign_forward(self):
        """config assign forward commands for the nvo3_* options"""
        # [undo] assign forward nvo3-gateway enhanced {l2|l3)
        if self.nvo3_gw_enhanced:
            cmd = "assign forward nvo3-gateway enhanced %s" % self.nvo3_gw_enhanced
            exist = is_config_exist(self.config, cmd)
            if self.state == "present":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)
        # [undo] assign forward nvo3 f-linecard compatibility enable
        if self.nvo3_prevent_loops:
            cmd = "assign forward nvo3 f-linecard compatibility enable"
            exist = is_config_exist(self.config, cmd)
            if self.nvo3_prevent_loops == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)
        # [undo] assign forward nvo3 acl extend enable
        if self.nvo3_acl_extend:
            cmd = "assign forward nvo3 acl extend enable"
            exist = is_config_exist(self.config, cmd)
            if self.nvo3_acl_extend == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)
        # [undo] assign forward nvo3 service extend enable
        if self.nvo3_service_extend:
            cmd = "assign forward nvo3 service extend enable"
            exist = is_config_exist(self.config, cmd)
            if self.nvo3_service_extend == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)
        # assign forward nvo3 eth-trunk hash {enable|disable}
        if self.nvo3_eth_trunk_hash:
            cmd = "assign forward nvo3 eth-trunk hash enable"
            exist = is_config_exist(self.config, cmd)
            if self.nvo3_eth_trunk_hash == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)
        # [undo] assign forward nvo3 ecmp hash enable
        if self.nvo3_ecmp_hash:
            cmd = "assign forward nvo3 ecmp hash enable"
            exist = is_config_exist(self.config, cmd)
            if self.nvo3_ecmp_hash == "enable":
                if not exist:
                    self.cli_add_command(cmd)
            else:
                if exist:
                    self.cli_add_command(cmd, undo=True)

    def check_params(self):
        """Check all input params"""
        # bridge domain id check
        if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: bridge domain id is not digit.")
            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
                self.module.fail_json(
                    msg="Error: bridge domain id is not in the range from 1 to 16777215.")

    def get_proposed(self):
        """get proposed info (only the parameters the user supplied)"""
        if self.tunnel_mode_vxlan:
            self.proposed["tunnel_mode_vxlan"] = self.tunnel_mode_vxlan
        if self.nvo3_prevent_loops:
            self.proposed["nvo3_prevent_loops"] = self.nvo3_prevent_loops
        if self.nvo3_acl_extend:
            self.proposed["nvo3_acl_extend"] = self.nvo3_acl_extend
        if self.nvo3_gw_enhanced:
            self.proposed["nvo3_gw_enhanced"] = self.nvo3_gw_enhanced
        if self.nvo3_service_extend:
            self.proposed["nvo3_service_extend"] = self.nvo3_service_extend
        if self.nvo3_eth_trunk_hash:
            self.proposed["nvo3_eth_trunk_hash"] = self.nvo3_eth_trunk_hash
        if self.nvo3_ecmp_hash:
            self.proposed["nvo3_ecmp_hash"] = self.nvo3_ecmp_hash
        if self.bridge_domain_id:
            self.proposed["bridge_domain_id"] = self.bridge_domain_id
        self.proposed["state"] = self.state

    def get_existing(self):
        """get existing info from the config snapshot taken in work()"""
        self.existing["bridge_domain"] = self.bd_info
        cmd = "ip tunnel mode vxlan"
        exist = is_config_exist(self.config, cmd)
        if exist:
            self.existing["tunnel_mode_vxlan"] = "enable"
        else:
            self.existing["tunnel_mode_vxlan"] = "disable"
        cmd = "assign forward nvo3 f-linecard compatibility enable"
        exist = is_config_exist(self.config, cmd)
        if exist:
            self.existing["nvo3_prevent_loops"] = "enable"
        else:
            self.existing["nvo3_prevent_loops"] = "disable"
        cmd = "assign forward nvo3 acl extend enable"
        exist = is_config_exist(self.config, cmd)
        if exist:
            self.existing["nvo3_acl_extend"] = "enable"
        else:
            self.existing["nvo3_acl_extend"] = "disable"
        self.existing["nvo3_gw_enhanced"] = get_nvo3_gw_enhanced(
            self.config)
        cmd = "assign forward nvo3 service extend enable"
        exist = is_config_exist(self.config, cmd)
        if exist:
            self.existing["nvo3_service_extend"] = "enable"
        else:
            self.existing["nvo3_service_extend"] = "disable"
        cmd = "assign forward nvo3 eth-trunk hash enable"
        exist = is_config_exist(self.config, cmd)
        if exist:
            self.existing["nvo3_eth_trunk_hash"] = "enable"
        else:
            self.existing["nvo3_eth_trunk_hash"] = "disable"
        cmd = "assign forward nvo3 ecmp hash enable"
        exist = is_config_exist(self.config, cmd)
        if exist:
            # Fix: this branch previously set "disable" even when the
            # command was present, so 'existing' always reported
            # nvo3_ecmp_hash as "disable" (cf. get_end_state, which is
            # correct).
            self.existing["nvo3_ecmp_hash"] = "enable"
        else:
            self.existing["nvo3_ecmp_hash"] = "disable"

    def get_end_state(self):
        """get end state info (re-reads the device after changes)"""
        config = self.get_current_config()
        self.end_state["bridge_domain"] = self.get_bd_list()
        cmd = "ip tunnel mode vxlan"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["tunnel_mode_vxlan"] = "enable"
        else:
            self.end_state["tunnel_mode_vxlan"] = "disable"
        cmd = "assign forward nvo3 f-linecard compatibility enable"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["nvo3_prevent_loops"] = "enable"
        else:
            self.end_state["nvo3_prevent_loops"] = "disable"
        cmd = "assign forward nvo3 acl extend enable"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["nvo3_acl_extend"] = "enable"
        else:
            self.end_state["nvo3_acl_extend"] = "disable"
        self.end_state["nvo3_gw_enhanced"] = get_nvo3_gw_enhanced(config)
        cmd = "assign forward nvo3 service extend enable"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["nvo3_service_extend"] = "enable"
        else:
            self.end_state["nvo3_service_extend"] = "disable"
        cmd = "assign forward nvo3 eth-trunk hash enable"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["nvo3_eth_trunk_hash"] = "enable"
        else:
            self.end_state["nvo3_eth_trunk_hash"] = "disable"
        cmd = "assign forward nvo3 ecmp hash enable"
        exist = is_config_exist(config, cmd)
        if exist:
            self.end_state["nvo3_ecmp_hash"] = "enable"
        else:
            self.end_state["nvo3_ecmp_hash"] = "disable"

    def work(self):
        """worker: validate, diff, apply, report"""
        self.check_params()
        self.config = self.get_current_config()
        self.bd_info = self.get_bd_list()
        self.get_existing()
        self.get_proposed()
        # deal present or absent
        self.config_bridge_domain()
        self.config_tunnel_mode()
        self.config_assign_forward()
        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Module main: assemble the argument spec and run the worker."""
    on_off = ['enable', 'disable']
    argument_spec = dict(
        tunnel_mode_vxlan=dict(required=False, type='str', choices=on_off),
        nvo3_prevent_loops=dict(required=False, type='str', choices=on_off),
        nvo3_acl_extend=dict(required=False, type='str', choices=on_off),
        nvo3_gw_enhanced=dict(required=False, type='str',
                              choices=['l2', 'l3']),
        nvo3_service_extend=dict(required=False, type='str', choices=on_off),
        nvo3_eth_trunk_hash=dict(required=False, type='str', choices=on_off),
        nvo3_ecmp_hash=dict(required=False, type='str', choices=on_off),
        bridge_domain_id=dict(required=False, type='str'),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
    )
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    VxlanGlobal(argument_spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.