# ======================================================================
# repo: zouyapeng/horizon_change
# path: openstack_dashboard/dashboards/monitor/network_monitor/tags.py
# license: apache-2.0
# ======================================================================
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon import messages
from openstack_dashboard.dashboards.monitor import monitor
from openstack_dashboard.dashboards.monitor.network_monitor import tables
class AddrBaseTab(tabs.TableTab):
name = _("")
slug = ""
table_classes = (tables.EquipmentListTable,)
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_equipment_list_data(self):
try:
if self.request.method == "POST":
# print self.request.POST
result = monitor.add_blacklist(request = self.request)
if 0 == result:
                    messages.success(self.request, _("Blacklist entry added"))
                elif 1 == result:
                    messages.info(self.request, _("Already in list"))
                else:
                    messages.info(self.request, _("Error"))
equipments = monitor.network_monitor_equipment_list(request = self.request,
marker = None,
paginate = False,
addr = self.slug)
return equipments
except Exception:
self._has_more = False
            error_message = _('Unable to get equipment list')
exceptions.handle(self.request, error_message)
return []
class ShangHaiTab(AddrBaseTab):
name = _("ShangHai")
slug = "shanghai"
class BeijingTab(AddrBaseTab):
name = _("BeiJing")
slug = "beijing"
class GuangZhouTab(AddrBaseTab):
name = _("GuangZhou")
slug = "guangzhou"
class NetworkMonitorTabs(tabs.TabGroup):
slug = "network_monitor_tabs"
tabs = (ShangHaiTab, BeijingTab, GuangZhouTab, )
sticky = True
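# A minimal sketch (assumed, not part of this file) of how such a tab group
# is typically wired to a Horizon view via horizon.tabs.TabbedTableView:
#   from horizon import tabs as horizon_tabs
#   class IndexView(horizon_tabs.TabbedTableView):
#       tab_group_class = NetworkMonitorTabs
#       template_name = 'monitor/network_monitor/index.html'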
# ======================================================================
# repo: tobegit3hub/cinder_docker
# path: cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py
# license: apache-2.0
# ======================================================================
# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Cisco SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
"""Cisco FC zone client cli implementation.
OpenStack Fibre Channel zone client cli connector
to manage FC zoning in Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone client cli
"""
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
def __init__(self, ipaddress, username, password, port, vsan):
"""initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.fabric_vsan = vsan
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
        Return the active zoneset from the fabric. When no configuration
        is active, an empty map is returned.
:returns: Map -- active zone set map in the following format
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more'])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
                # Split on whitespace and brackets.
                line_split = re.split(r'[\s\[\]]+', line)
if ZoneConstant.CFG_ZONESET in line_split:
# zoneset name [name] vsan [vsan]
zone_set_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONESET)
+ 2]
continue
if ZoneConstant.CFG_ZONE in line_split:
# zone name [name] vsan [vsan]
zone_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
zone[zone_name] = list()
continue
if ZoneConstant.CFG_ZONE_MEMBER in line_split:
# Examples:
# pwwn c0:50:76:05:15:9f:00:12
# * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2]
zone_member = \
line_split[
line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception as ex:
            # A parsing error here indicates malformed CLI output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.error(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_set
def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
zone_status):
"""Add zone configuration.
This method will add the zone configuration passed by user.
input params:
zones - zone names mapped to members and VSANs.
zone members are colon separated but case-insensitive
{ zonename1:[zonememeber1,zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g: {'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
activate - True/False
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
LOG.debug("Active zone set: %s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list: %s", zone_list)
LOG.debug("zone status: %s", zone_status)
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
for zone in zones.keys():
            # If the zone exists, it's an update: delete and re-insert.
LOG.debug("Update call")
if zone in zone_list:
# Response from get_active_zone_set strips colons from WWPNs
current_zone = set(zone_list[zone])
new_wwpns = map(lambda x: x.lower().replace(':', ''),
zones[zone])
new_zone = set(new_wwpns)
if current_zone != new_zone:
try:
self.delete_zones(zone, activate, fabric_vsan,
active_zone_set, zone_status)
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_cmds.append(['zone', 'name', zone])
for member in zones[zone]:
zone_cmds.append(['member', 'pwwn', member])
zone_cmds.append(['end'])
try:
LOG.debug("Add zones: Config cmd to run: %s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate:
self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
self._cfg_save()
except Exception as e:
msg = _("Creating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s)."
) % {'zoneset': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
LOG.debug("zone status: %s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cmd_list, True, 1)
def get_zoning_status(self):
"""Return the zoning mode and session for a zoneset."""
zone_status = {}
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting zone status "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
                # Split on whitespace and brackets.
                line_split = re.split(r'[\s\[\]]+', line)
if 'mode:' in line_split:
# mode: <enhanced|basic>
zone_status['mode'] = line_split[line_split.index('mode:')
+ 1]
continue
if 'session:' in line_split:
# session: <none|a value other than none>
zone_status['session'] = \
line_split[line_split.index('session:') + 1]
continue
except Exception as ex:
            # A parsing error here indicates malformed CLI output.
            msg = _("Malformed zone status: (switch=%(switch)s "
                    "zone_status=%(zone_status)s)."
                    ) % {'switch': self.switch_ip,
                         'zone_status': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.error(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_status
def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set,
zone_status):
"""Delete zones from fabric.
        Method to delete zones from the active zone configuration.
        params zone_names: zone names separated by semicolons
        params activate: True/False
"""
LOG.debug("zone_names %s", zone_names)
active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmds = [['conf'],
['zoneset', 'name', active_zoneset_name, 'vsan',
fabric_vsan]]
try:
for zone in set(zone_names.split(';')):
cmds.append(['no', 'zone', 'name', zone])
cmds.append(['end'])
LOG.debug("Delete zones: Config cmd to run: %s", cmds)
self._ssh_execute(cmds, True, 1)
if activate:
self.activate_zoneset(active_zoneset_name, fabric_vsan,
zone_status)
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmds, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
show fcns database
"""
cli_output = None
return_list = []
try:
cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW,
self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting fcns database "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list)
return return_list
@utils.retry(processutils.ProcessExecutionError, retries=5)
def _cfg_save(self):
cmd = ['copy', 'running-config', 'startup-config']
self._run_ssh(cmd, True)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True)
LOG.debug("CLI output from ssh - output: %s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
for line in switch_data:
if not(" N " in line):
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed show fcns database string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
try:
with self.sshpool.item() as ssh:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning(_LW("Error running SSH command: %s"), command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands where status return is expected.
cmd_list is a list of commands, where each command is itself
a list of parameters. We use utils.check_ssh_injection to check each
        command, but then join them with " ; " to form a single command.
"""
# Check that each command is secure
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
# Combine into a single command.
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
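        # A hypothetical cmd_list (assumed values, for illustration only):
        #   [['conf'], ['zoneset', 'name', 'cfg1', 'vsan', '1']]
        # joins to the single CLI string:
        #   'conf ; zoneset name cfg1 vsan 1'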
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s", command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh: %s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s', exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
LOG.exception(_LE('Error executing SSH command.'))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH: %s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error executing command via ssh."))
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None
# ======================================================================
# repo: corvorepack/REPOULTRA
# path: plugin.video.tv.ultra.7k/resources/tools/unwise.py
# license: gpl-2.0
# ======================================================================
# -*- coding: utf-8 -*-
#--------------------------------------------------------
# TV Ultra 7K
# (http://forum.rojadirecta.es/
# (http://xbmcspain.com/foro/
# Version 0.0.1 (26.10.2014)
#--------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
import re
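# unwise1 decodes a string of base-36 digit pairs: each two characters are
# read as one base-36 number and mapped to the character with that code.
# A small worked example (assumed input, for illustration only):
#   unwise1("1p1q") == chr(int("1p", 36)) + chr(int("1q", 36)) == "=>"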
def unwise1(w):
int1 = 0
result = ""
while int1 < len(w):
result = result + chr(int(w[int1:int1 + 2], 36))
int1 += 2
return result
def logblock(s):
if len(s)>12:
return "("+str(len(s))+") "+s[0:5]+"..."+s[-5:]
else:
return "("+str(len(s))+") "+s
def unwise(w, i, s, e, wi, ii, si, ei):
#print "w="+logblock(w);print "i="+logblock(i);print "s="+logblock(s);print "e="+logblock(e);print "wi="+str(wi);print "ii="+str(ii);
#print "si="+str(si);print "ei="+str(ei);
int1 = 0
int2 = 0
int3 = 0
int4 = 0
string1 = ""
string2 = ""
while True:
if w != "":
if int1 < wi:
string2 = string2 + w[int1:int1+1]
elif int1 < len(w):
string1 = string1 + w[int1:int1+1]
int1 += 1
if i != "":
if int2 < ii:
string2 = string2 + i[int2:int2+1]
elif int2 < len(i):
string1 = string1 + i[int2:int2+1]
int2 += 1
if s != "":
if int3 < si:
string2 = string2 + s[int3:int3+1]
elif int3 < len(s):
string1 = string1 + s[int3:int3+1]
int3 = int3 + 1
if e != "":
if int4 < ei:
string2 = string2 + e[int4:int4+1]
elif int4 < len(e):
string1 = string1 + e[int4:int4+1]
int4 = int4 + 1
if len(w) + len(i) + len(s) + len(e) == len(string1) + len(string2):
break
#print "string1="+logblock(string1)
#print "string2="+logblock(string2)
int1 = 0
int2 = 0
result = ""
contador = 0
while int1 < len(string1):
flag = -1
if ord(string2[int2:int2+1]) % 2:
flag = 1
anadir = chr(int(string1[int1:int1+2], 36) - flag)
#print "contador=",contador,"flag=",flag,"anadir=",anadir
result = result + anadir
int2 += 1
if int2 >= len(string2):
int2 = 0
int1 += 2
contador = contador + 1
#print "Fin de bloque, result="+result
return result
def unwise_process(result):
while True:
a = re.compile(r';?eval\s*\(\s*function\s*\(\s*w\s*,\s*i\s*,\s*s\s*,\s*e\s*\).+?[\"\']\s*\)\s*\)(?:\s*;)?').search(result)
if not a:
break
a = a.group()
tmp = re.compile(r'\}\s*\(\s*[\"\'](\w*)[\"\']\s*,\s*[\"\'](\w*)[\"\']\s*,\s*[\"\'](\w*)[\"\']\s*,\s*[\"\'](\w*)[\"\']').search(a)
if not tmp:
#print "UNWISE ERROR --- " + a
result = result.replace(a, "")
else:
wise = ["", "", "", ""]
wise = tmp.groups()
if a.find("while") == -1:
result = result.replace(a, unwise1(wise[0]))
else:
c = 0
wisestr = ["", "", "", ""]
wiseint = [0, 0, 0, 0]
b = re.compile(r'while(.+?)var\s*\w+\s*=\s*\w+\.join\(\s*[\"\'][\"\']\s*\)').search(a).group(1)
for d in re.compile(r'if\s*\(\s*\w*\s*\<\s*(\d+)\)\s*\w+\.push').findall(b):
wisestr[c] = wise[c]
wiseint[c] = int(d)
c += 1
result = result.replace(a, unwise(wisestr[0], wisestr[1], wisestr[2], wisestr[3], wiseint[0], wiseint[1], wiseint[2], wiseint[3]))
return result
def resolve_var(HTML, key): #this should probably be located elsewhere
key = re.escape(key)
tmp1 = HTML.replace("\r", "")
tmp1 = tmp1.replace("\n", ";")
tmp2 = re.compile(r'[^\w\.]' + key + '\s*=\s*([^\"\']*?)[;,]').search(tmp1) #expect var first, movshare
if tmp2:
tmp2 = resolve_var(HTML, tmp2.group(1))
else:
tmp2 = re.compile(r'[^\w\.]' + key + '\s*=\s*[\"\'](.*?)[\"\']').search(tmp1)
if tmp2:
tmp2 = tmp2.group(1)
else:
tmp2 = "" #oops, should not happen in the variable is valid
    return tmp2
# ======================================================================
# repo: wemanuel/smry
# path: server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_vpn_gateways/create.py
# license: apache-2.0
# ======================================================================
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for creating target VPN Gateways."""
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class Create(base_classes.BaseAsyncCreator):
"""Create a VPN Gateway."""
# Placeholder to indicate that a detailed_help field exists and should
# be set outside the class definition.
detailed_help = None
@staticmethod
def Args(parser):
"""Adds arguments to the supplied parser."""
parser.add_argument(
'--description',
help='An optional, textual description for the target VPN Gateway.')
network = parser.add_argument(
'--network',
required=True,
help='A reference to a network in this project')
network.detailed_help = """\
A reference to a network in this project to
contain the VPN Gateway.
"""
utils.AddRegionFlag(
parser,
        resource_type='Target VPN Gateway',
operation_type='create')
parser.add_argument(
'name',
help='The name of the target VPN Gateway.')
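  # A hypothetical invocation of this subcommand (command path assumed from
  # this file's location in the gcloud surface tree):
  #   gcloud compute target-vpn-gateways create my-gateway \
  #       --network my-network --region us-central1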
@property
def service(self):
return self.compute.targetVpnGateways
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'targetVpnGateways'
def CreateRequests(self, args):
"""Builds API requests to construct Target VPN Gateways.
Args:
args: argparse.Namespace, The arguments received by this command.
Returns:
[protorpc.messages.Message], A list of requests to be executed
by the compute API.
"""
target_vpn_gateway_ref = self.CreateGlobalReference(
args.name, resource_type='targetVpnGateways')
network_ref = self.CreateGlobalReference(
args.network, resource_type='networks')
region_ref = self.CreateGlobalReference(
args.region, resource_type='regions')
request = self.messages.ComputeTargetVpnGatewaysInsertRequest(
project=self.project,
region=region_ref.Name(),
targetVpnGateway=self.messages.TargetVpnGateway(
description=args.description,
name=target_vpn_gateway_ref.Name(),
network=network_ref.SelfLink()
))
return [request]
Create.detailed_help = {
'brief': 'Create a target VPN Gateway',
'DESCRIPTION': """
*{command}* is used to create a target VPN Gateway. A target VPN
Gateway can reference one or more VPN tunnels that connect it to
external VPN gateways. A VPN Gateway may also be referenced by
one or more forwarding rules that define which packets the
gateway is responsible for routing.
"""
}
# ======================================================================
# repo: pombredanne/bokeh
# path: bokeh/models/tests/test_sources.py
# license: bsd-3-clause
# ======================================================================
from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource
class TestColumnDataSource(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
    def test_remove_nonexistent(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
with self.assertRaises(ValueError) as cm:
ds.stream(dict())
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: a, b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b, extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10, 20]))
self.assertEqual(str(cm.exception), "All streaming column updates must be the same length")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=np.ones((1,1))))
self.assertEqual(str(cm.exception), "stream(...) only supports 1d sequences, got ndarray with size (1, 1)")
def test_stream_good_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
ds.stream(dict(a=[11, 12], b=[21, 22]), "foo")
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo"))
self.assertEqual(stuff['kw'], {})
if __name__ == "__main__":
unittest.main()
# ======================================================================
# repo: klonage/nlt-gcs
# path: packages/IronPython.StdLib.2.7.4/content/Lib/sqlite3/__init__.py
# license: gpl-3.0
# ======================================================================
#-*- coding: ISO-8859-1 -*-
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
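# On IronPython (sys.platform == 'cli') the managed IronPython.SQLite
# assembly must be loaded before the 'from dbapi2 import *' below; the
# throwaway function keeps sys/clr out of the module namespace (note the
# 'del _' after the call).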
def _():
import sys
if sys.platform == 'cli':
import clr
clr.AddReference('IronPython.SQLite')
_()
del _
from dbapi2 import *
# ======================================================================
# repo: menardorama/ReadyNAS-Add-ons
# path: headphones-1.0.0/files/apps/headphones/lib/unidecode/x007.py
# license: gpl-2.0
# ======================================================================
data = (
'//', # 0x00
'/', # 0x01
',', # 0x02
'!', # 0x03
'!', # 0x04
'-', # 0x05
',', # 0x06
',', # 0x07
';', # 0x08
'?', # 0x09
'~', # 0x0a
'{', # 0x0b
'}', # 0x0c
'*', # 0x0d
'[?]', # 0x0e
'', # 0x0f
'\'', # 0x10
'', # 0x11
'b', # 0x12
'g', # 0x13
'g', # 0x14
'd', # 0x15
'd', # 0x16
'h', # 0x17
'w', # 0x18
'z', # 0x19
'H', # 0x1a
't', # 0x1b
't', # 0x1c
'y', # 0x1d
'yh', # 0x1e
'k', # 0x1f
'l', # 0x20
'm', # 0x21
'n', # 0x22
's', # 0x23
's', # 0x24
'`', # 0x25
'p', # 0x26
'p', # 0x27
'S', # 0x28
'q', # 0x29
'r', # 0x2a
'sh', # 0x2b
't', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'a', # 0x30
'a', # 0x31
'a', # 0x32
'A', # 0x33
'A', # 0x34
'A', # 0x35
'e', # 0x36
'e', # 0x37
'e', # 0x38
'E', # 0x39
'i', # 0x3a
'i', # 0x3b
'u', # 0x3c
'u', # 0x3d
'u', # 0x3e
'o', # 0x3f
'', # 0x40
'`', # 0x41
'\'', # 0x42
'', # 0x43
'', # 0x44
'X', # 0x45
'Q', # 0x46
'@', # 0x47
'@', # 0x48
'|', # 0x49
'+', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'h', # 0x80
'sh', # 0x81
'n', # 0x82
'r', # 0x83
'b', # 0x84
'L', # 0x85
'k', # 0x86
'\'', # 0x87
'v', # 0x88
'm', # 0x89
'f', # 0x8a
'dh', # 0x8b
'th', # 0x8c
'l', # 0x8d
'g', # 0x8e
'ny', # 0x8f
's', # 0x90
'd', # 0x91
'z', # 0x92
't', # 0x93
'y', # 0x94
'p', # 0x95
'j', # 0x96
'ch', # 0x97
'tt', # 0x98
'hh', # 0x99
'kh', # 0x9a
'th', # 0x9b
'z', # 0x9c
'sh', # 0x9d
's', # 0x9e
'd', # 0x9f
't', # 0xa0
'z', # 0xa1
'`', # 0xa2
'gh', # 0xa3
'q', # 0xa4
'w', # 0xa5
'a', # 0xa6
'aa', # 0xa7
'i', # 0xa8
'ee', # 0xa9
'u', # 0xaa
'oo', # 0xab
'e', # 0xac
'ey', # 0xad
'o', # 0xae
'oa', # 0xaf
'', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
# ======================================================================
# repo: thomashaw/SecGen
# path: modules/utilities/unix/labtainers/files/Labtainers-master/scripts/designer/system/sbin/mynotify.py
# license: gpl-3.0
# ======================================================================
#!/usr/bin/env python
'''
This software was created by United States Government employees at
The Center for the Information Systems Studies and Research (CISR)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
'''
import os
import time
import logging
import subprocess
from inotify_simple import INotify, flags
'''
This runs as a service on the containers. It uses inotify
to catch events defined in the .local/bin/notify file,
and will invoke notify_cb.sh when those events occur.
We pass the file, the mode, and the first user in the system to
notify_cb.sh. The timestamped output is appended to any
existing notify.stdout... within 1 second of now.
Alternately, the notify file can include an optional output
filename.
It dies without a whimper. Debug by manually running and generating
inotify events.
'''
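# A hypothetical notify file (format assumed from the parsing below:
# "path flag [output-file-prefix]", one watch per line):
#   /etc/passwd OPEN
#   /home/student/telnet.log ACCESS telnetcheck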
logger = logging.getLogger('mynotify')
hdlr = logging.FileHandler('/tmp/mynotify.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
class WatchType():
def __init__(self, path, flag, outfile=None):
self.path = path
self.flag = flag
self.outfile = outfile
def showMask(mask):
if mask & flags.CREATE:
print('CREATE')
if mask & flags.ACCESS:
print('ACCESS')
if mask & flags.OPEN:
print('OPEN')
def get_flag(flag):
if flag == 'CREATE':
return flags.CREATE
elif flag == 'ACCESS':
return flags.ACCESS
elif flag == 'OPEN':
return flags.OPEN
else:
return None
def get_first_user():
with open('/etc/passwd') as fh:
for line in fh:
parts = line.strip().split(':')
if parts[2] == '1000':
return parts[0]
return None
logger.debug('Start mynotify')
watches = {}
inotify = INotify()
first_user = get_first_user()
logger.debug('first user is %s' % first_user)
notify_file = '/home/%s/.local/bin/notify' % first_user
notify_cb = '/home/%s/.local/bin/notify_cb.sh' % first_user
results = '/home/%s/.local/result' % first_user
if not os.path.isfile(notify_file):
logger.error('missing notify %s' % (notify_file))
exit(0)
if not os.path.isfile(notify_cb):
logger.debug("no notify_cb.sh, just ouput path & cmd")
notify_cb = None
''' read in the notify file, set watches on file access as directed '''
with open(notify_file) as fh:
for line in fh:
if not line.strip().startswith('#'):
parts = line.strip().split()
outfile = None
if len(parts) > 2:
outfile = parts[2]
watch = WatchType(parts[0], parts[1], outfile)
flag = get_flag(watch.flag)
try:
wd = inotify.add_watch(watch.path, flag)
watches[wd] = watch
except:
logger.debug('could not add watch for %s %s' % (watch.path, watch.flag))
#
# forever loop responding to inotify events
#
while True:
for event in inotify.read():
print(event)
showMask(event.mask)
watch = watches[event.wd]
logger.debug('path: %s flag: %s' % (watch.path, watch.flag))
now = time.time()
ts = time.strftime('%Y%m%d%H%M%S', time.localtime(now))
''' use given outputfile name, if provided in the notify directive '''
if watch.outfile is None:
notifyoutfile = os.path.join(results, 'notify.stdout')
else:
notifyoutfile = os.path.join(results, '%s.stdout' % (watch.outfile))
notifyoutfile_ts = '%s.%s' % (notifyoutfile, ts)
#notifyoutfile = os.path.join(results, 'notify.stdin.%s' % ts)
hist_file = '/home/%s/.bash_history' % first_user
cmd_time_history = os.path.getmtime(hist_file)
root_hist_file = '/root/.bash_history'
cmd_user = first_user
if os.path.isfile(root_hist_file):
time_root_history = os.path.getmtime(root_hist_file)
if cmd_time_history > time_root_history:
cmd_time_history = time_root_history
hist_file = root_hist_file
cmd_user = 'root'
cmd = None
with open(hist_file) as fh:
hist = fh.readlines()
cmd = hist[-1].strip()
if cmd.startswith('sudo'):
cmd = cmd[5:]
cmd_user = 'root'
''' determine if we should append to an existig output file '''
is_a_file = False
if not os.path.isfile(notifyoutfile_ts):
''' no file, if from previous second, use that as hack to merge with output from command '''
now = now -1
ts = time.strftime('%Y%m%d%H%M%S', time.localtime(now))
tmpfile = '%s.%s' % (notifyoutfile, ts)
if os.path.isfile(tmpfile):
notifyoutfile_ts = tmpfile
is_a_file = True
else:
is_a_file = True
if is_a_file:
''' existing file, append to it '''
if notify_cb is not None:
sys_cmd = '%s %s %s %s %s >> %s 2>/dev/null' % (notify_cb, watch.path,
watch.flag, cmd_user, cmd, notifyoutfile_ts)
os.system(sys_cmd)
logger.debug('sys_cmd is %s' % sys_cmd)
else:
with open(notifyoutfile_ts, 'a') as fh:
fh.write('path: %s cmd: %s user: %s' % (watch.path, cmd, cmd_user))
else:
if notify_cb is not None:
''' only write to file if notify_cb generates output '''
sys_cmd = '%s %s %s %s "%s"' % (notify_cb, watch.path, watch.flag, cmd_user, cmd)
logger.debug('sys_cmd is %s' % sys_cmd)
child = subprocess.Popen(sys_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = child.communicate()
if len(output[0]) > 0:
with open(notifyoutfile_ts, 'w') as fh:
fh.write(output[0])
else:
with open(notifyoutfile_ts, 'a') as fh:
fh.write('path: %s cmd: %s user: %s' % (watch.path, cmd, cmd_user))
# ======================================================================
# repo: castroflavio/ryu
# path: ryu/ofproto/ofproto_v1_3.py
# license: apache-2.0
# ======================================================================
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenFlow 1.3 definitions.
"""
from ryu.ofproto import oxm_fields
from struct import calcsize
# struct ofp_header
OFP_HEADER_PACK_STR = '!BBHI'
OFP_HEADER_SIZE = 8
assert calcsize(OFP_HEADER_PACK_STR) == OFP_HEADER_SIZE
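# A minimal sketch (not part of this module) of how the pack strings are
# used; every OpenFlow message begins with this 8-byte header, and 0x04 is
# the OpenFlow 1.3 wire version:
#   import struct
#   hdr = struct.pack(OFP_HEADER_PACK_STR, 0x04, 0, 8, 0)
#   # version=0x04, type=OFPT_HELLO (0), length=8, xid=0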
# enum ofp_type
OFPT_HELLO = 0 # Symmetric message
OFPT_ERROR = 1 # Symmetric message
OFPT_ECHO_REQUEST = 2 # Symmetric message
OFPT_ECHO_REPLY = 3 # Symmetric message
OFPT_EXPERIMENTER = 4 # Symmetric message
OFPT_FEATURES_REQUEST = 5 # Controller/switch message
OFPT_FEATURES_REPLY = 6 # Controller/switch message
OFPT_GET_CONFIG_REQUEST = 7 # Controller/switch message
OFPT_GET_CONFIG_REPLY = 8 # Controller/switch message
OFPT_SET_CONFIG = 9 # Controller/switch message
OFPT_PACKET_IN = 10 # Async message
OFPT_FLOW_REMOVED = 11 # Async message
OFPT_PORT_STATUS = 12 # Async message
OFPT_PACKET_OUT = 13 # Controller/switch message
OFPT_FLOW_MOD = 14 # Controller/switch message
OFPT_GROUP_MOD = 15 # Controller/switch message
OFPT_PORT_MOD = 16 # Controller/switch message
OFPT_TABLE_MOD = 17 # Controller/switch message
OFPT_MULTIPART_REQUEST = 18 # Controller/switch message
OFPT_MULTIPART_REPLY = 19 # Controller/switch message
OFPT_BARRIER_REQUEST = 20 # Controller/switch message
OFPT_BARRIER_REPLY = 21 # Controller/switch message
OFPT_QUEUE_GET_CONFIG_REQUEST = 22 # Controller/switch message
OFPT_QUEUE_GET_CONFIG_REPLY = 23 # Controller/switch message
OFPT_ROLE_REQUEST = 24 # Controller/switch message
OFPT_ROLE_REPLY = 25 # Controller/switch message
OFPT_GET_ASYNC_REQUEST = 26 # Controller/switch message
OFPT_GET_ASYNC_REPLY = 27 # Controller/switch message
OFPT_SET_ASYNC = 28 # Controller/switch message
OFPT_METER_MOD = 29 # Controller/switch message
# struct ofp_port
OFP_MAX_PORT_NAME_LEN = 16
OFP_ETH_ALEN = 6
OFP_ETH_ALEN_STR = str(OFP_ETH_ALEN)
_OFP_PORT_PACK_STR = 'I4x' + OFP_ETH_ALEN_STR + 's' + '2x' + \
str(OFP_MAX_PORT_NAME_LEN) + 's' + 'IIIIIIII'
OFP_PORT_PACK_STR = '!' + _OFP_PORT_PACK_STR
OFP_PORT_SIZE = 64
assert calcsize(OFP_PORT_PACK_STR) == OFP_PORT_SIZE
# enum ofp_port_config
OFPPC_PORT_DOWN = 1 << 0 # Port is administratively down.
OFPPC_NO_RECV = 1 << 2 # Drop all packets received by port.
OFPPC_NO_FWD = 1 << 5 # Drop packets forwarded to port.
OFPPC_NO_PACKET_IN = 1 << 6 # Do not send packet-in msgs for port.
# enum ofp_port_state
OFPPS_LINK_DOWN = 1 << 0 # No physical link present.
OFPPS_BLOCKED = 1 << 1 # Port is blocked.
OFPPS_LIVE = 1 << 2 # Live for Fast Failover Group.
# enum ofp_port_no
OFPP_MAX = 0xffffff00
OFPP_IN_PORT = 0xfffffff8 # Send the packet out the input port. This
# virtual port must be explicitly used
# in order to send back out of the input
# port.
OFPP_TABLE = 0xfffffff9 # Perform actions in flow table.
# NB: This can only be the destination
# port for packet-out messages.
OFPP_NORMAL = 0xfffffffa # Process with normal L2/L3 switching.
OFPP_FLOOD = 0xfffffffb # All physical ports except input port and
# those disabled by STP.
OFPP_ALL = 0xfffffffc # All physical ports except input port.
OFPP_CONTROLLER = 0xfffffffd # Send to controller.
OFPP_LOCAL = 0xfffffffe # Local openflow "port".
OFPP_ANY = 0xffffffff # Not associated with a physical port.
# All ones is used to indicate all queues in a port (for stats retrieval).
OFPQ_ALL = 0xffffffff
# enum ofp_port_features
OFPPF_10MB_HD = 1 << 0 # 10 Mb half-duplex rate support.
OFPPF_10MB_FD = 1 << 1 # 10 Mb full-duplex rate support.
OFPPF_100MB_HD = 1 << 2 # 100 Mb half-duplex rate support.
OFPPF_100MB_FD = 1 << 3 # 100 Mb full-duplex rate support.
OFPPF_1GB_HD = 1 << 4 # 1 Gb half-duplex rate support.
OFPPF_1GB_FD = 1 << 5 # 1 Gb full-duplex rate support.
OFPPF_10GB_FD = 1 << 6 # 10 Gb full-duplex rate support.
OFPPF_40GB_FD = 1 << 7 # 40 Gb full-duplex rate support.
OFPPF_100GB_FD = 1 << 8 # 100 Gb full-duplex rate support.
OFPPF_1TB_FD = 1 << 9 # 1 Tb full-duplex rate support.
OFPPF_OTHER = 1 << 10 # Other rate, not in the list.
OFPPF_COPPER = 1 << 11 # Copper medium.
OFPPF_FIBER = 1 << 12 # Fiber medium.
OFPPF_AUTONEG = 1 << 13 # Auto-negotiation.
OFPPF_PAUSE = 1 << 14 # Pause.
OFPPF_PAUSE_ASYM = 1 << 15 # Asymmetric pause.
# struct ofp_packet_queue
OFP_PACKET_QUEUE_PACK_STR = '!IIH6x'
OFP_PACKET_QUEUE_SIZE = 16
assert calcsize(OFP_PACKET_QUEUE_PACK_STR) == OFP_PACKET_QUEUE_SIZE
# enum ofp_queue_properties
OFPQT_MIN_RATE = 1 # Minimum datarate guaranteed.
OFPQT_MAX_RATE = 2 # Maximum datarate.
OFPQT_EXPERIMENTER = 0xffff # Experimenter defined property.
# struct ofp_queue_prop_header
OFP_QUEUE_PROP_HEADER_PACK_STR = '!HH4x'
OFP_QUEUE_PROP_HEADER_SIZE = 8
assert calcsize(OFP_QUEUE_PROP_HEADER_PACK_STR) == OFP_QUEUE_PROP_HEADER_SIZE
# struct ofp_queue_prop_min_rate
OFP_QUEUE_PROP_MIN_RATE_PACK_STR = '!H6x'
OFP_QUEUE_PROP_MIN_RATE_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_MIN_RATE_PACK_STR) +
OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_MIN_RATE_SIZE
# struct ofp_queue_prop_max_rate
OFP_QUEUE_PROP_MAX_RATE_PACK_STR = '!H6x'
OFP_QUEUE_PROP_MAX_RATE_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_MAX_RATE_PACK_STR) +
OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_MAX_RATE_SIZE
# struct ofp_queue_prop_experimenter
OFP_QUEUE_PROP_EXPERIMENTER_PACK_STR = '!I4x'
OFP_QUEUE_PROP_EXPERIMENTER_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_EXPERIMENTER_PACK_STR) +
OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_EXPERIMENTER_SIZE
# struct ofp_match
_OFP_MATCH_PACK_STR = 'HHBBBB'
OFP_MATCH_PACK_STR = '!' + _OFP_MATCH_PACK_STR
OFP_MATCH_SIZE = 8
assert calcsize(OFP_MATCH_PACK_STR) == OFP_MATCH_SIZE
# enum ofp_match_type
OFPMT_STANDARD = 0 # Deprecated
OFPMT_OXM = 1 # OpenFlow Extensible Match
# enum ofp_oxm_class
OFPXMC_NXM_0 = 0x0000 # Backward compatibility with NXM
OFPXMC_NXM_1 = 0x0001 # Backward compatibility with NXM
OFPXMC_OPENFLOW_BASIC = 0x8000 # Basic class for OpenFlow
OFPXMC_EXPERIMENTER = 0xFFFF # Experimenter class
# enum ofp_vlan_id
OFPVID_PRESENT = 0x1000 # bit that indicate that a VLAN id is set.
OFPVID_NONE = 0x0000 # No VLAN id was set.
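# Note (per the OpenFlow 1.3 spec, not stated here): to match a specific
# VLAN, the OXM vlan_vid value is OR'd with OFPVID_PRESENT, e.g. VLAN 10
# is matched as (OFPVID_PRESENT | 10) == 0x100a.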
# enum ofp_ipv6exthdr_flags
OFPIEH_NONEXT = 1 << 0 # "No next header" encountered.
OFPIEH_ESP = 1 << 1 # Encrypted Sec Payload header present.
OFPIEH_AUTH = 1 << 2 # Authentication header present.
OFPIEH_DEST = 1 << 3 # 1 or 2 dest headers present.
OFPIEH_FRAG = 1 << 4 # Fragment header present.
OFPIEH_ROUTER = 1 << 5 # Router header present.
OFPIEH_HOP = 1 << 6 # Hop-by-hop header present.
OFPIEH_UNREP = 1 << 7 # Unexpected repeats encountered.
OFPIEH_UNSEQ = 1 << 8 # Unexpected sequencing encountered.
# ofp_oxm_experimenter_header
OFP_OXM_EXPERIMENTER_HEADER_PACK_STR = '!II'
OFP_OXM_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_OXM_EXPERIMENTER_HEADER_PACK_STR) ==
OFP_OXM_EXPERIMENTER_HEADER_SIZE)
# enum ofp_instruction_type
OFPIT_GOTO_TABLE = 1 # Setup the next table in the lookup pipeline.
OFPIT_WRITE_METADATA = 2 # Setup the metadata field for use later in
# pipeline.
OFPIT_WRITE_ACTIONS = 3 # Write the action(s) onto the datapath
# action set
OFPIT_APPLY_ACTIONS = 4 # Applies the action(s) immediately
OFPIT_CLEAR_ACTIONS = 5 # Clears all actions from the datapath action
# set
OFPIT_METER = 6 # Apply meter (rate limiter)
OFPIT_EXPERIMENTER = 0xFFFF # Experimenter instruction
# struct ofp_instruction_goto_table
OFP_INSTRUCTION_GOTO_TABLE_PACK_STR = '!HHB3x'
OFP_INSTRUCTION_GOTO_TABLE_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_GOTO_TABLE_PACK_STR) ==
OFP_INSTRUCTION_GOTO_TABLE_SIZE)
# struct ofp_instruction_write_metadata
OFP_INSTRUCTION_WRITE_METADATA_PACK_STR = '!HH4xQQ'
OFP_INSTRUCTION_WRITE_METADATA_SIZE = 24
assert (calcsize(OFP_INSTRUCTION_WRITE_METADATA_PACK_STR) ==
OFP_INSTRUCTION_WRITE_METADATA_SIZE)
# struct ofp_instruction_actions
OFP_INSTRUCTION_ACTIONS_PACK_STR = '!HH4x'
OFP_INSTRUCTION_ACTIONS_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_ACTIONS_PACK_STR) ==
OFP_INSTRUCTION_ACTIONS_SIZE)
# struct ofp_instruction_meter
OFP_INSTRUCTION_METER_PACK_STR = '!HHI'
OFP_INSTRUCTION_METER_SIZE = 8
assert calcsize(OFP_INSTRUCTION_METER_PACK_STR) == OFP_INSTRUCTION_METER_SIZE
# enum ofp_action_type
OFPAT_OUTPUT = 0 # Output to switch port.
OFPAT_COPY_TTL_OUT = 11 # Copy TTL "outwards" -- from
# next-to-outermost to outermost
OFPAT_COPY_TTL_IN = 12 # Copy TTL "inwards" -- from outermost to
# next-to-outermost
OFPAT_SET_MPLS_TTL = 15 # MPLS TTL.
OFPAT_DEC_MPLS_TTL = 16 # Decrement MPLS TTL
OFPAT_PUSH_VLAN = 17 # Push a new VLAN tag
OFPAT_POP_VLAN = 18 # Pop the outer VLAN tag
OFPAT_PUSH_MPLS = 19 # Push a new MPLS tag
OFPAT_POP_MPLS = 20 # Pop the outer MPLS tag
OFPAT_SET_QUEUE = 21 # Set queue id when outputting to a port
OFPAT_GROUP = 22 # Apply group
OFPAT_SET_NW_TTL = 23 # IP TTL.
OFPAT_DEC_NW_TTL = 24 # Decrement IP TTL.
OFPAT_SET_FIELD = 25 # Set a header field using OXM TLV format.
OFPAT_PUSH_PBB = 26 # Push a new PBB service tag (I-TAG)
OFPAT_POP_PBB = 27 # Pop the outer PBB service tag (I-TAG)
OFPAT_EXPERIMENTER = 0xffff
# struct ofp_action_header
OFP_ACTION_HEADER_PACK_STR = '!HH4x'
OFP_ACTION_HEADER_SIZE = 8
assert calcsize(OFP_ACTION_HEADER_PACK_STR) == OFP_ACTION_HEADER_SIZE
# struct ofp_action_output
OFP_ACTION_OUTPUT_PACK_STR = '!HHIH6x'
OFP_ACTION_OUTPUT_SIZE = 16
assert calcsize(OFP_ACTION_OUTPUT_PACK_STR) == OFP_ACTION_OUTPUT_SIZE
# enum ofp_controller_max_len
OFPCML_MAX = 0xffe5 # maximum max_len value which can be used to
# request a specific byte length.
OFPCML_NO_BUFFER = 0xffff # indicates that no buffering should be
# applied and the whole packet is to be
# sent to the controller.
# struct ofp_action_group
OFP_ACTION_GROUP_PACK_STR = '!HHI'
OFP_ACTION_GROUP_SIZE = 8
assert calcsize(OFP_ACTION_GROUP_PACK_STR) == OFP_ACTION_GROUP_SIZE
# struct ofp_action_set_queue
OFP_ACTION_SET_QUEUE_PACK_STR = '!HHI'
OFP_ACTION_SET_QUEUE_SIZE = 8
assert calcsize(OFP_ACTION_SET_QUEUE_PACK_STR) == OFP_ACTION_SET_QUEUE_SIZE
# struct ofp_action_mpls_ttl
OFP_ACTION_MPLS_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_MPLS_TTL_SIZE = 8
assert calcsize(OFP_ACTION_MPLS_TTL_PACK_STR) == OFP_ACTION_MPLS_TTL_SIZE
# struct ofp_action_nw_ttl
OFP_ACTION_NW_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_NW_TTL_SIZE = 8
assert calcsize(OFP_ACTION_NW_TTL_PACK_STR) == OFP_ACTION_NW_TTL_SIZE
# struct ofp_action_push
OFP_ACTION_PUSH_PACK_STR = '!HHH2x'
OFP_ACTION_PUSH_SIZE = 8
assert calcsize(OFP_ACTION_PUSH_PACK_STR) == OFP_ACTION_PUSH_SIZE
# struct ofp_action_pop_mpls
OFP_ACTION_POP_MPLS_PACK_STR = '!HHH2x'
OFP_ACTION_POP_MPLS_SIZE = 8
assert calcsize(OFP_ACTION_POP_MPLS_PACK_STR) == OFP_ACTION_POP_MPLS_SIZE
# struct ofp_action_set_field
OFP_ACTION_SET_FIELD_PACK_STR = '!HH4x'
OFP_ACTION_SET_FIELD_SIZE = 8
assert calcsize(OFP_ACTION_SET_FIELD_PACK_STR) == OFP_ACTION_SET_FIELD_SIZE
# struct ofp_action_experimenter_header
OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR = '!HHI'
OFP_ACTION_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR) ==
OFP_ACTION_EXPERIMENTER_HEADER_SIZE)
# ofp_switch_features
OFP_SWITCH_FEATURES_PACK_STR = '!QIBB2xII'
OFP_SWITCH_FEATURES_SIZE = 32
assert (calcsize(OFP_SWITCH_FEATURES_PACK_STR) + OFP_HEADER_SIZE ==
OFP_SWITCH_FEATURES_SIZE)
# enum ofp_capabilities
OFPC_FLOW_STATS = 1 << 0 # Flow statistics.
OFPC_TABLE_STATS = 1 << 1 # Table statistics.
OFPC_PORT_STATS = 1 << 2 # Port statistics.
OFPC_GROUP_STATS = 1 << 3 # Group statistics.
OFPC_IP_REASM = 1 << 5 # Can reassemble IP fragments.
OFPC_QUEUE_STATS = 1 << 6 # Queue statistics.
OFPC_PORT_BLOCKED = 1 << 8 # Switch will block looping ports.
# struct ofp_switch_config
OFP_SWITCH_CONFIG_PACK_STR = '!HH'
OFP_SWITCH_CONFIG_SIZE = 12
assert (calcsize(OFP_SWITCH_CONFIG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_SWITCH_CONFIG_SIZE)
# enum ofp_config_flags
OFPC_FRAG_NORMAL = 0 # No special handling for fragments.
OFPC_FRAG_DROP = 1 # Drop fragments.
OFPC_FRAG_REASM = 2 # Reassemble (only if OFPC_IP_REASM set).
OFPC_FRAG_MASK = 3
# enum ofp_table
OFPTT_MAX = 0xfe
OFPTT_ALL = 0xff
# struct ofp_table_mod
OFP_TABLE_MOD_PACK_STR = '!B3xI'
OFP_TABLE_MOD_SIZE = 16
assert (calcsize(OFP_TABLE_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_TABLE_MOD_SIZE)
_OFP_FLOW_MOD_PACK_STR0 = 'QQBBHHHIIIH2x'
OFP_FLOW_MOD_PACK_STR = '!' + _OFP_FLOW_MOD_PACK_STR0 + _OFP_MATCH_PACK_STR
OFP_FLOW_MOD_PACK_STR0 = '!' + _OFP_FLOW_MOD_PACK_STR0
OFP_FLOW_MOD_SIZE = 56
assert (calcsize(OFP_FLOW_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_FLOW_MOD_SIZE)
# enum ofp_flow_mod_command
OFPFC_ADD = 0 # New flow.
OFPFC_MODIFY = 1 # Modify all matching flows.
OFPFC_MODIFY_STRICT = 2 # Modify entry strictly matching wildcards
OFPFC_DELETE = 3 # Delete all matching flows.
OFPFC_DELETE_STRICT = 4 # Strictly match wildcards and priority.
# By default, choose a priority in the middle.
OFP_DEFAULT_PRIORITY = 0x8000
# enum ofp_flow_mod_flags
OFPFF_SEND_FLOW_REM = 1 << 0 # Send flow removed message when flow
# expires or is deleted.
OFPFF_CHECK_OVERLAP = 1 << 1 # Check for overlapping entries first.
OFPFF_RESET_COUNTS = 1 << 2 # Reset flow packet and byte counts.
OFPFF_NO_PKT_COUNTS = 1 << 3 # Don't keep track of packet count.
OFPFF_NO_BYT_COUNTS = 1 << 4 # Don't keep track of byte count.
# struct ofp_group_mod
OFP_GROUP_MOD_PACK_STR = '!HBxI'
OFP_GROUP_MOD_SIZE = 16
assert (calcsize(OFP_GROUP_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_GROUP_MOD_SIZE)
# enum ofp_group_mod_command
OFPGC_ADD = 0 # New group.
OFPGC_MODIFY = 1 # Modify all matching groups.
OFPGC_DELETE = 2 # Delete all matching groups.
# enum ofp_group
OFPG_MAX = 0xffffff00 # Last usable group number.
# Fake groups
OFPG_ALL = 0xfffffffc # Represents all groups for group delete commands.
OFPG_ANY = 0xffffffff # Wildcard group used only for flow stats requests.
# Selects all flows regardless of group
# (including flows with no group).
# enum ofp_group_type
OFPGT_ALL = 0 # All (multicast/broadcast) group.
OFPGT_SELECT = 1 # Select group.
OFPGT_INDIRECT = 2 # Indirect group.
OFPGT_FF = 3 # Fast failover group.
# struct ofp_bucket
OFP_BUCKET_PACK_STR = '!HHII4x'
OFP_BUCKET_SIZE = 16
assert calcsize(OFP_BUCKET_PACK_STR) == OFP_BUCKET_SIZE
# struct ofp_port_mod
OFP_PORT_MOD_PACK_STR = '!I4x' + OFP_ETH_ALEN_STR + 's2xIII4x'
OFP_PORT_MOD_SIZE = 40
assert (calcsize(OFP_PORT_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_PORT_MOD_SIZE)
# struct ofp_meter_mod
OFP_METER_MOD_PACK_STR = '!HHI'
OFP_METER_MOD_SIZE = 16
assert (calcsize(OFP_METER_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_METER_MOD_SIZE)
# enum ofp_meter
OFPM_MAX = 0xffff0000
OFPM_SLOWPATH = 0xfffffffd # Meter for slow datapath, if any.
OFPM_CONTROLLER = 0xfffffffe # Meter for controller connection.
OFPM_ALL = 0xffffffff # Represents all meters for stat requests
# commands.
# enum ofp_meter_mod_command
OFPMC_ADD = 0 # New meter.
OFPMC_MODIFY = 1 # Modify specified meter.
OFPMC_DELETE = 2 # Delete specified meter.
# enum ofp_meter_flags
OFPMF_KBPS = 1 << 0 # Rate value in kb/s (kilo-bit per second).
OFPMF_PKTPS = 1 << 1 # Rate value in packet/sec.
OFPMF_BURST = 1 << 2 # Do burst size.
OFPMF_STATS = 1 << 3 # Collect statistics.
# struct ofp_meter_band_header
OFP_METER_BAND_HEADER_PACK_STR = '!HHII'
OFP_METER_BAND_HEADER_SIZE = 12
assert (calcsize(OFP_METER_BAND_HEADER_PACK_STR) ==
OFP_METER_BAND_HEADER_SIZE)
# enum ofp_meter_band_type
OFPMBT_DROP = 1 # Drop packet.
OFPMBT_DSCP_REMARK = 2 # Remark DSCP in the IP header.
OFPMBT_EXPERIMENTER = 0xFFFF # Experimenter meter band.
# struct ofp_meter_band_drop
OFP_METER_BAND_DROP_PACK_STR = '!HHII4x'
OFP_METER_BAND_DROP_SIZE = 16
assert (calcsize(OFP_METER_BAND_DROP_PACK_STR) ==
OFP_METER_BAND_DROP_SIZE)
# struct ofp_meter_band_dscp_remark
OFP_METER_BAND_DSCP_REMARK_PACK_STR = '!HHIIB3x'
OFP_METER_BAND_DSCP_REMARK_SIZE = 16
assert (calcsize(OFP_METER_BAND_DSCP_REMARK_PACK_STR) ==
OFP_METER_BAND_DSCP_REMARK_SIZE)
# struct ofp_meter_band_experimenter
OFP_METER_BAND_EXPERIMENTER_PACK_STR = '!HHIII'
OFP_METER_BAND_EXPERIMENTER_SIZE = 16
assert (calcsize(OFP_METER_BAND_EXPERIMENTER_PACK_STR) ==
OFP_METER_BAND_EXPERIMENTER_SIZE)
# struct ofp_multipart_request
OFP_MULTIPART_REQUEST_PACK_STR = '!HH4x'
OFP_MULTIPART_REQUEST_SIZE = 16
assert (calcsize(OFP_MULTIPART_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
OFP_MULTIPART_REQUEST_SIZE)
# enum ofp_multipart_request_flags
OFPMPF_REQ_MORE = 1 << 0 # More requests to follow.
# struct ofp_multipart_reply
OFP_MULTIPART_REPLY_PACK_STR = '!HH4x'
OFP_MULTIPART_REPLY_SIZE = 16
assert (calcsize(OFP_MULTIPART_REPLY_PACK_STR) + OFP_HEADER_SIZE ==
OFP_MULTIPART_REPLY_SIZE)
# enum ofp_multipart_reply_flags
OFPMPF_REPLY_MORE = 1 << 0 # More replies to follow.
# enum ofp_multipart_types
OFPMP_DESC = 0
OFPMP_FLOW = 1
OFPMP_AGGREGATE = 2
OFPMP_TABLE = 3
OFPMP_PORT_STATS = 4
OFPMP_QUEUE = 5
OFPMP_GROUP = 6
OFPMP_GROUP_DESC = 7
OFPMP_GROUP_FEATURES = 8
OFPMP_METER = 9
OFPMP_METER_CONFIG = 10
OFPMP_METER_FEATURES = 11
OFPMP_TABLE_FEATURES = 12
OFPMP_PORT_DESC = 13
OFPMP_EXPERIMENTER = 0xffff
# struct ofp_desc
DESC_STR_LEN = 256
DESC_STR_LEN_STR = str(DESC_STR_LEN)
SERIAL_NUM_LEN = 32
SERIAL_NUM_LEN_STR = str(SERIAL_NUM_LEN)
OFP_DESC_PACK_STR = '!' + \
DESC_STR_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's' + \
SERIAL_NUM_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's'
OFP_DESC_SIZE = 1056
assert calcsize(OFP_DESC_PACK_STR) == OFP_DESC_SIZE
# struct ofp_flow_stats_request
_OFP_FLOW_STATS_REQUEST_0_PACK_STR = 'B3xII4xQQ'
OFP_FLOW_STATS_REQUEST_0_PACK_STR = '!' + _OFP_FLOW_STATS_REQUEST_0_PACK_STR
OFP_FLOW_STATS_REQUEST_0_SIZE = 32
assert (calcsize(OFP_FLOW_STATS_REQUEST_0_PACK_STR) ==
OFP_FLOW_STATS_REQUEST_0_SIZE)
OFP_FLOW_STATS_REQUEST_PACK_STR = (OFP_FLOW_STATS_REQUEST_0_PACK_STR +
_OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_FLOW_STATS_REQUEST_PACK_STR) ==
OFP_FLOW_STATS_REQUEST_SIZE)
# struct ofp_flow_stats
_OFP_FLOW_STATS_0_PACK_STR = 'HBxIIHHHH4xQQQ'
OFP_FLOW_STATS_0_PACK_STR = '!' + _OFP_FLOW_STATS_0_PACK_STR
OFP_FLOW_STATS_0_SIZE = 48
assert calcsize(OFP_FLOW_STATS_0_PACK_STR) == OFP_FLOW_STATS_0_SIZE
OFP_FLOW_STATS_PACK_STR = (OFP_FLOW_STATS_0_PACK_STR +
_OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_SIZE = 56
assert calcsize(OFP_FLOW_STATS_PACK_STR) == OFP_FLOW_STATS_SIZE
# struct ofp_aggregate_stats_request
_OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR = 'B3xII4xQQ'
OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR = '!' + \
_OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_0_SIZE = 32
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR) ==
OFP_AGGREGATE_STATS_REQUEST_0_SIZE)
OFP_AGGREGATE_STATS_REQUEST_PACK_STR = \
OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR + _OFP_MATCH_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_PACK_STR) ==
OFP_AGGREGATE_STATS_REQUEST_SIZE)
# struct ofp_aggregate_stats_request
OFP_AGGREGATE_STATS_REQUEST_PACK_STR = '!B3xII4xQQ' + _OFP_MATCH_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_PACK_STR) ==
OFP_AGGREGATE_STATS_REQUEST_SIZE)
# struct ofp_aggregate_stats_reply
OFP_AGGREGATE_STATS_REPLY_PACK_STR = '!QQI4x'
OFP_AGGREGATE_STATS_REPLY_SIZE = 24
assert (calcsize(OFP_AGGREGATE_STATS_REPLY_PACK_STR) ==
OFP_AGGREGATE_STATS_REPLY_SIZE)
# struct ofp_table_stats
OFP_TABLE_STATS_PACK_STR = '!B3xIQQ'
OFP_TABLE_STATS_SIZE = 24
assert calcsize(OFP_TABLE_STATS_PACK_STR) == OFP_TABLE_STATS_SIZE
# struct ofp_table_features
OFP_MAX_TABLE_NAME_LEN = 32
OFP_MAX_TABLE_NAME_LEN_STR = str(OFP_MAX_TABLE_NAME_LEN)
OFP_TABLE_FEATURES_PACK_STR = '!HB5x' + OFP_MAX_TABLE_NAME_LEN_STR + \
's' + 'QQII'
OFP_TABLE_FEATURES_SIZE = 64
assert (calcsize(OFP_TABLE_FEATURES_PACK_STR) ==
OFP_TABLE_FEATURES_SIZE)
# enum ofp_table_feature_prop_type
OFPTFPT_INSTRUCTIONS = 0
OFPTFPT_INSTRUCTIONS_MISS = 1
OFPTFPT_NEXT_TABLES = 2
OFPTFPT_NEXT_TABLES_MISS = 3
OFPTFPT_WRITE_ACTIONS = 4
OFPTFPT_WRITE_ACTIONS_MISS = 5
OFPTFPT_APPLY_ACTIONS = 6
OFPTFPT_APPLY_ACTIONS_MISS = 7
OFPTFPT_MATCH = 8
OFPTFPT_WILDCARDS = 10
OFPTFPT_WRITE_SETFIELD = 12
OFPTFPT_WRITE_SETFIELD_MISS = 13
OFPTFPT_APPLY_SETFIELD = 14
OFPTFPT_APPLY_SETFIELD_MISS = 15
OFPTFPT_EXPERIMENTER = 0xFFFE
OFPTFPT_EXPERIMENTER_MISS = 0xFFFF
# struct ofp_table_feature_prop_instructions
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR) ==
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE)
# struct ofp_table_feature_prop_next_tables
OFP_TABLE_FEATURE_PROP_NEXT_TABLES_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_NEXT_TABLES_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_NEXT_TABLES_PACK_STR) ==
OFP_TABLE_FEATURE_PROP_NEXT_TABLES_SIZE)
# struct ofp_table_feature_prop_actions
OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR) ==
OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE)
# struct ofp_table_feature_prop_oxm
OFP_TABLE_FEATURE_PROP_OXM_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_OXM_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_OXM_PACK_STR) ==
OFP_TABLE_FEATURE_PROP_OXM_SIZE)
# struct ofp_port_stats_request
OFP_PORT_STATS_REQUEST_PACK_STR = '!I4x'
OFP_PORT_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_PORT_STATS_REQUEST_PACK_STR) ==
OFP_PORT_STATS_REQUEST_SIZE)
# struct ofp_port_stats
OFP_PORT_STATS_PACK_STR = '!I4xQQQQQQQQQQQQII'
OFP_PORT_STATS_SIZE = 112
assert calcsize(OFP_PORT_STATS_PACK_STR) == OFP_PORT_STATS_SIZE
# struct ofp_queue_stats_request
OFP_QUEUE_STATS_REQUEST_PACK_STR = '!II'
OFP_QUEUE_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_QUEUE_STATS_REQUEST_PACK_STR) ==
OFP_QUEUE_STATS_REQUEST_SIZE)
# struct ofp_queue_stats
OFP_QUEUE_STATS_PACK_STR = '!IIQQQII'
OFP_QUEUE_STATS_SIZE = 40
assert calcsize(OFP_QUEUE_STATS_PACK_STR) == OFP_QUEUE_STATS_SIZE
# struct ofp_group_stats_request
OFP_GROUP_STATS_REQUEST_PACK_STR = '!I4x'
OFP_GROUP_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_GROUP_STATS_REQUEST_PACK_STR) ==
OFP_GROUP_STATS_REQUEST_SIZE)
# struct ofp_group_stats
OFP_GROUP_STATS_PACK_STR = '!H2xII4xQQII'
OFP_GROUP_STATS_SIZE = 40
assert calcsize(OFP_GROUP_STATS_PACK_STR) == OFP_GROUP_STATS_SIZE
# struct ofp_bucket_counter
OFP_BUCKET_COUNTER_PACK_STR = '!QQ'
OFP_BUCKET_COUNTER_SIZE = 16
assert calcsize(OFP_BUCKET_COUNTER_PACK_STR) == OFP_BUCKET_COUNTER_SIZE
# struct ofp_group_desc_stats
OFP_GROUP_DESC_STATS_PACK_STR = '!HBxI'
OFP_GROUP_DESC_STATS_SIZE = 8
assert calcsize(OFP_GROUP_DESC_STATS_PACK_STR) == OFP_GROUP_DESC_STATS_SIZE
# struct ofp_group_features
OFP_GROUP_FEATURES_PACK_STR = '!II4I4I'
OFP_GROUP_FEATURES_SIZE = 40
assert calcsize(OFP_GROUP_FEATURES_PACK_STR) == OFP_GROUP_FEATURES_SIZE
# enum ofp_group_capabilities
OFPGFC_SELECT_WEIGHT = 1 << 0 # Support weight for select groups.
OFPGFC_SELECT_LIVENESS = 1 << 1 # Support liveness for select groups.
OFPGFC_CHAINING = 1 << 2 # Support chaining groups.
OFPGFC_CHAINING_CHECKS = 1 << 3 # Check chaining for loops and delete
# struct ofp_meter_multipart_request
OFP_METER_MULTIPART_REQUEST_PACK_STR = '!I4x'
OFP_METER_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_METER_MULTIPART_REQUEST_PACK_STR) ==
OFP_METER_MULTIPART_REQUEST_SIZE)
# struct ofp_meter_stats
OFP_METER_STATS_PACK_STR = '!IH6xIQQII'
OFP_METER_STATS_SIZE = 40
assert calcsize(OFP_METER_STATS_PACK_STR) == OFP_METER_STATS_SIZE
# struct ofp_meter_band_stats
OFP_METER_BAND_STATS_PACK_STR = '!QQ'
OFP_METER_BAND_STATS_SIZE = 16
assert (calcsize(OFP_METER_BAND_STATS_PACK_STR) ==
OFP_METER_BAND_STATS_SIZE)
# struct ofp_meter_config
OFP_METER_CONFIG_PACK_STR = '!HHI'
OFP_METER_CONFIG_SIZE = 8
assert calcsize(OFP_METER_CONFIG_PACK_STR) == OFP_METER_CONFIG_SIZE
# struct ofp_meter_features
OFP_METER_FEATURES_PACK_STR = '!IIIBB2x'
OFP_METER_FEATURES_SIZE = 16
assert (calcsize(OFP_METER_FEATURES_PACK_STR) ==
OFP_METER_FEATURES_SIZE)
# struct ofp_experimenter_multipart_header
OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE = 8
assert (calcsize(OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR) ==
OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE)
# struct ofp_queue_get_config_request
OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR = '!I4x'
OFP_QUEUE_GET_CONFIG_REQUEST_SIZE = 16
assert (calcsize(OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR) +
OFP_HEADER_SIZE) == OFP_QUEUE_GET_CONFIG_REQUEST_SIZE
# struct ofp_queue_get_config_reply
OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR = '!I4x'
OFP_QUEUE_GET_CONFIG_REPLY_SIZE = 16
assert (calcsize(OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR) +
OFP_HEADER_SIZE) == OFP_QUEUE_GET_CONFIG_REPLY_SIZE
# struct ofp_packet_out
OFP_PACKET_OUT_PACK_STR = '!IIH6x'
OFP_PACKET_OUT_SIZE = 24
assert (calcsize(OFP_PACKET_OUT_PACK_STR) + OFP_HEADER_SIZE ==
OFP_PACKET_OUT_SIZE)
# struct ofp_role_request
OFP_ROLE_REQUEST_PACK_STR = '!I4xQ'
OFP_ROLE_REQUEST_SIZE = 24
assert (calcsize(OFP_ROLE_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ROLE_REQUEST_SIZE)
# enum ofp_controller_role
OFPCR_ROLE_NOCHANGE = 0 # Don't change current role.
OFPCR_ROLE_EQUAL = 1 # Default role, full access.
OFPCR_ROLE_MASTER = 2 # Full access, at most one master.
OFPCR_ROLE_SLAVE = 3 # Read-only access.
# struct ofp_async_config
OFP_ASYNC_CONFIG_PACK_STR = '!2I2I2I'
OFP_ASYNC_CONFIG_SIZE = 32
assert (calcsize(OFP_ASYNC_CONFIG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ASYNC_CONFIG_SIZE)
# struct ofp_packet_in
OFP_PACKET_IN_PACK_STR = '!IHBBQ'
OFP_PACKET_IN_SIZE = 32
assert (calcsize(OFP_PACKET_IN_PACK_STR) + OFP_MATCH_SIZE + OFP_HEADER_SIZE ==
OFP_PACKET_IN_SIZE)
# enum ofp_packet_in_reason
OFPR_NO_MATCH = 0 # No matching flow.
OFPR_ACTION = 1 # Action explicitly output to controller.
OFPR_INVALID_TTL = 2 # Packet has invalid TTL.
# struct ofp_flow_removed
_OFP_FLOW_REMOVED_PACK_STR0 = 'QHBBIIHHQQ'
OFP_FLOW_REMOVED_PACK_STR = '!' + _OFP_FLOW_REMOVED_PACK_STR0 + \
_OFP_MATCH_PACK_STR
OFP_FLOW_REMOVED_PACK_STR0 = '!' + _OFP_FLOW_REMOVED_PACK_STR0
OFP_FLOW_REMOVED_SIZE = 56
assert (calcsize(OFP_FLOW_REMOVED_PACK_STR) + OFP_HEADER_SIZE ==
OFP_FLOW_REMOVED_SIZE)
# enum ofp_flow_removed_reason
OFPRR_IDLE_TIMEOUT = 0 # Flow idle time exceeded idle_timeout.
OFPRR_HARD_TIMEOUT = 1 # Time exceeded hard_timeout.
OFPRR_DELETE = 2 # Evicted by a DELETE flow mod.
OFPRR_GROUP_DELETE = 3 # Group was removed.
# struct ofp_port_status
OFP_PORT_STATUS_PACK_STR = '!B7x' + _OFP_PORT_PACK_STR
OFP_PORT_STATUS_DESC_OFFSET = OFP_HEADER_SIZE + 8
OFP_PORT_STATUS_SIZE = 80
assert (calcsize(OFP_PORT_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
OFP_PORT_STATUS_SIZE)
# enum ofp_port_reason
OFPPR_ADD = 0 # The port was added.
OFPPR_DELETE = 1 # The port was removed.
OFPPR_MODIFY = 2 # Some attribute of the port has changed.
# OFPMP_EXPERIMENTER
# struct onf_experimenter_multipart_msg
# (experimenter == ONF_EXPERIMENTER_ID)
ONFMP_FLOW_MONITOR = 1870
# EXT-187 seems to have a lot of flaws.
# XXX the spec mentions ONFST_FLOW_MONITOR in some places.
# we assume it's the same as ONFMP_FLOW_MONITOR.
# XXX the spec uses OFPP_NONE. we assume it means OFPP_ANY.
# XXX onf_flow_update_full.length is commented to be 24.
# but it needs to tell the actual length of instructions.
# we assume it's variable.
# XXX the spec seems confused between instructions and actions
# for onf_flow_update_full/ONFFMF_ACTIONS. we assume they all
# are instructions.
# XXX the spec does not define payload structures for any of
# ONFT_FLOW_MONITOR_CANCEL, ONFT_FLOW_MONITOR_PAUSED, or
# ONFT_FLOW_MONITOR_RESUMED. we assume they are the same as NX.
# according to NX spec (OVS nicira-ext.h and ofp-msg.h):
# NXT_FLOW_MONITOR_CANCEL: a single u32 'id'.
# NXT_FLOW_MONITOR_PAUSED/RESUMED: empty payload
# (OF1.4 uses something different; OFPFMC_DELETE for CANCEL and
# OFPFME_ for PAUSED/RESUMED.)
# XXX onf_flow_monitor_request and onf_flow_update_full use
# match_len + oxm_fields instead of ofp_match. this pointless
# diverge from OF1.4 looks like a botch when updating from OF1.0.
# XXX the spec mentions "the current implementation of Open vSwitch"
# but, as of writing this, it doesn't have this extension implemented
# at all. we assume that it is about OF1.0 NX.
# XXX the spec mentions nx13_flow_monitor_request but I couldn't find
# it in OVS nicira-ext.h.
# onf_flow_monitor_request
# ONFMP_FLOW_MONITOR request's body is zero or more instances of this.
# id, flags, match_len, out_port, table_id, zeros[3]
ONF_FLOW_MONITOR_REQUEST_PACK_STR = '!IHHIB3x'
ONF_FLOW_MONITOR_REQUEST_SIZE = 16
assert (calcsize(ONF_FLOW_MONITOR_REQUEST_PACK_STR) ==
ONF_FLOW_MONITOR_REQUEST_SIZE)
# onf_flow_monitor_request.flags
ONFFMF_INITIAL = 1 << 0
ONFFMF_ADD = 1 << 1
ONFFMF_DELETE = 1 << 2
ONFFMF_MODIFY = 1 << 3
ONFFMF_ACTIONS = 1 << 4
ONFFMF_OWN = 1 << 5
# onf_flow_update_header
# ONFMP_FLOW_MONITOR request's body is an array of this
# length, event
ONF_FLOW_UPDATE_HEADER_PACK_STR = '!HH'
ONF_FLOW_UPDATE_HEADER_SIZE = 4
assert (calcsize(ONF_FLOW_UPDATE_HEADER_PACK_STR) ==
ONF_FLOW_UPDATE_HEADER_SIZE)
# onf_flow_update_full, excluding onf_flow_update_header
# reason, priority, idle_timeout, hard_timeout, match_len, table_id,
# pad, cookie
ONF_FLOW_UPDATE_FULL_PACK_STR = '!HHHHHBxQ'
ONF_FLOW_UPDATE_FULL_SIZE = 24 - ONF_FLOW_UPDATE_HEADER_SIZE
assert (calcsize(ONF_FLOW_UPDATE_FULL_PACK_STR) ==
ONF_FLOW_UPDATE_FULL_SIZE)
# onf_flow_update_abbrev, excluding onf_flow_update_header
# xid
ONF_FLOW_UPDATE_ABBREV_PACK_STR = '!I'
ONF_FLOW_UPDATE_ABBREV_SIZE = 8 - ONF_FLOW_UPDATE_HEADER_SIZE
assert (calcsize(ONF_FLOW_UPDATE_ABBREV_PACK_STR) ==
ONF_FLOW_UPDATE_ABBREV_SIZE)
# enum onf_flow_update_event
ONFFME_ADDED = 0 # some variations in the spec; ONFMFE_ADD, ONFFME_ADD
ONFFME_DELETED = 1
ONFFME_MODIFIED = 2
ONFFME_ABBREV = 3
# enum onf_flow_monitor_msg_type
ONFT_FLOW_MONITOR_CANCEL = 1870 # controller -> switch
ONFT_FLOW_MONITOR_PAUSED = 1871 # switch -> controller
ONFT_FLOW_MONITOR_RESUMED = 1872 # switch -> controller
# struct ofp_error_msg
OFP_ERROR_MSG_PACK_STR = '!HH'
OFP_ERROR_MSG_SIZE = 12
assert (calcsize(OFP_ERROR_MSG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ERROR_MSG_SIZE)
# enum ofp_error_type
OFPET_HELLO_FAILED = 0 # Hello protocol failed.
OFPET_BAD_REQUEST = 1 # Request was not understood.
OFPET_BAD_ACTION = 2 # Error in action description.
OFPET_BAD_INSTRUCTION = 3 # Error in instruction list.
OFPET_BAD_MATCH = 4 # Error in match.
OFPET_FLOW_MOD_FAILED = 5 # Problem modifying flow entry.
OFPET_GROUP_MOD_FAILED = 6 # Problem modifying group entry.
OFPET_PORT_MOD_FAILED = 7 # OFPT_PORT_MOD failed.
OFPET_TABLE_MOD_FAILED = 8 # Table mod request failed.
OFPET_QUEUE_OP_FAILED = 9 # Queue operation failed.
OFPET_SWITCH_CONFIG_FAILED = 10 # Switch config request failed.
OFPET_ROLE_REQUEST_FAILED = 11 # Controller Role request failed.
OFPET_METER_MOD_FAILED = 12 # Error in meter.
OFPET_TABLE_FEATURES_FAILED = 13 # Setting table features failed.
OFPET_EXPERIMENTER = 0xffff # Experimenter error messages.
# enum ofp_hello_failed_code
OFPHFC_INCOMPATIBLE = 0 # No compatible version.
OFPHFC_EPERM = 1 # Permissions error.
# enum ofp_bad_request_code
OFPBRC_BAD_VERSION = 0 # ofp_header.version not supported.
OFPBRC_BAD_TYPE = 1 # ofp_header.type not supported.
OFPBRC_BAD_MULTIPART = 2 # ofp_multipart_request.type not
# supported.
OFPBRC_BAD_EXPERIMENTER = 3 # Experimenter id not supported
# (in ofp_experimenter_header
# or ofp_multipart_request or
# ofp_multipart_reply).
OFPBRC_BAD_EXP_TYPE = 4 # Experimenter type not supported.
OFPBRC_EPERM = 5 # Permissions error.
OFPBRC_BAD_LEN = 6 # Wrong request length for type.
OFPBRC_BUFFER_EMPTY = 7 # Specified buffer has already been
# used.
OFPBRC_BUFFER_UNKNOWN = 8 # Specified buffer does not exist.
OFPBRC_BAD_TABLE_ID = 9 # Specified table-id invalid or does
# not exist.
OFPBRC_IS_SLAVE = 10 # Denied because controller is slave.
OFPBRC_BAD_PORT = 11 # Invalid port.
OFPBRC_BAD_PACKET = 12 # Invalid packet in packet-out
OFPBRC_MULTIPART_BUFFER_OVERFLOW = 13 # ofp_multipart_request
# overflowed the assigned buffer.
# enum ofp_bad_action_code
OFPBAC_BAD_TYPE = 0 # Unknown action type.
OFPBAC_BAD_LEN = 1 # Length problem in actions.
OFPBAC_BAD_EXPERIMENTER = 2 # Unknown experimenter id specified.
OFPBAC_BAD_EXP_TYPE = 3 # Unknown action type for experimenter id.
OFPBAC_BAD_OUT_PORT = 4 # Problem validating output action.
OFPBAC_BAD_ARGUMENT = 5 # Bad action argument.
OFPBAC_EPERM = 6 # Permissions error.
OFPBAC_TOO_MANY = 7 # Can't handle this many actions.
OFPBAC_BAD_QUEUE = 8 # Problem validating output queue.
OFPBAC_BAD_OUT_GROUP = 9 # Invalid group id in forward action.
OFPBAC_MATCH_INCONSISTENT = 10 # Action can't apply for this match,
# or Set-Field missing prerequisite.
OFPBAC_UNSUPPORTED_ORDER = 11 # Action order is unsupported for
# the action list in an Apply-Actions
# instruction
OFPBAC_BAD_TAG = 12 # Actions uses an unsupported tag/encap.
OFPBAC_BAD_SET_TYPE = 13 # Unsupported type in SET_FIELD action.
OFPBAC_BAD_SET_LEN = 14 # Length problem in SET_FIELD action.
OFPBAC_BAD_SET_ARGUMENT = 15    # Bad argument in SET_FIELD action.
# enum ofp_bad_instruction_code
OFPBIC_UNKNOWN_INST = 0 # Unknown instruction.
OFPBIC_UNSUP_INST = 1 # Switch or table does not support
# the instruction.
OFPBIC_BAD_TABLE_ID = 2 # Invalid Table-Id specified
OFPBIC_UNSUP_METADATA = 3 # Metadata value unsupported by datapath.
OFPBIC_UNSUP_METADATA_MASK = 4 # Metadata mask value unsupported by
# datapath.
OFPBIC_BAD_EXPERIMENTER = 5 # Unknown experimenter id specified.
OFPBIC_BAD_EXP_TYPE = 6 # Unknown instruction for experimenter id.
OFPBIC_BAD_LEN = 7              # Length problem in instructions.
OFPBIC_EPERM = 8 # Permissions error.
# enum ofp_bad_match_code
OFPBMC_BAD_TYPE = 0             # Unsupported match type specified by
                                # the match.
OFPBMC_BAD_LEN = 1              # Length problem in match.
OFPBMC_BAD_TAG = 2 # Match uses an unsupported tag/encap.
OFPBMC_BAD_DL_ADDR_MASK = 3 # Unsupported datalink addr mask -
# switch does not support arbitrary
# datalink address mask.
OFPBMC_BAD_NW_ADDR_MASK = 4 # Unsupported network addr mask -
# switch does not support arbitrary
                                # network address mask.
OFPBMC_BAD_WILDCARDS = 5 # Unsupported combination of fields
# masked or omitted in the match.
OFPBMC_BAD_FIELD = 6 # Unsupported field type in the match.
OFPBMC_BAD_VALUE = 7 # Unsupported value in a match field.
OFPBMC_BAD_MASK = 8 # Unsupported mask specified in the
# match.
OFPBMC_BAD_PREREQ = 9 # A prerequisite was not met.
OFPBMC_DUP_FIELD = 10 # A field type was duplicated.
OFPBMC_EPERM = 11 # Permissions error.
# enum ofp_flow_mod_failed_code
OFPFMFC_UNKNOWN = 0 # Unspecified error.
OFPFMFC_TABLE_FULL = 1 # Flow not added because table was full.
OFPFMFC_BAD_TABLE_ID = 2 # Table does not exist
OFPFMFC_OVERLAP = 3 # Attempted to add overlapping flow
# with CHECK_OVERLAP flag set.
OFPFMFC_EPERM = 4 # Permissions error.
OFPFMFC_BAD_TIMEOUT = 5 # Flow not added because of
# unsupported idle/hard timeout.
OFPFMFC_BAD_COMMAND = 6 # Unsupported or unknown command.
OFPFMFC_BAD_FLAGS = 7 # Unsupported or unknown flags.
# enum ofp_group_mod_failed_code
OFPGMFC_GROUP_EXISTS = 0
OFPGMFC_INVALID_GROUP = 1
OFPGMFC_WEIGHT_UNSUPPORTED = 2 # Switch does not support unequal load
# sharing with select groups.
OFPGMFC_OUT_OF_GROUPS = 3 # The group table is full.
OFPGMFC_OUT_OF_BUCKETS = 4 # The maximum number of action buckets
# for a group has been exceeded.
OFPGMFC_CHAINING_UNSUPPORTED = 5 # Switch does not support groups that
# forward to groups.
OFPGMFC_WATCH_UNSUPPORTED = 6 # This group cannot watch the
# watch_port or watch_group specified.
OFPGMFC_LOOP = 7 # Group entry would cause a loop.
OFPGMFC_UNKNOWN_GROUP = 8 # Group not modified because a group
# MODIFY attempted to modify a
# non-existent group.
OFPGMFC_CHAINED_GROUP = 9 # Group not deleted because another
# group is forwarding to it.
OFPGMFC_BAD_TYPE = 10 # Unsupported or unknown group type.
OFPGMFC_BAD_COMMAND = 11 # Unsupported or unknown command.
OFPGMFC_BAD_BUCKET = 12 # Error in bucket.
OFPGMFC_BAD_WATCH = 13 # Error in watch port/group.
OFPGMFC_EPERM = 14 # Permissions error.
# enum ofp_port_mod_failed_code
OFPPMFC_BAD_PORT = 0 # Specified port does not exist.
OFPPMFC_BAD_HW_ADDR = 1 # Specified hardware address does not
# match the port number.
OFPPMFC_BAD_CONFIG = 2 # Specified config is invalid.
OFPPMFC_BAD_ADVERTISE = 3 # Specified advertise is invalid.
OFPPMFC_EPERM = 4 # Permissions error.
# enum ofp_table_mod_failed_code
OFPTMFC_BAD_TABLE = 0 # Specified table does not exist.
OFPTMFC_BAD_CONFIG = 1 # Specified config is invalid.
OFPTMFC_EPERM = 2               # Permissions error.
# enum ofp_queue_op_failed_code
OFPQOFC_BAD_PORT = 0 # Invalid port (or port does not exist).
OFPQOFC_BAD_QUEUE = 1 # Queue does not exist.
OFPQOFC_EPERM = 2 # Permissions error.
# enum ofp_switch_config_failed_code
OFPSCFC_BAD_FLAGS = 0 # Specified flags is invalid.
OFPSCFC_BAD_LEN = 1 # Specified len is invalid.
OFPQCFC_EPERM = 2 # Permissions error.
# enum ofp_role_request_failed_code
OFPRRFC_STALE = 0 # Stale Message: old generation_id.
OFPRRFC_UNSUP = 1 # Controller role change unsupported.
OFPRRFC_BAD_ROLE = 2 # Invalid role.
# enum ofp_meter_mod_failed_code
OFPMMFC_UNKNOWN = 0 # Unspecified error.
OFPMMFC_METER_EXISTS = 1 # Meter not added because a Meter ADD
# attempted to replace an existing Meter.
OFPMMFC_INVALID_METER = 2 # Meter not added because Meter specified
# is invalid.
OFPMMFC_UNKNOWN_METER = 3 # Meter not modified because a Meter
# MODIFY attempted to modify a non-existent
# Meter.
OFPMMFC_BAD_COMMAND = 4 # Unsupported or unknown command.
OFPMMFC_BAD_FLAGS = 5 # Flag configuration unsupported.
OFPMMFC_BAD_RATE = 6 # Rate unsupported.
OFPMMFC_BAD_BURST = 7 # Burst size unsupported.
OFPMMFC_BAD_BAND = 8 # Band unsupported.
OFPMMFC_BAD_BAND_VALUE = 9 # Band value unsupported.
OFPMMFC_OUT_OF_METERS = 10      # No more meters available.
OFPMMFC_OUT_OF_BANDS = 11 # The maximum number of properties
# for a meter has been exceeded.
# enum ofp_table_features_failed_code
OFPTFFC_BAD_TABLE = 0 # Specified table does not exist.
OFPTFFC_BAD_METADATA = 1 # Invalid metadata mask.
OFPTFFC_BAD_TYPE = 2 # Unknown property type.
OFPTFFC_BAD_LEN = 3 # Length problem in properties.
OFPTFFC_BAD_ARGUMENT = 4 # Unsupported property value.
OFPTFFC_EPERM = 5 # Permissions error.
# struct ofp_error_experimenter_msg
OFP_ERROR_EXPERIMENTER_MSG_PACK_STR = '!HHI'
OFP_ERROR_EXPERIMENTER_MSG_SIZE = 16
assert (calcsize(OFP_ERROR_EXPERIMENTER_MSG_PACK_STR) +
OFP_HEADER_SIZE) == OFP_ERROR_EXPERIMENTER_MSG_SIZE
# struct ofp_experimenter_header
OFP_EXPERIMENTER_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_HEADER_SIZE = 16
assert (calcsize(OFP_EXPERIMENTER_HEADER_PACK_STR) + OFP_HEADER_SIZE
== OFP_EXPERIMENTER_HEADER_SIZE)
# exp_type values for OFPET_EXPERIMENTER (experimenter=ONF_EXPERIMENTER_ID)
ONFERR_ET_UNKNOWN = 2300
ONFERR_ET_EPERM = 2301
ONFERR_ET_BAD_ID = 2302
ONFERR_ET_BUNDLE_EXIST = 2303
ONFERR_ET_BUNDLE_CLOSED = 2304
ONFERR_ET_OUT_OF_BUNDLES = 2305
ONFERR_ET_BAD_TYPE = 2306
ONFERR_ET_BAD_FLAGS = 2307
ONFERR_ET_MSG_BAD_LEN = 2308
ONFERR_ET_MSG_BAD_XID = 2309
ONFERR_ET_MSG_UNSUP = 2310
ONFERR_ET_MSG_CONFLICT = 2311
ONFERR_ET_MSG_TOO_MANY = 2312
ONFERR_ET_FAILED = 2313
ONFERR_ET_TIMEOUT = 2314
ONFERR_ET_BUNDLE_IN_PROGRESS = 2315
ONFERR_ET_CANT_SYNC = 2320
ONFERR_ET_BAD_PRIORITY = 2360
ONFERR_ET_ASYNC_INVALUD = 2370
ONFERR_ET_ASYNC_UNSUPPORTED = 2371
ONFERR_ET_ASYNC_EPERM = 2372
ONFERR_DUP_INSTRUCTION = 2600 # the lack of _ET_ is per spec
ONFERR_ET_MPART_REQUEST_TIMEOUT = 2640
ONFERR_ET_MPART_REPLY_TIMEOUT = 2641
# struct ofp_hello
OFP_HELLO_HEADER_SIZE = 8
# struct ofp_hello_elem_header
OFP_HELLO_ELEM_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_HEADER_PACK_STR) == OFP_HELLO_ELEM_HEADER_SIZE)
# enum ofp_hello_elem_type
OFPHET_VERSIONBITMAP = 1
# struct ofp_hello_elem_versionbitmap
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR) ==
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE)
# OXM
def _oxm_tlv_header(class_, field, hasmask, length):
return (class_ << 16) | (field << 9) | (hasmask << 8) | length
def oxm_tlv_header(field, length):
return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 0, length)
def oxm_tlv_header_w(field, length):
return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 1, length * 2)
def oxm_tlv_header_extract_hasmask(header):
return (header >> 8) & 1
def oxm_tlv_header_extract_length(header):
if oxm_tlv_header_extract_hasmask(header):
        length = (header & 0xff) // 2
else:
length = header & 0xff
return length
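# Illustrative self-checks (an added sketch, not part of the original module),
# mirroring the import-time asserts used throughout this file. They assume
# OFPXMC_OPENFLOW_BASIC == 0x8000, per the OpenFlow 1.3 spec: the masked
# variant of a field doubles the payload length to cover value + mask.
assert oxm_tlv_header(0, 4) == (0x8000 << 16) | (0 << 9) | (0 << 8) | 4
assert oxm_tlv_header_w(0, 4) == (0x8000 << 16) | (0 << 9) | (1 << 8) | 8
assert oxm_tlv_header_extract_hasmask(oxm_tlv_header_w(0, 4)) == 1
assert oxm_tlv_header_extract_length(oxm_tlv_header_w(0, 4)) == 4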
oxm_types = [
oxm_fields.OpenFlowBasic('in_port', 0, oxm_fields.Int4),
oxm_fields.OpenFlowBasic('in_phy_port', 1, oxm_fields.Int4),
oxm_fields.OpenFlowBasic('metadata', 2, oxm_fields.Int8),
oxm_fields.OpenFlowBasic('eth_dst', 3, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('eth_src', 4, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('eth_type', 5, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('vlan_vid', 6, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('vlan_pcp', 7, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('ip_dscp', 8, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('ip_ecn', 9, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('ip_proto', 10, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('ipv4_src', 11, oxm_fields.IPv4Addr),
oxm_fields.OpenFlowBasic('ipv4_dst', 12, oxm_fields.IPv4Addr),
oxm_fields.OpenFlowBasic('tcp_src', 13, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('tcp_dst', 14, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('udp_src', 15, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('udp_dst', 16, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('sctp_src', 17, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('sctp_dst', 18, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('icmpv4_type', 19, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('icmpv4_code', 20, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('arp_op', 21, oxm_fields.Int2),
oxm_fields.OpenFlowBasic('arp_spa', 22, oxm_fields.IPv4Addr),
oxm_fields.OpenFlowBasic('arp_tpa', 23, oxm_fields.IPv4Addr),
oxm_fields.OpenFlowBasic('arp_sha', 24, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('arp_tha', 25, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('ipv6_src', 26, oxm_fields.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_dst', 27, oxm_fields.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_flabel', 28, oxm_fields.Int4),
oxm_fields.OpenFlowBasic('icmpv6_type', 29, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('icmpv6_code', 30, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('ipv6_nd_target', 31, oxm_fields.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_nd_sll', 32, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('ipv6_nd_tll', 33, oxm_fields.MacAddr),
oxm_fields.OpenFlowBasic('mpls_label', 34, oxm_fields.Int4),
oxm_fields.OpenFlowBasic('mpls_tc', 35, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('mpls_bos', 36, oxm_fields.Int1),
oxm_fields.OpenFlowBasic('pbb_isid', 37, oxm_fields.Int3),
oxm_fields.OpenFlowBasic('tunnel_id', 38, oxm_fields.Int8),
oxm_fields.OpenFlowBasic('ipv6_exthdr', 39, oxm_fields.Int2),
oxm_fields.ONFExperimenter('pbb_uca', 2560, oxm_fields.Int1),
oxm_fields.NiciraExtended1('tun_ipv4_src', 31, oxm_fields.IPv4Addr),
oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, oxm_fields.IPv4Addr),
]
oxm_fields.generate(__name__)
# define constants
OFP_VERSION = 0x04
OFP_TCP_PORT = 6633
MAX_XID = 0xffffffff
OFP_NO_BUFFER = 0xffffffff
| apache-2.0 |
marcomg/loggenerator | libs/fileHandler.py | 1 | 1212 | # class for handling standard operations against a file
import os
import loggenerator
import constants
import re
class FileOps():
def __init__(self, filename):
self.filename = filename
self.f = ''
def open(self):
self.f = open(self.filename, 'w')
def write(self):
self.f.write(self.hide())
def close(self):
        self.f.close()
# Set permissions to 666
def chmod(self):
        os.chmod(self.filename, 0o666)
    # Hide the username and hostname in the log file,
    # but only if they are not listed in constants.passList
def hide(self):
myloggenerator = loggenerator.functions()
myvar = myloggenerator.getLogFile()
if constants.utente not in constants.passList:
myvar = re.sub(r'\b' + constants.utente + r'\b', 'nomeutente', myvar)
if constants.nomehost not in constants.passList:
myvar = re.sub(r'\b' + constants.nomehost + r'\b', 'nomehost', myvar)
return myvar
def go(self):
self.open()
self.write()
self.close()
self.chmod()
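# A minimal sketch (added; the sample log line and the names 'alice' and
# 'myhost' are hypothetical, not from the original module) of the
# word-boundary substitution performed by hide() above:
if __name__ == '__main__':
    log = 'alice@myhost:~$ ls /home/alice'
    log = re.sub(r'\balice\b', 'nomeutente', log)
    log = re.sub(r'\bmyhost\b', 'nomehost', log)
    print(log)  # -> nomeutente@nomehost:~$ ls /home/nomeutente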
| gpl-3.0 |
osiell/server-tools | mass_editing/wizard/__init__.py | 62 | 1052 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from . import mass_editing_wizard
| agpl-3.0 |
netarchy/android-git-kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
urandu/rethinkdb | external/v8_3.30.33.16/build/gyp/test/hello/gyptest-regyp-output.py | 202 | 1077 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes and
--generator-output is used.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators, and --generator-output is not supported
# by Android and ninja, so we can only test for make.
test = TestGyp.TestGyp(formats=['make'])
CHDIR='generator-output'
test.run_gyp('hello.gyp', '--generator-output=%s' % CHDIR)
test.build('hello.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('hello', stdout="Hello, world!\n", chdir=CHDIR)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('hello', stdout="Hello, two!\n", chdir=CHDIR)
test.pass_test()
| agpl-3.0 |
cdondrup/pepper_planning | pepper_engage_human/scripts/find_interactant.py | 1 | 1709 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from actionlib import SimpleActionClient
from pnp_plugin_server.pnp_simple_plugin_server import PNPSimplePluginServer
from pepper_engage_human.msg import FindInteractantAction, FindInteractantResult
from pnp_msgs.msg import ActionResult
from pepper_move_base.msg import TrackPersonAction, TrackPersonGoal
class FindInteractant(object):
def __init__(self, name):
rospy.loginfo("Starting %s ..." % name)
self._ps = PNPSimplePluginServer(
name=name,
ActionSpec=FindInteractantAction,
execute_cb=self.execute_cb,
auto_start=False
)
rospy.loginfo("Creating tracker client")
self.start_client = SimpleActionClient("/start_tracking_person", TrackPersonAction)
self.start_client.wait_for_server()
rospy.loginfo("Tracker client connected")
self._ps.start()
rospy.loginfo("... done")
def execute_cb(self, goal):
rospy.loginfo("Finding interactant '%s'" % (goal.interactant_id,))
self.start_client.send_goal(TrackPersonGoal(id=goal.id, interactant_id=goal.interactant_id, no_turn=True))
res = FindInteractantResult()
res.result.append(ActionResult(cond="found_interactant__"+goal.interactant_id+"__"+goal.id, truth_value=True))
res.result.append(ActionResult(cond="free_interactant_id__"+goal.interactant_id, truth_value=False))
if self._ps.is_preempt_requested():
self._ps.set_preempted()
else:
self._ps.set_succeeded(res)
if __name__ == "__main__":
rospy.init_node("find_interactant")
FindInteractant(rospy.get_name())
rospy.spin()
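# A hedged usage sketch (added; illustrative only): sending a goal to this
# server from another node. The goal fields 'id' and 'interactant_id' are the
# ones read in execute_cb above; the concrete values are hypothetical.
#
#   from actionlib import SimpleActionClient
#   from pepper_engage_human.msg import FindInteractantAction, FindInteractantGoal
#
#   client = SimpleActionClient('find_interactant', FindInteractantAction)
#   client.wait_for_server()
#   client.send_goal(FindInteractantGoal(id='human_1', interactant_id='A'))
#   client.wait_for_result()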
| mit |
repotvsupertuga/repo | plugin.video.TVsupertuga/resources/lib/modules/dom_parser.py | 8 | 5193 | """
Based on Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from collections import namedtuple
DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
re_type = type(re.compile(''))
def __get_dom_content(html, name, match):
if match.endswith('/>'): return ''
# override tag name with tag from match if possible
tag = re.match('<([^\s/>]+)', match)
if tag: name = tag.group(1)
start_str = '<%s' % name
end_str = "</%s" % name
# start/end tags without matching case cause issues
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
return result
def __get_dom_elements(item, name, attrs):
if not attrs:
pattern = '(<%s(?:\s[^>]*>|/?>))' % name
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key, value in attrs.iteritems():
value_is_regex = isinstance(value, re_type)
value_is_str = isinstance(value, basestring)
pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[2])]
else:
temp_value = [value] if value_is_str else value
this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]
if not this_list:
has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
if not has_space:
pattern = '''(<{tag}[^>]*\s{key}=([^\s/>]*)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[1])]
else:
this_list = [r[0] for r in re_list if value == r[1]]
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
def __get_attribs(element):
attribs = {}
for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element):
match = match.groupdict()
value1 = match.get('value1')
value2 = match.get('value2')
value = value1 if value1 is not None else value2
if value is None: continue
attribs[match['key'].lower().strip()] = value
return attribs
def parse_dom(html, name='', attrs=None, req=False):
if attrs is None: attrs = {}
name = name.strip()
if isinstance(html, unicode) or isinstance(html, DomMatch):
html = [html]
elif isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif not isinstance(html, list):
return ''
if not name:
return ''
if not isinstance(attrs, dict):
return ''
if req:
if not isinstance(req, list):
req = [req]
req = set([key.lower() for key in req])
all_results = []
for item in html:
if isinstance(item, DomMatch):
item = item.content
results = []
for element in __get_dom_elements(item, name, attrs):
attribs = __get_attribs(element)
if req and not req <= set(attribs.keys()): continue
temp = __get_dom_content(item, name, element).strip()
results.append(DomMatch(attribs, temp))
item = item[item.find(temp, item.find(element)):]
all_results += results
return all_results
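# A minimal usage sketch (added; the HTML snippet is illustrative). Under the
# Python 2 semantics assumed above, content comes back as unicode:
if __name__ == '__main__':
    sample = '<div class="row"><a href="/x">one</a><a href="/y">two</a></div>'
    for m in parse_dom(sample, 'a', req='href'):
        print(m.attrs['href'], m.content)  # ('/x', u'one'), then ('/y', u'two') under Python 2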
| gpl-2.0 |
vberaudi/scipy | scipy/special/tests/test_spfun_stats.py | 127 | 2127 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, TestCase, run_module_suite, \
assert_array_almost_equal_nulp, assert_raises, assert_almost_equal
from scipy.special import gammaln, multigammaln
class TestMultiGammaLn(TestCase):
def test1(self):
# A test of the identity
# Gamma_1(a) = Gamma(a)
np.random.seed(1234)
a = np.abs(np.random.randn())
assert_array_equal(multigammaln(a, 1), gammaln(a))
def test2(self):
# A test of the identity
# Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5)
a = np.array([2.5, 10.0])
result = multigammaln(a, 2)
expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
assert_almost_equal(result, expected)
def test_bararg(self):
assert_raises(ValueError, multigammaln, 0.5, 1.2)
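# For reference, the identities exercised above are instances of the general
# formula (assuming scipy's standard definition of the multivariate gamma):
#   log Gamma_d(a) = (d*(d-1)/4) * log(pi) + sum_{j=1..d} gammaln(a + (1-j)/2)
# so, e.g., multigammaln(a, 2) == 0.5*log(pi) + gammaln(a) + gammaln(a - 0.5).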
def _check_multigammaln_array_result(a, d):
# Test that the shape of the array returned by multigammaln
# matches the input shape, and that all the values match
# the value computed when multigammaln is called with a scalar.
result = multigammaln(a, d)
assert_array_equal(a.shape, result.shape)
a1 = a.ravel()
result1 = result.ravel()
for i in range(a.size):
assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
def test_multigammaln_array_arg():
# Check that the array returned by multigammaln has the correct
# shape and contains the correct values. The cases have arrays
# with several differnent shapes.
# The cases include a regression test for ticket #1849
# (a = np.array([2.0]), an array with a single element).
np.random.seed(1234)
cases = [
# a, d
(np.abs(np.random.randn(3, 2)) + 5, 5),
(np.abs(np.random.randn(1, 2)) + 5, 5),
(np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
(np.array([2.0]), 3),
(np.float64(2.0), 3),
]
for a, d in cases:
yield _check_multigammaln_array_result, a, d
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
smoser/granite | granite/virt/lxc/utils.py | 1 | 1628 | # Copyright (c) 2014 Canonical ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def get_container_rootfs(instance):
return os.path.join(CONF.instances_path, instance['uuid'], 'rootfs')
def get_container_config(instance):
return os.path.join(CONF.instances_path, instance['uuid'], 'config')
def get_container_console(instance):
return os.path.join(CONF.instances_path, instance['uuid'],
'container.console')
def get_container_logfile(instance):
return os.path.join(CONF.instances_path, instance['uuid'],
                        'container.logfile')
def get_instance_path(instance):
return os.path.join(CONF.instances_path, instance['uuid'])
def get_disk_format(image_meta):
return image_meta.get('disk_format')
def parse_idmap(map_string):
    return map_string.split(':')
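# e.g. parse_idmap('0:100000:65536') == ['0', '100000', '65536']
# (an illustrative id-map style string; added note, not from the original module)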
| apache-2.0 |
KokareIITP/django | tests/template_tests/filter_tests/test_wordwrap.py | 324 | 1666 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
@setup({'wordwrap01':
'{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'})
def test_wordwrap01(self):
output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
@setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
def test_wordwrap02(self):
output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
def test_wrap(self):
self.assertEqual(
wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
def test_indent(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented',
)
def test_indent2(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 15),
'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented',
)
def test_non_string_input(self):
self.assertEqual(wordwrap(123, 2), '123')
| bsd-3-clause |
passiweinberger/passiweinberger.github.io | presentations/HTM_Intro/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
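# A small illustrative self-test (added; not part of the original recipe),
# exercising the insertion-order behaviour documented above. It assumes
# Python 2 semantics, where keys() returns a list:
if __name__ == '__main__':
    od = OrderedDict([('b', 2), ('a', 1), ('c', 3)])
    od['a'] = 10                        # overwriting keeps the original slot
    assert od.keys() == ['b', 'a', 'c']
    assert od.popitem() == ('c', 3)     # LIFO by default
    assert od.popitem(last=False) == ('b', 2)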
| apache-2.0 |
oVirt/vdsm | tests/functional/utils.py | 2 | 7730 | #
# Copyright 2013-2017 Red Hat, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
from contextlib import contextmanager
from functools import wraps
import inspect
import six
import socket
import time
import threading
from vdsm.config import config
from vdsm.common.function import retry
from vdsm import jsonrpcvdscli
from vdsm.network import ipwrapper
from vdsm.network.netconfpersistence import RunningConfig
from vdsm.network.netinfo.cache import CachingNetInfo
from vdsm.network.restore_net_config import restore
SUCCESS = 0
def cleanupRules(func):
"""
Restores previous routing rules
in case of a test failure, traceback is kept.
Assumes root privileges.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
base = ipwrapper.ruleList()
func(*args, **kwargs)
except Exception:
restoreRules(base)
raise
return wrapper
def restoreRules(base):
current = ipwrapper.ruleList()
added = set(current) - set(base)
for rule in added:
ipwrapper.ruleDel(ipwrapper.Rule.fromText(rule))
class _VdsProxy(object):
"""
Vdscli wrapper to save tests
from common boilerplate code.
"""
def __init__(self):
self.vdscli = None
def _is_connected(self):
return self.vdscli is not None
def _connect(self):
retry(self.start, (socket.error, KeyError), tries=30)
def start(self):
requestQueues = config.get('addresses', 'request_queues')
requestQueue = requestQueues.split(",")[0]
self.vdscli = jsonrpcvdscli.connect(requestQueue, xml_compat=False)
self.netinfo = self._get_netinfo()
self.config = RunningConfig()
def __getattr__(self, attr):
"""
When accessing nonexistant attribute it is looked up in self.vdscli
and usual tuple
(result['status']['code'], result['status']['message'])
is returned
"""
if hasattr(self.vdscli, attr):
def wrapper(*args, **kwargs):
result = getattr(self.vdscli, attr)(*args, **kwargs)
return _parse_result(result)
return wrapper
raise AttributeError(attr)
def netinfo_altering(func):
"""Updates the cached information that might have been altered by an
api call that has side-effects on the server."""
@wraps(func)
def call_and_update(self, *args, **kwargs):
ret = func(self, *args, **kwargs)
self.netinfo = self._get_netinfo()
if self.config is not None:
self.config = RunningConfig()
return ret
return call_and_update
def _get_netinfo(self):
response = self.getVdsCapabilities()
try:
return CachingNetInfo(response[2])
except IndexError:
raise Exception('VdsProxy: getVdsCapabilities failed. '
'code:%s msg:%s' % (response[0], response[1]))
def _get_net_args(self, vlan, bond, nics, opts):
if vlan is None:
vlan = ''
if bond is None:
bond = ''
if nics is None:
nics = ''
if opts is None:
opts = {}
return [vlan, bond, nics, opts]
def save_config(self):
self.vdscli.setSafeNetworkConfig()
def refreshNetworkCapabilities(self):
self.refreshNetinfo()
@netinfo_altering
def refreshNetinfo(self):
pass
@netinfo_altering
def restoreNetConfig(self):
restore(force=True)
@netinfo_altering
def setupNetworks(self, networks, bonds, options):
stack = inspect.stack()
# add calling method for logs
test_method, code_line = stack[3][3], stack[3][2]
options['_caller'] = '{}:{}'.format(test_method, code_line)
result = self.vdscli.setupNetworks(networks, bonds, options,
_transport_timeout=90)
return _parse_result(result)
def _vlanInRunningConfig(self, devName, vlanId):
for attrs in six.itervalues(self.config.networks):
if (int(vlanId) == attrs.get('vlan') and
(attrs.get('bonding') == devName or
attrs.get('nic') == devName)):
return True
return False
def getMtu(self, name):
if name in self.netinfo.networks:
return self.netinfo.networks[name]['mtu']
elif name in self.netinfo.vlans:
return self.netinfo.vlans[name]['mtu']
elif name in self.netinfo.bondings:
return self.netinfo.bondings[name]['mtu']
elif name in self.netinfo.nics:
return self.netinfo.nics[name]['mtu']
return None
@contextmanager
def pinger(self):
"""Keeps pinging vdsm for operations that need it"""
def ping():
while not done:
# TODO: ping is deprecated, use confirmConnectivity instead
self.vdscli.ping()
time.sleep(1)
try:
done = False
pinger_thread = threading.Thread(target=ping)
pinger_thread.start()
yield
except Exception:
raise
finally:
done = True
def getVdsStats(self):
result = self.vdscli.getVdsStats()
return _parse_result(result, 'info')
def getAllVmStats(self):
result = self.vdscli.getAllVmStats()
return _parse_result(result, 'statsList')
def getVmStats(self, vmId):
result = self.vdscli.getVmStats(vmId)
        if 'result' in result or 'statsList' in result:
code, msg, stats = _parse_result(result, 'statsList')
return code, msg, stats[0]
else:
return _parse_result(result)
def getVmList(self, vmId):
result = self.vdscli.fullList([vmId])
code, msg, vm_list = _parse_result(result, 'vmList')
return code, msg, vm_list[0]
def getVdsCapabilities(self):
result = self.vdscli.getVdsCapabilities()
return _parse_result(result, 'info')
def updateVmPolicy(self, vmId, vcpuLimit):
result = self.vdscli.updateVmPolicy([vmId, vcpuLimit])
return _parse_result(result)
_instance = _VdsProxy()
def getProxy(reconnect=False):
"""
We used to connect when a proxy was created but now
we want to connect only when the proxy is needed.
    It is used in a functional test context, so we do not
care about concurrent calls of this function.
"""
if not _instance._is_connected() or reconnect:
_instance._connect()
return _instance
def _parse_result(result, return_value=None):
status = result['status']
code = status['code']
msg = status['message']
if code == SUCCESS and return_value:
return code, msg, result.get('result', {})
else:
return code, msg
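# A hedged usage sketch (added; illustrative only -- it needs a live vdsm, so
# it is left as comments). The attribute dict passed to setupNetworks is a
# plausible example, not a documented contract:
#
#   proxy = getProxy()                  # connects lazily on first use
#   code, msg = proxy.setupNetworks(
#       {'test-net': {'nic': 'eth0', 'bootproto': 'dhcp'}}, {}, {})
#   assert code == SUCCESS, msg
#   with proxy.pinger():                # keep vdsm pinged during long ops
#       proxy.save_config()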
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/operations/_virtual_machine_sizes_operations.py | 1 | 5382 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineSizesOperations(object):
"""VirtualMachineSizesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualMachineSizeListResult"]
"""Lists all available virtual machine sizes for a subscription in a location.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2015_06_15.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes'} # type: ignore
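# A hedged usage sketch (added; illustrative only). It assumes the standard
# client wiring for this SDK, with ComputeManagementClient exposing this
# operation group as `virtual_machine_sizes`:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute.v2015_06_15 import ComputeManagementClient
#
#   client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
#   for size in client.virtual_machine_sizes.list(location="westus"):
#       print(size.name, size.number_of_cores)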
| mit |
kevin-coder/tensorflow-fork | tensorflow/contrib/receptive_field/python/util/receptive_field.py | 13 | 17090 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to compute receptive field of a fully-convolutional network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.platform import tf_logging as logging
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
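def _example_two_layer_stack():  # pragma: no cover
  """Hedged worked example (editor's addition): composes the three recurrences
  above backwards through a hypothetical two-layer stack (conv 3x3/1 pad 1,
  then conv 3x3/2 pad 1); the layer parameters are illustrative only.
  """
  rf, stride, padding = 1, 1, 0  # Start from a single output pixel.
  # Walk backwards: the top layer (conv2) first, then conv1.
  for k, s, p in [(3, 2, 1), (3, 1, 1)]:
    rf = _get_rf_size_node_input(s, k, rf)
    stride = _get_effective_stride_node_input(s, stride)
    padding = _get_effective_padding_node_input(s, p, padding)
  return rf, stride, padding  # -> (5, 2, 2)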
class ReceptiveField(object):
"""Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates. If
`None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return -self.padding[axis] + y * self.stride[axis] + (
self.size[axis] - 1) / 2
def compute_feature_coordinates(self, x, axis=None):
"""Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates. If
`None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] +
(1 - self.size[axis]) / 2) / self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
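def _example_center_mapping():  # pragma: no cover
  """Hedged usage sketch (editor's addition): maps a feature coordinate back to
  input space for the illustrative (5, 2, 2) field computed above.
  """
  rf = ReceptiveField(size=(5, 5), stride=(2, 2), padding=(2, 2))
  # -padding + y * stride + (size - 1) / 2 = -2 + 3 * 2 + 2 = 6 on each axis.
  return rf.compute_input_center_coordinates([3, 3])  # -> array([6., 6.])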
def compute_receptive_field_from_graph_def(graph_def,
input_node,
output_node,
stop_propagation=None,
input_resolution=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operations or scope names for which to stop the
propagation of the receptive field.
input_resolution: 2D list. If the input resolution to the model is fixed and
known, this may be set. This is helpful for cases where the RF parameters
vary depending on the input resolution (this happens since SAME padding in
tensorflow depends on input resolution in general). If this is None, it is
assumed that the input resolution is unknown, so some RF parameters may be
unknown (depending on the model architecture).
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If network is not aligned or if either input or output nodes
cannot be found. For network criterion alignment, see
photos/vision/features/delf/g3doc/rf_computation.md
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, framework_ops.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, framework_ops.Tensor):
input_node = input_node.op.name
if isinstance(output_node, framework_ops.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
node_info, name_to_node = graph_compute_order.get_compute_order(
graph_def=graph_def,
input_node_name=input_node,
input_node_size=input_resolution)
# Sort in reverse topological order.
ordered_node_info = sorted(node_info.items(), key=lambda x: -x[1].order)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used in conjunction with stride and kernel sizes which make it such that
# the padding to be applied would depend on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node, _, _) in ordered_node_info:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y,
_, _) = parse_layer_parameters.get_layer_params(
node, name_to_node, node_info[node.name].input_size)
logging.vlog(
3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s, input size = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, node_info[node.name].input_size))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
logging.vlog(
4, "rf_size_input_x = %s, rf_size_input_y = %s, "
"effective_stride_input_x = %s, effective_stride_input_y = %s, "
"effective_padding_input_x = %s, effective_padding_input_y = %s" %
(rf_size_input_x, rf_size_input_y, effective_stride_input_x,
effective_stride_input_y, effective_padding_input_x,
effective_padding_input_y))
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", inp_name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
if inp_name.startswith("^"):
# The character "^" denotes a control dependency, so this input node
# can be safely ignored.
continue
inp_node = name_to_node[inp_name]
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_name in rf_sizes_x:
assert inp_name in rf_sizes_y, ("Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_name)
logging.vlog(
4, "rf_sizes_x[inp_name] = %s,"
" rf_sizes_y[inp_name] = %s, "
"effective_strides_x[inp_name] = %s,"
" effective_strides_y[inp_name] = %s, "
"effective_paddings_x[inp_name] = %s,"
" effective_paddings_y[inp_name] = %s" %
(rf_sizes_x[inp_name], rf_sizes_y[inp_name],
effective_strides_x[inp_name], effective_strides_y[inp_name],
effective_paddings_x[inp_name], effective_paddings_y[inp_name]))
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_name] -
1) / 2 - effective_paddings_x[inp_name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_name] -
1) / 2 - effective_paddings_y[inp_name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_name] < rf_size_input_x:
rf_sizes_x[inp_name] = rf_size_input_x
effective_strides_x[inp_name] = effective_stride_input_x
effective_paddings_x[inp_name] = effective_padding_input_x
if rf_sizes_y[inp_name] < rf_size_input_y:
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_y[inp_name] = effective_padding_input_y
else:
assert inp_name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_name] = rf_size_input_x
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_x[inp_name] = effective_stride_input_x
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_x[inp_name] = effective_padding_input_x
effective_paddings_y[inp_name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
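# --- Hedged usage sketch (editor's addition). The node names and single-conv
# graph below are illustrative and assume a TF 1.x environment where this
# contrib package is importable:
#
#     import tensorflow as tf
#     g = tf.Graph()
#     with g.as_default():
#       images = tf.placeholder(tf.float32, (1, None, None, 3), name='input_image')
#       kernel = tf.constant(1.0, shape=(7, 7, 3, 16))
#       net = tf.nn.conv2d(images, kernel, strides=[1, 2, 2, 1],
#                          padding='VALID', name='conv')
#     rf = compute_receptive_field_from_graph_def(
#         g.as_graph_def(), 'input_image', 'conv')
#     # A single 7x7/2 VALID conv: size (7, 7), stride (2, 2), padding (0, 0).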
| apache-2.0 |
nicholasserra/sentry | src/sentry/web/frontend/project_release_tracking.py | 3 | 4046 | from __future__ import absolute_import
from hashlib import sha256
import hmac
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from uuid import uuid1
from sentry import constants
from sentry.models import ProjectOption
from sentry.plugins import plugins, ReleaseTrackingPlugin
from sentry.utils.http import absolute_uri
from sentry.web.frontend.base import ProjectView
OK_TOKEN_REGENERATED = _("Your deploy token has been regenerated. You will need to update any pre-existing deploy hooks.")
ERR_NO_FEATURE = _('The release tracking feature is not enabled for this project.')
class ProjectReleaseTrackingView(ProjectView):
required_scope = 'project:write'
def _iter_plugins(self):
for plugin in plugins.all(version=2):
if not isinstance(plugin, ReleaseTrackingPlugin):
continue
yield plugin
def _handle_enable_plugin(self, request, project):
plugin = plugins.get(request.POST['plugin'])
plugin.enable(project)
messages.add_message(
request, messages.SUCCESS,
constants.OK_PLUGIN_ENABLED.format(name=plugin.get_title()),
)
def _handle_disable_plugin(self, request, project):
plugin = plugins.get(request.POST['plugin'])
plugin.disable(project)
messages.add_message(
request, messages.SUCCESS,
constants.OK_PLUGIN_DISABLED.format(name=plugin.get_title()),
)
def _regenerate_token(self, project):
token = uuid1().hex
ProjectOption.objects.set_value(project, 'sentry:release-token', token)
return token
def _get_signature(self, project_id, plugin_id, token):
return hmac.new(
key=str(token),
msg='{}-{}'.format(plugin_id, project_id),
digestmod=sha256
).hexdigest()
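    # Hedged illustration (editor's addition): the release-hook URL embeds
    # hmac_sha256(token, "<plugin_id>-<project_id>"); an external caller could
    # reproduce it like so (placeholder values):
    #
    #     import hmac
    #     from hashlib import sha256
    #     sig = hmac.new(key='mytoken', msg='builtin-42', digestmod=sha256).hexdigest()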
def handle(self, request, organization, team, project):
token = None
if request.method == 'POST':
op = request.POST.get('op')
if op == 'regenerate-token':
token = self._regenerate_token(project)
messages.add_message(
request, messages.SUCCESS,
OK_TOKEN_REGENERATED,
)
elif op == 'enable':
self._handle_enable_plugin(request, project)
elif op == 'disable':
self._handle_disable_plugin(request, project)
return HttpResponseRedirect(request.path)
if token is None:
token = ProjectOption.objects.get_value(project, 'sentry:release-token')
if token is None:
token = self._regenerate_token(project)
enabled_plugins = []
other_plugins = []
for plugin in self._iter_plugins():
if plugin.is_enabled(project):
hook_url = absolute_uri(reverse('sentry-release-hook', kwargs={
'plugin_id': plugin.slug,
'project_id': project.id,
'signature': self._get_signature(project.id, plugin.slug, token),
}))
content = plugin.get_release_doc_html(hook_url=hook_url)
enabled_plugins.append((plugin, mark_safe(content)))
elif plugin.can_configure_for_project(project):
other_plugins.append(plugin)
context = {
'page': 'release-tracking',
'token': token,
'enabled_plugins': enabled_plugins,
'other_plugins': other_plugins,
'webhook_url': absolute_uri(reverse('sentry-release-hook', kwargs={
'plugin_id': 'builtin',
'project_id': project.id,
'signature': self._get_signature(project.id, 'builtin', token),
}))
}
return self.respond('sentry/project-release-tracking.html', context)
| bsd-3-clause |
bclay/teamtechdraft | pygithub3/services/issues/labels.py | 8 | 6175 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pygithub3.services.base import Service
class Labels(Service):
""" Consume `Labels API
<http://developer.github.com/v3/issues/labels>`_ """
def list(self, user=None, repo=None):
""" Get repository's labels
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.list', user=user, repo=repo)
return self._get_result(request)
def get(self, name, user=None, repo=None):
""" Get a single label
:param str name: Label name
:param str user: Username
:param str repo: Repo name
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.get', user=user,
repo=repo, name=name)
return self._get(request)
def create(self, data, user=None, repo=None):
""" Create a label on an repo
:param dict data: Input. See `github labels doc`_
:param str user: Username
:param str repo: Repo name
.. warning::
You must be authenticated
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.create', user=user,
repo=repo, body=data)
return self._post(request)
def update(self, name, data, user=None, repo=None):
""" Update a label on an repo
:param str name: Label name
:param dict data: Input. See `github labels doc`_
:param str user: Username
:param str repo: Repo name
.. warning::
You must be authenticated
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.update', user=user,
repo=repo, name=name, body=data)
return self._patch(request)
def delete(self, name, user=None, repo=None):
""" Delete a label on an repo
:param str name: Label name
:param str user: Username
:param str repo: Repo name
.. warning::
You must be authenticated
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.delete', user=user,
repo=repo, name=name)
return self._delete(request)
def list_by_issue(self, number, user=None, repo=None):
""" List labels for an issue
:param int number: Issue number
:param str user: Username
:param str repo: Repo name
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.list_by_issue', user=user,
repo=repo, number=number)
return self._get(request)
def add_to_issue(self, number, labels, user=None, repo=None):
""" Add labels to issue
:param int number: Issue number
:param str user: Username
:param str repo: Repo name
:param list labels: Label names
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
::
            labels_service.add_to_issue(2, ['label1', 'label2', 'label3'],
                                        user='github', repo='github')
"""
request = self.make_request('issues.labels.add_to_issue', user=user,
repo=repo, number=number, body=map(str, labels))
return self._post(request)
def remove_from_issue(self, number, label, user=None, repo=None):
""" Remove a label from an issue
:param int number: Issue number
:param str label: Label name
:param str user: Username
:param str repo: Repo name
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.remove_from_issue',
user=user,
repo=repo,
number=number,
name=label)
return self._delete(request)
def replace_all(self, number, labels, user=None, repo=None):
""" Replace all labels for a issue
:param int number: Issue number
:param list labels: New labels
:param str user: Username
:param str repo: Repo name
:returns: A :doc:`result`
.. note::
            If labels aren't specified, all labels are removed from the issue
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.replace_all',
user=user,
repo=repo,
number=number,
body=map(str, labels))
return self._put(request)
def remove_all(self, number, user=None, repo=None):
""" Remove all labels from a issue
:param int number: Issue number
:param str user: Username
:param str repo: Repo name
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.remove_all',
user=user,
repo=repo,
                                    number=number)
return self._delete(request)
def list_by_milestone(self, number, user=None, repo=None):
""" Get labels for every issue in a milestone
:param int number: Milestone ID
:param str user: Username
:param str repo: Repo name
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('issues.labels.list_by_milestone',
user=user, repo=repo, number=number)
return self._get_result(request)
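# --- Hedged usage sketch (editor's addition): services are normally reached
# through the Github facade; the user/repo values below are placeholders.
#
#     from pygithub3 import Github
#
#     gh = Github(user='copitux', repo='python-github3')
#     labels = gh.issues.labels.list().all()
#     gh.issues.labels.add_to_issue(2, ['bug', 'help wanted'])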
| apache-2.0 |
laufercenter/meld | meld/pdb_writer.py | 1 | 1653 | #
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
class PDBWriter(object):
header = 'REMARK stage {stage}'
footer = 'TER\nEND\n\n'
template = 'ATOM {atom_number:>5d} {atom_name:4s} {residue_name:>3s} {residue_number:5d} {x:8.3f}{y:8.3f}{z:8.3f}'
def __init__(self, atom_numbers, atom_names, residue_numbers, residue_names):
self._atom_numbers = atom_numbers
self._n_atoms = len(atom_numbers)
assert len(atom_names) == self._n_atoms
self._atom_names = atom_names
self._atom_names = [''.join([' ', atom_name]) if len(atom_name) < 4 else atom_name
for atom_name in self._atom_names]
assert len(residue_numbers) == self._n_atoms
self._residue_numbers = residue_numbers
assert len(residue_names) == self._n_atoms
self._residue_names = residue_names
def get_pdb_string(self, coordinates, stage):
assert coordinates.shape[0] == self._n_atoms
assert coordinates.shape[1] == 3
zipper = zip(self._atom_numbers, self._atom_names, self._residue_numbers,
self._residue_names, range(coordinates.shape[0]))
lines = [self.template.format(atom_number=atom_num, atom_name=atom_name, residue_name=res_name,
residue_number=res_num, x=coordinates[i, 0], y=coordinates[i, 1],
z=coordinates[i, 2]) for atom_num, atom_name, res_num, res_name, i in zipper]
lines.insert(0, self.header.format(stage=stage))
lines.append(self.footer)
return '\n'.join(lines)
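if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): the two-atom metadata below is
    # illustrative, not drawn from the MELD code base.
    import numpy as np
    writer = PDBWriter(atom_numbers=[1, 2], atom_names=['N', 'CA'],
                       residue_numbers=[1, 1], residue_names=['ALA', 'ALA'])
    print(writer.get_pdb_string(np.zeros((2, 3)), stage=0))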
| mit |
richardfergie/googleads-python-lib | examples/dfp/v201502/activity_service/get_all_activities.py | 4 | 1934 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all activities.
To create activities, run create_activities.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
activity_service = client.GetService('ActivityService', version='v201502')
# Create statement object to select only all activities.
statement = dfp.FilterStatement()
# Get activities by statement.
while True:
response = activity_service.getActivitiesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for activity in response['results']:
print ('Activity with ID \'%s\', name \'%s\', and type \'%s\' was '
'found.' % (activity['id'], activity['name'], activity['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/PyYAML-3.09/lib3/yaml/dumper.py | 277 | 2723 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from .emitter import *
from .serializer import *
from .representer import *
from .resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
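if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): dumpers are normally selected
    # through yaml.dump(); SafeDumper restricts output to standard YAML tags.
    import yaml
    print(yaml.dump({'a': 1, 'b': [2, 3]}, Dumper=SafeDumper,
                    default_flow_style=False))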
| apache-2.0 |
elena/django | tests/csrf_tests/tests.py | 12 | 33137 | import re
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_SESSION_KEY, CSRF_TOKEN_LENGTH, REASON_BAD_TOKEN,
REASON_NO_CSRF_COOKIE, CsrfViewMiddleware,
_compare_masked_tokens as equivalent_tokens, get_token,
)
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from .views import (
ensure_csrf_cookie_view, non_token_view_using_request_processor,
post_form_view, token_view,
)
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def __init__(self):
super().__init__()
self.session = SessionStore()
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTestMixin:
"""
Shared methods and tests for session-based and cookie-based tokens.
"""
_csrf_id = _csrf_id_cookie = '1bcdefghij2bcdefghij3bcdefghij4bcdefghij5bcdefghij6bcdefghijABCD'
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
raise NotImplementedError('This method must be implemented by a subclass.')
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
text = str(response.content, response.charset)
match = re.search('name="csrfmiddlewaretoken" value="(.*?)"', text)
csrf_token = csrf_id or self._csrf_id
self.assertTrue(
match and equivalent_tokens(csrf_token, match[1]),
"Could not find csrfmiddlewaretoken to match %s" % csrf_token
)
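    # Editor's note (hedged): Django masks the CSRF secret with a per-request
    # salt, so two tokens for the same secret differ textually; the helper
    # above therefore compares with _compare_masked_tokens rather than string
    # equality.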
def test_process_response_get_token_not_used(self):
"""
If get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
mw = CsrfViewMiddleware(non_token_view_using_request_processor)
mw.process_request(req)
mw.process_view(req, non_token_view_using_request_processor, (), {})
resp = mw(req)
csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertIs(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
If no CSRF cookies is present, the middleware rejects the incoming
request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE)
def test_process_request_csrf_cookie_no_token(self):
"""
If a CSRF cookie is present but no token, the middleware rejects
the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_BAD_TOKEN)
def test_process_request_csrf_cookie_and_token(self):
"""
If both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
If a CSRF cookie is present and no token, but the csrf_exempt decorator
has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(resp)
def test_csrf_token_in_header(self):
"""
The token may be passed in a header instead of in the form.
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_put_and_delete_rejected(self):
"""
HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
mw = CsrfViewMiddleware(post_form_view)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE)
req = TestingHttpRequest()
req.method = 'DELETE'
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE)
def test_put_and_delete_allowed(self):
"""
HTTP PUT and DELETE can get through with X-CSRFToken and a cookie.
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
CsrfTokenNode works when no CSRF cookie is set.
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
A new token is sent if the csrf_cookie is the empty string.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = ""
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is set.
"""
req = self._get_GET_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
get_token() works for a view decorated solely with requires_csrf_token.
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
def test_cookie_not_reset_on_accepted_request(self):
"""
The csrf token used in posts is changed on every request (although
stays equivalent). The csrf cookie should not change on accepted
requests. If it appears in the response, it should keep its value.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, None)
if csrf_cookie:
self.assertEqual(
csrf_cookie.value, self._csrf_id_cookie,
"CSRF cookie was changed on an accepted request"
)
@override_settings(DEBUG=True, ALLOWED_HOSTS=['www.example.com'])
def test_https_bad_referer(self):
"""
A POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - https://www.evil.org/somepage does not '
'match any trusted origins.',
status_code=403,
)
def test_https_malformed_host(self):
"""
CsrfViewMiddleware generates a 403 response if it receives an HTTPS
request with a bad host.
"""
req = self._get_GET_no_csrf_cookie_request()
req._is_secure_override = True
req.META['HTTP_HOST'] = '@malformed'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(token_view)
response = mw.process_view(req, token_view, (), {})
self.assertEqual(response.status_code, 403)
@override_settings(DEBUG=True)
def test_https_malformed_referer(self):
"""
A POST HTTPS request with a bad referer is rejected.
"""
malformed_referer_msg = 'Referer checking failed - Referer is malformed.'
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://http://www.example.com/'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
# Empty
req.META['HTTP_REFERER'] = ''
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Non-ASCII
req.META['HTTP_REFERER'] = 'ØBöIß'
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing scheme
# >>> urlparse('//example.com/')
# ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='')
req.META['HTTP_REFERER'] = '//example.com/'
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing netloc
# >>> urlparse('https://')
# ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='')
req.META['HTTP_REFERER'] = 'https://'
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
A POST HTTPS request with a good referer is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
A POST HTTPS request with a good referer is accepted where the referer
contains no trailing slash.
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def _test_https_good_referer_behind_proxy(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META.update({
'HTTP_HOST': '10.0.0.2',
'HTTP_REFERER': 'https://www.example.com/somepage',
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_HOST': 'www.example.com',
'HTTP_X_FORWARDED_PORT': '443',
})
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['dashboard.example.com'])
def test_https_csrf_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['.example.com'])
def test_https_csrf_wildcard_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'https://foo.example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain_with_different_port(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/'
req.META['SERVER_PORT'] = '4443'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def test_ensures_csrf_cookie_no_logging(self):
"""
ensure_csrf_cookie() doesn't log warnings (#19436).
"""
with self.assertRaisesMessage(AssertionError, 'no logs'):
with self.assertLogs('django.request', 'WARNING'):
req = self._get_GET_no_csrf_cookie_request()
ensure_csrf_cookie_view(req)
def test_post_data_read_failure(self):
"""
OSErrors during POST data reading are caught and treated as if the
POST data wasn't there (#20128).
"""
class CsrfPostRequest(HttpRequest):
"""
HttpRequest that can raise an OSError when accessing POST data
"""
def __init__(self, token, raise_error):
super().__init__()
self.method = 'POST'
self.raise_error = False
self.COOKIES[settings.CSRF_COOKIE_NAME] = token
# Handle both cases here to prevent duplicate code in the
# session tests.
self.session = {}
self.session[CSRF_SESSION_KEY] = token
self.POST['csrfmiddlewaretoken'] = token
self.raise_error = raise_error
def _load_post_and_files(self):
raise OSError('error reading input data')
def _get_post(self):
if self.raise_error:
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
token = ('ABC' + self._csrf_id)[:CSRF_TOKEN_LENGTH]
req = CsrfPostRequest(token, raise_error=False)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = CsrfPostRequest(token, raise_error=True)
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_BAD_TOKEN)
class CsrfViewMiddlewareTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_bare_secret_csrf_cookie_request(self):
req = self._get_POST_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie[:32]
return req
def _get_POST_bare_secret_csrf_cookie_request_with_token(self):
req = self._get_POST_bare_secret_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id_cookie[:32]
return req
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
req = self._get_GET_no_csrf_cookie_request()
resp = ensure_csrf_cookie_view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_GET_no_csrf_cookie_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
resp = mw(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp.get('Vary', ''))
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = 123
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
CSRF cookie age does not have max age set and therefore uses
session-based cookies.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = None
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, '')
def test_csrf_cookie_samesite(self):
req = self._get_GET_no_csrf_cookie_request()
with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_SAMESITE='Strict'):
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
self.assertEqual(resp.cookies['csrfcookie']['samesite'], 'Strict')
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 100000
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH)
def test_process_view_token_invalid_chars(self):
"""
If the token contains non-alphanumeric characters, it is ignored and a
new token is created.
"""
token = ('!@#' + self._csrf_id)[:CSRF_TOKEN_LENGTH]
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH)
self.assertNotEqual(csrf_cookie.value, token)
def test_bare_secret_accepted_and_replaced(self):
"""
The csrf token is reset from a bare secret.
"""
req = self._get_POST_bare_secret_csrf_cookie_request_with_token()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
self.assertIn(settings.CSRF_COOKIE_NAME, resp.cookies, "Cookie was not reset from bare secret")
csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME]
self.assertEqual(len(csrf_cookie.value), CSRF_TOKEN_LENGTH)
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com', USE_X_FORWARDED_PORT=True)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
@override_settings(CSRF_USE_SESSIONS=True, CSRF_COOKIE_DOMAIN=None)
class CsrfViewMiddlewareUseSessionsTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
"""
CSRF tests with CSRF_USE_SESSIONS=True.
"""
def _get_POST_bare_secret_csrf_cookie_request(self):
req = self._get_POST_no_csrf_cookie_request()
req.session[CSRF_SESSION_KEY] = self._csrf_id_cookie[:32]
return req
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.session[CSRF_SESSION_KEY] = self._csrf_id_cookie
return req
def test_no_session_on_request(self):
msg = (
'CSRF_USE_SESSIONS is enabled, but request.session is not set. '
'SessionMiddleware must appear before CsrfViewMiddleware in MIDDLEWARE.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
mw = CsrfViewMiddleware(lambda req: HttpResponse())
mw.process_request(HttpRequest())
def test_process_response_get_token_used(self):
"""The ensure_csrf_cookie() decorator works without middleware."""
req = self._get_GET_no_csrf_cookie_request()
ensure_csrf_cookie_view(req)
self.assertTrue(req.session.get(CSRF_SESSION_KEY, False))
def test_session_modify(self):
"""The session isn't saved if the CSRF cookie is unchanged."""
req = self._get_GET_no_csrf_cookie_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertIsNotNone(req.session.get(CSRF_SESSION_KEY))
req.session.modified = False
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertFalse(req.session.modified)
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_GET_no_csrf_cookie_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertTrue(req.session.get(CSRF_SESSION_KEY, False))
def test_token_node_with_new_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is created by the middleware
(when one was not already present).
"""
req = self._get_GET_no_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = req.session[CSRF_SESSION_KEY]
self._check_token_present(resp, csrf_id=csrf_cookie)
@override_settings(
ALLOWED_HOSTS=['www.example.com'],
SESSION_COOKIE_DOMAIN='.example.com',
USE_X_FORWARDED_PORT=True,
DEBUG=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(SESSION_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
@override_settings(ROOT_URLCONF='csrf_tests.csrf_token_error_handler_urls', DEBUG=False)
class CsrfInErrorHandlingViewsTests(SimpleTestCase):
def test_csrf_token_on_404_stays_constant(self):
response = self.client.get('/does not exist/')
# The error handler returns status code 599.
self.assertEqual(response.status_code, 599)
token1 = response.content
response = self.client.get('/does not exist/')
self.assertEqual(response.status_code, 599)
token2 = response.content
self.assertTrue(equivalent_tokens(token1.decode('ascii'), token2.decode('ascii')))
| bsd-3-clause |
goel42/spamDet | createArf.py | 1 | 2860 | from pymongo import MongoClient
import numpy
mongoclient = MongoClient('127.0.0.1', 27017)
db = mongoclient.tweets
cursor = db.bitly_urls.find()
dataset = dict()
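# Feature vector layout per URL (editor's note, inferred from the code below):
# [safe_browsing, clicks, encoders_count,
#  country_clicks_mean, country_clicks_std,
#  domain_clicks_mean, domain_clicks_std]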
def main():
find_clicks()
find_encoders_count()
find_countries_clicks()
find_domain_clicks()
#dataset['http://bit.ly/2bjLLU3'] = [False, 31, 1, 10.333333333333334, 12.498888839501783, 31.0, 0.0]
fp = open('features.arff', 'w')
for key in dataset:
try:
for i in range(6, 2, -1):
print("{0:.4}".format(dataset[key][i]), file=fp, end=",")
for i in range(2, 0, -1):
print(dataset[key][i], end=",", file=fp)
print(dataset[key][0], file=fp)
except IndexError as ierr:
continue
fp.close()
def find_domain_clicks():
cursor.rewind()
for record in cursor:
if record["shortened_url"] not in dataset:
continue
short_url = record["shortened_url"]
domains = record["referring_domains"]
domain_clicks = []
for domain in domains:
domain_clicks.append(domain["clicks"])
if len(dataset[short_url]) == 5:
            if len(domain_clicks) == 0:
                # Keep the feature vector flat: two scalar zeros, not a nested
                # list, so the index-based writer in main() stays aligned.
                dataset[short_url].extend([0.0, 0.0])
else:
dataset[short_url].append(numpy.mean(domain_clicks))
dataset[short_url].append(numpy.sqrt(numpy.var(domain_clicks)))
def find_countries_clicks():
cursor.rewind()
for record in cursor:
if record["shortened_url"] not in dataset:
continue
short_url = record["shortened_url"]
countries = record["countries"]
country_clicks = []
for country in countries:
country_clicks.append(country["clicks"])
if len(dataset[short_url]) == 3:
if len(country_clicks) == 0:
                # Pad with two zero-valued floats; appending a single [0, 0]
                # list broke the len == 5 check in find_domain_clicks.
                dataset[short_url].extend([0.0, 0.0])
else:
dataset[short_url].append(numpy.mean(country_clicks))
dataset[short_url].append(numpy.sqrt(numpy.var(country_clicks)))
def find_encoders_count():
cursor.rewind()
for record in cursor:
if record["shortened_url"] not in dataset:
continue
short_url = record["shortened_url"]
encoders_count = record["encoders_count"]
if len(dataset[short_url]) == 2:
dataset[short_url].append(encoders_count)
else:
dataset[short_url][2] = encoders_count
def find_clicks():
for record in cursor:
if 'google_safe_browsing' not in record:
continue
short_url = record["shortened_url"]
clicks = record["clicks"]
safe_browsing = record["google_safe_browsing"]
if clicks >= 2:
dataset[short_url] = [safe_browsing, clicks]
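# For reference, a completed feature vector assembled by the passes above is
# laid out as:
#   [0] google_safe_browsing flag    (find_clicks)
#   [1] total clicks                 (find_clicks)
#   [2] encoders_count               (find_encoders_count)
#   [3] mean of per-country clicks   (find_countries_clicks)
#   [4] std of per-country clicks    (find_countries_clicks)
#   [5] mean of per-domain clicks    (find_domain_clicks)
#   [6] std of per-domain clicks     (find_domain_clicks)
# main() writes each ARFF row as indices 6,5,4,3 (4-significant-figure
# floats), then 2,1, with the safe-browsing label (index 0) last.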
if __name__ == '__main__':
main()
cursor.close()
| mit |
shssoichiro/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/test_handlers.py | 77 | 11803 | import json
import os
import pytest
import unittest
import urllib2
import uuid
import wptserve
from .base import TestUsingServer, doc_root
class TestFileHandler(TestUsingServer):
def test_GET(self):
resp = self.request("/document.txt")
self.assertEqual(200, resp.getcode())
self.assertEqual("text/plain", resp.info()["Content-Type"])
self.assertEqual(open(os.path.join(doc_root, "document.txt"), 'rb').read(), resp.read())
def test_headers(self):
resp = self.request("/with_headers.txt")
self.assertEqual(200, resp.getcode())
self.assertEqual("text/html", resp.info()["Content-Type"])
self.assertEqual("PASS", resp.info()["Custom-Header"])
# This will fail if it isn't a valid uuid
uuid.UUID(resp.info()["Another-Header"])
self.assertEqual(resp.info()["Same-Value-Header"], resp.info()["Another-Header"])
self.assertEqual(resp.info()["Double-Header"], "PA, SS")
def test_range(self):
resp = self.request("/document.txt", headers={"Range":"bytes=10-19"})
self.assertEqual(206, resp.getcode())
data = resp.read()
expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
self.assertEqual(10, len(data))
self.assertEqual("bytes 10-19/%i" % len(expected), resp.info()['Content-Range'])
self.assertEqual("10", resp.info()['Content-Length'])
self.assertEqual(expected[10:20], data)
def test_range_no_end(self):
resp = self.request("/document.txt", headers={"Range":"bytes=10-"})
self.assertEqual(206, resp.getcode())
data = resp.read()
expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
self.assertEqual(len(expected) - 10, len(data))
self.assertEqual("bytes 10-%i/%i" % (len(expected) - 1, len(expected)), resp.info()['Content-Range'])
self.assertEqual(expected[10:], data)
def test_range_no_start(self):
resp = self.request("/document.txt", headers={"Range":"bytes=-10"})
self.assertEqual(206, resp.getcode())
data = resp.read()
expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
self.assertEqual(10, len(data))
self.assertEqual("bytes %i-%i/%i" % (len(expected) - 10, len(expected) - 1, len(expected)),
resp.info()['Content-Range'])
self.assertEqual(expected[-10:], data)
def test_multiple_ranges(self):
resp = self.request("/document.txt", headers={"Range":"bytes=1-2,5-7,6-10"})
self.assertEqual(206, resp.getcode())
data = resp.read()
expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
self.assertTrue(resp.info()["Content-Type"].startswith("multipart/byteranges; boundary="))
boundary = resp.info()["Content-Type"].split("boundary=")[1]
parts = data.split("--" + boundary)
self.assertEqual("\r\n", parts[0])
self.assertEqual("--", parts[-1])
expected_parts = [("1-2", expected[1:3]), ("5-10", expected[5:11])]
for expected_part, part in zip(expected_parts, parts[1:-1]):
header_string, body = part.split("\r\n\r\n")
headers = dict(item.split(": ", 1) for item in header_string.split("\r\n") if item.strip())
self.assertEqual(headers["Content-Type"], "text/plain")
self.assertEqual(headers["Content-Range"], "bytes %s/%i" % (expected_part[0], len(expected)))
self.assertEqual(expected_part[1] + "\r\n", body)
def test_range_invalid(self):
with self.assertRaises(urllib2.HTTPError) as cm:
self.request("/document.txt", headers={"Range":"bytes=11-10"})
self.assertEqual(cm.exception.code, 416)
expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
with self.assertRaises(urllib2.HTTPError) as cm:
self.request("/document.txt", headers={"Range":"bytes=%i-%i" % (len(expected), len(expected) + 10)})
self.assertEqual(cm.exception.code, 416)
def test_sub_config(self):
resp = self.request("/sub.sub.txt")
expected = b"localhost localhost %i" % self.server.port
assert resp.read().rstrip() == expected
def test_sub_headers(self):
resp = self.request("/sub_headers.sub.txt", headers={"X-Test": "PASS"})
expected = b"PASS"
assert resp.read().rstrip() == expected
def test_sub_params(self):
resp = self.request("/sub_params.sub.txt", query="test=PASS")
expected = b"PASS"
assert resp.read().rstrip() == expected
class TestFunctionHandler(TestUsingServer):
def test_string_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return "test data"
route = ("GET", "/test/test_string_rv", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(200, resp.getcode())
self.assertEqual("9", resp.info()["Content-Length"])
self.assertEqual("test data", resp.read())
def test_tuple_1_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return ()
route = ("GET", "/test/test_tuple_1_rv", handler)
self.server.router.register(*route)
with pytest.raises(urllib2.HTTPError) as cm:
self.request(route[1])
assert cm.value.code == 500
def test_tuple_2_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return [("Content-Length", 4), ("test-header", "test-value")], "test data"
route = ("GET", "/test/test_tuple_2_rv", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(200, resp.getcode())
self.assertEqual("4", resp.info()["Content-Length"])
self.assertEqual("test-value", resp.info()["test-header"])
self.assertEqual("test", resp.read())
def test_tuple_3_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return 202, [("test-header", "test-value")], "test data"
route = ("GET", "/test/test_tuple_3_rv", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(202, resp.getcode())
self.assertEqual("test-value", resp.info()["test-header"])
self.assertEqual("test data", resp.read())
def test_tuple_3_rv_1(self):
@wptserve.handlers.handler
def handler(request, response):
return (202, "Some Status"), [("test-header", "test-value")], "test data"
route = ("GET", "/test/test_tuple_3_rv_1", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(202, resp.getcode())
self.assertEqual("Some Status", resp.msg)
self.assertEqual("test-value", resp.info()["test-header"])
self.assertEqual("test data", resp.read())
def test_tuple_4_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return 202, [("test-header", "test-value")], "test data", "garbage"
route = ("GET", "/test/test_tuple_1_rv", handler)
self.server.router.register(*route)
with pytest.raises(urllib2.HTTPError) as cm:
self.request(route[1])
assert cm.value.code == 500
def test_none_rv(self):
@wptserve.handlers.handler
def handler(request, response):
return None
route = ("GET", "/test/test_none_rv", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert "Content-Length" not in resp.info()
assert resp.read() == b""
class TestJSONHandler(TestUsingServer):
def test_json_0(self):
@wptserve.handlers.json_handler
def handler(request, response):
return {"data": "test data"}
route = ("GET", "/test/test_json_0", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(200, resp.getcode())
self.assertEqual({"data": "test data"}, json.load(resp))
def test_json_tuple_2(self):
@wptserve.handlers.json_handler
def handler(request, response):
return [("Test-Header", "test-value")], {"data": "test data"}
route = ("GET", "/test/test_json_tuple_2", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(200, resp.getcode())
self.assertEqual("test-value", resp.info()["test-header"])
self.assertEqual({"data": "test data"}, json.load(resp))
def test_json_tuple_3(self):
@wptserve.handlers.json_handler
def handler(request, response):
return (202, "Giraffe"), [("Test-Header", "test-value")], {"data": "test data"}
route = ("GET", "/test/test_json_tuple_2", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(202, resp.getcode())
self.assertEqual("Giraffe", resp.msg)
self.assertEqual("test-value", resp.info()["test-header"])
self.assertEqual({"data": "test data"}, json.load(resp))
class TestPythonHandler(TestUsingServer):
def test_string(self):
resp = self.request("/test_string.py")
self.assertEqual(200, resp.getcode())
self.assertEqual("text/plain", resp.info()["Content-Type"])
self.assertEqual("PASS", resp.read())
def test_tuple_2(self):
resp = self.request("/test_tuple_2.py")
self.assertEqual(200, resp.getcode())
self.assertEqual("text/html", resp.info()["Content-Type"])
self.assertEqual("PASS", resp.info()["X-Test"])
self.assertEqual("PASS", resp.read())
def test_tuple_3(self):
resp = self.request("/test_tuple_3.py")
self.assertEqual(202, resp.getcode())
self.assertEqual("Giraffe", resp.msg)
self.assertEqual("text/html", resp.info()["Content-Type"])
self.assertEqual("PASS", resp.info()["X-Test"])
self.assertEqual("PASS", resp.read())
def test_no_main(self):
with pytest.raises(urllib2.HTTPError) as cm:
self.request("/no_main.py")
assert cm.value.code == 500
def test_invalid(self):
with pytest.raises(urllib2.HTTPError) as cm:
self.request("/invalid.py")
assert cm.value.code == 500
def test_missing(self):
with pytest.raises(urllib2.HTTPError) as cm:
self.request("/missing.py")
assert cm.value.code == 404
class TestDirectoryHandler(TestUsingServer):
def test_directory(self):
resp = self.request("/")
self.assertEqual(200, resp.getcode())
self.assertEqual("text/html", resp.info()["Content-Type"])
#Add a check that the response is actually sane
def test_subdirectory_trailing_slash(self):
resp = self.request("/subdir/")
assert resp.getcode() == 200
assert resp.info()["Content-Type"] == "text/html"
def test_subdirectory_no_trailing_slash(self):
with pytest.raises(urllib2.HTTPError) as cm:
self.request("/subdir")
assert cm.value.code == 404
class TestAsIsHandler(TestUsingServer):
def test_as_is(self):
resp = self.request("/test.asis")
self.assertEqual(202, resp.getcode())
self.assertEqual("Giraffe", resp.msg)
self.assertEqual("PASS", resp.info()["X-Test"])
self.assertEqual("Content", resp.read())
#Add a check that the response is actually sane
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/io/matlab/miobase.py | 125 | 12083 | # Authors: Travis Oliphant, Matthew Brett
"""
Base classes for MATLAB file stream reading.
MATLAB is a registered trademark of the Mathworks inc.
"""
from __future__ import division, print_function, absolute_import
import sys
import operator
from scipy._lib.six import reduce
import numpy as np
if sys.version_info[0] >= 3:
byteord = int
else:
byteord = ord
from scipy.misc import doccer
from . import byteordercodes as boc
class MatReadError(Exception):
pass
class MatWriteError(Exception):
pass
class MatReadWarning(UserWarning):
pass
doc_dict = \
{'file_arg':
'''file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True) Can also pass open file-like object.''',
'append_arg':
'''appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present.''',
'load_args':
'''byte_order : str or None, optional
None by default, implying byte order guessed from mat
file. Otherwise can be one of ('native', '=', 'little', '<',
'BIG', '>').
mat_dtype : bool, optional
If True, return arrays in same dtype as would be loaded into
MATLAB (instead of the dtype with which they are saved).
squeeze_me : bool, optional
Whether to squeeze unit matrix dimensions or not.
chars_as_strings : bool, optional
Whether to convert char arrays to string arrays.
matlab_compatible : bool, optional
Returns matrices as would be loaded by MATLAB (implies
squeeze_me=False, chars_as_strings=False, mat_dtype=True,
struct_as_record=True).''',
'struct_arg':
'''struct_as_record : bool, optional
Whether to load MATLAB structs as numpy record arrays, or as
old-style numpy arrays with dtype=object. Setting this flag to
False replicates the behavior of scipy version 0.7.x (returning
numpy object arrays). The default setting is True, because it
allows easier round-trip load and save of MATLAB files.''',
'matstream_arg':
'''mat_stream : file-like
Object with file API, open for reading.''',
'long_fields':
'''long_field_names : bool, optional
* False - maximum field name length in a structure is 31 characters
which is the documented maximum length. This is the default.
* True - maximum field name length in a structure is 63 characters
which works for MATLAB 7.6''',
'do_compression':
'''do_compression : bool, optional
Whether to compress matrices on write. Default is False.''',
'oned_as':
'''oned_as : {'row', 'column'}, optional
If 'column', write 1-D numpy arrays as column vectors.
If 'row', write 1D numpy arrays as row vectors.''',
'unicode_strings':
'''unicode_strings : bool, optional
    If True, write strings as Unicode, else use MATLAB's usual encoding.'''}
docfiller = doccer.filldoc(doc_dict)
'''
Note on architecture
======================
There are three sets of parameters relevant for reading files. The
first are *file read parameters* - containing options that are common
for reading the whole file, and therefore every variable within that
file. At the moment these are:
* mat_stream
* dtypes (derived from byte code)
* byte_order
* chars_as_strings
* squeeze_me
* struct_as_record (MATLAB 5 files)
* class_dtypes (derived from order code, MATLAB 5 files)
* codecs (MATLAB 5 files)
* uint16_codec (MATLAB 5 files)
Another set of parameters are those that apply only to the current
variable being read - the *header*:
* header related variables (different for v4 and v5 mat files)
* is_complex
* mclass
* var_stream
With the header, we need ``next_position`` to tell us where the next
variable in the stream is.
Then, for each element in a matrix, there can be *element read
parameters*. An element is, for example, one element in a MATLAB cell
array. At the moment these are:
* mat_dtype
The file-reading object contains the *file read parameters*. The
*header* is passed around as a data object, or may be read and discarded
in a single function. The *element read parameters* - the mat_dtype in
this instance, is passed into a general post-processing function - see
``mio_utils`` for details.
'''
def convert_dtypes(dtype_template, order_code):
''' Convert dtypes in mapping to given order
Parameters
----------
dtype_template : mapping
mapping with values returning numpy dtype from ``np.dtype(val)``
order_code : str
an order code suitable for using in ``dtype.newbyteorder()``
Returns
-------
dtypes : mapping
mapping where values have been replaced by
``np.dtype(val).newbyteorder(order_code)``
'''
dtypes = dtype_template.copy()
for k in dtypes:
dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code)
return dtypes
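# Illustrative use of convert_dtypes (the field names below are hypothetical;
# the real dtype templates live elsewhere in the mio modules):
#
#     >>> template = {'header': 'i4', 'data': 'f8'}
#     >>> little = convert_dtypes(template, '<')
#     >>> little['header'] == np.dtype('<i4')
#     True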
def read_dtype(mat_stream, a_dtype):
"""
Generic get of byte stream data of known type
Parameters
----------
mat_stream : file_like object
MATLAB (tm) mat file stream
a_dtype : dtype
dtype of array to read. `a_dtype` is assumed to be correct
endianness.
Returns
-------
arr : ndarray
Array of dtype `a_dtype` read from stream.
"""
num_bytes = a_dtype.itemsize
arr = np.ndarray(shape=(),
dtype=a_dtype,
buffer=mat_stream.read(num_bytes),
order='F')
return arr
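# A quick sketch of read_dtype in isolation (io.BytesIO stands in for a real
# mat file stream; the value is made up):
#
#     >>> import io
#     >>> stream = io.BytesIO(np.array(7, dtype='<u4').tostring())
#     >>> int(read_dtype(stream, np.dtype('<u4')))
#     7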
def get_matfile_version(fileobj):
"""
Return major, minor tuple depending on apparent mat file type
Where:
#. 0,x -> version 4 format mat files
#. 1,x -> version 5 format mat files
#. 2,x -> version 7.3 format mat files (HDF format)
Parameters
----------
fileobj : file_like
object implementing seek() and read()
Returns
-------
major_version : {0, 1, 2}
major MATLAB File format version
minor_version : int
minor MATLAB file format version
Raises
------
MatReadError
If the file is empty.
ValueError
The matfile version is unknown.
Notes
-----
Has the side effect of setting the file read pointer to 0
"""
# Mat4 files have a zero somewhere in first 4 bytes
fileobj.seek(0)
mopt_bytes = fileobj.read(4)
if len(mopt_bytes) == 0:
raise MatReadError("Mat file appears to be empty")
mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes)
if 0 in mopt_ints:
fileobj.seek(0)
return (0,0)
# For 5 format or 7.3 format we need to read an integer in the
# header. Bytes 124 through 128 contain a version integer and an
# endian test string
fileobj.seek(124)
tst_str = fileobj.read(4)
fileobj.seek(0)
maj_ind = int(tst_str[2] == b'I'[0])
maj_val = byteord(tst_str[maj_ind])
min_val = byteord(tst_str[1-maj_ind])
ret = (maj_val, min_val)
if maj_val in (1, 2):
return ret
raise ValueError('Unknown mat file type, version %s, %s' % ret)
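# Hedged usage sketch for dispatching on the file version ('data.mat' is a
# placeholder path):
#
#     >>> fileobj = open('data.mat', 'rb')                  # doctest: +SKIP
#     >>> major, minor = get_matfile_version(fileobj)       # doctest: +SKIP
#
# major 0 selects the v4 reader, 1 the v5 reader, and 2 indicates an
# HDF-based v7.3 file that this module does not parse.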
def matdims(arr, oned_as='column'):
"""
Determine equivalent MATLAB dimensions for given array
Parameters
----------
arr : ndarray
Input array
oned_as : {'column', 'row'}, optional
Whether 1-D arrays are returned as MATLAB row or column matrices.
Default is 'column'.
Returns
-------
dims : tuple
Shape tuple, in the form MATLAB expects it.
Notes
-----
We had to decide what shape a 1 dimensional array would be by
default. ``np.atleast_2d`` thinks it is a row vector. The
default for a vector in MATLAB (e.g. ``>> 1:12``) is a row vector.
Versions of scipy up to and including 0.11 resulted (accidentally)
in 1-D arrays being read as column vectors. For the moment, we
maintain the same tradition here.
Examples
--------
>>> matdims(np.array(1)) # numpy scalar
(1, 1)
>>> matdims(np.array([1])) # 1d array, 1 element
(1, 1)
>>> matdims(np.array([1,2])) # 1d array, 2 elements
(2, 1)
>>> matdims(np.array([[2],[3]])) # 2d array, column vector
(2, 1)
>>> matdims(np.array([[2,3]])) # 2d array, row vector
(1, 2)
>>> matdims(np.array([[[2,3]]])) # 3d array, rowish vector
(1, 1, 2)
>>> matdims(np.array([])) # empty 1d array
(0, 0)
>>> matdims(np.array([[]])) # empty 2d
(0, 0)
>>> matdims(np.array([[[]]])) # empty 3d
(0, 0, 0)
Optional argument flips 1-D shape behavior.
>>> matdims(np.array([1,2]), 'row') # 1d array, 2 elements
(1, 2)
The argument has to make sense though
>>> matdims(np.array([1,2]), 'bizarre')
Traceback (most recent call last):
...
ValueError: 1D option "bizarre" is strange
"""
shape = arr.shape
if shape == (): # scalar
return (1,1)
    if reduce(operator.mul, shape) == 0: # zero elements
return (0,) * np.max([arr.ndim, 2])
if len(shape) == 1: # 1D
if oned_as == 'column':
return shape + (1,)
elif oned_as == 'row':
return (1,) + shape
else:
raise ValueError('1D option "%s" is strange'
% oned_as)
return shape
class MatVarReader(object):
''' Abstract class defining required interface for var readers'''
def __init__(self, file_reader):
pass
def read_header(self):
''' Returns header '''
pass
def array_from_header(self, header):
''' Reads array given header '''
pass
class MatFileReader(object):
""" Base object for reading mat files
To make this class functional, you will need to override the
following methods:
matrix_getter_factory - gives object to fetch next matrix from stream
guess_byte_order - guesses file byte order from file
"""
@docfiller
def __init__(self, mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True
):
'''
Initializer for mat file reader
mat_stream : file-like
object with file API, open for reading
%(load_args)s
'''
# Initialize stream
self.mat_stream = mat_stream
self.dtypes = {}
if not byte_order:
byte_order = self.guess_byte_order()
else:
byte_order = boc.to_numpy_code(byte_order)
self.byte_order = byte_order
self.struct_as_record = struct_as_record
if matlab_compatible:
self.set_matlab_compatible()
else:
self.squeeze_me = squeeze_me
self.chars_as_strings = chars_as_strings
self.mat_dtype = mat_dtype
self.verify_compressed_data_integrity = verify_compressed_data_integrity
def set_matlab_compatible(self):
''' Sets options to return arrays as MATLAB loads them '''
self.mat_dtype = True
self.squeeze_me = False
self.chars_as_strings = False
def guess_byte_order(self):
''' As we do not know what file type we have, assume native '''
return boc.native_code
    def end_of_stream(self):
        # Peek one byte, then restore the stream position. At EOF the read
        # does not advance the position, so we only seek back when a byte
        # was actually consumed (the old curpos-1 arithmetic rewound one
        # byte too far in the EOF case).
        pos = self.mat_stream.tell()
        b = self.mat_stream.read(1)
        self.mat_stream.seek(pos)
        return len(b) == 0
def arr_dtype_number(arr, num):
''' Return dtype for given number of items per element'''
return np.dtype(arr.dtype.str[:2] + str(num))
def arr_to_chars(arr):
''' Convert string array to char array '''
dims = list(arr.shape)
if not dims:
dims = [1]
dims.append(int(arr.dtype.str[2:]))
arr = np.ndarray(shape=dims,
dtype=arr_dtype_number(arr, 1),
buffer=arr)
empties = [arr == '']
if not np.any(empties):
return arr
arr = arr.copy()
arr[empties] = ' '
return arr
| mit |
intel-ctrlsys/actsys | actsys/control/commands/power/tests/test_power_off.py | 1 | 3904 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Intel Corp.
#
"""
Test the PowerOffCommand.
"""
from .. import PowerOffCommand
from .power_fixures import *
class MockStepUpdateResource(PowerOffCommand):
"""Fail resource update mocked object"""
def __init__(self, **args):
super(MockStepUpdateResource, self).__init__(**args)
def _update_resource_state(self, new_state):
return False
class TestPowerOffCommand(PowerCommandsCommon):
"""Test case for the RemoteSshPlugin class."""
def setUp(self):
super(TestPowerOffCommand, self).setUp()
self.write_state('On:bmc_on')
self.command_options["subcommand"] = 'off'
self.command = PowerOffCommand(**self.command_options)
self.command.plugin_name = 'mock'
def test_positive_off_from_on(self):
result = self.command.execute()
self.assertEqual('Success: Power Off test_node',
result[1].message)
self.assertEqual(0, result[1].return_code)
def test_power_plugin_object_exists(self):
self.command.power_plugin = self.manager.\
create_instance('power_control', 'mock', **self.command.node_options)
result = self.command.execute()
self.assertEqual('Success: Power Off test_node',
result[1].message)
self.assertEqual(0, result[1].return_code)
def test_parse_arguments(self):
self.command.force = None
self.command.subcommand = 'bad_subcommand'
result = self.command.execute()
self.assertEqual("Incorrect arguments passed to turn off a node: "
"['test_node']", result[0].message)
self.assertEqual(-1, result[0].return_code)
def test_parse_arguments_2(self):
self.args = None
result = self.command.execute()
self.assertEqual('Success: Power Off test_node',
result[1].message)
self.assertEqual(0, result[1].return_code)
def test_parse_arguments_3(self):
self.command.subcommand = 'unknown'
result = self.command.execute()
self.assertEqual("Incorrect arguments passed to turn off a node: "
"['test_node']", result[0].message)
self.assertEqual(-1, result[0].return_code)
def test_parse_arguments_4(self):
self.command.force = True
result = self.command.execute()
self.assertEqual('Success: Power Off test_node',
result[1].message)
self.assertEqual(0, result[1].return_code)
def test_positive_off_from_off(self):
self.write_state('Off')
result = self.command.execute()
self.assertEqual('Power off for test_node: Device is already Powered off',
result[0].message)
self.assertEqual(-1, result[0].return_code)
def test_failure_to_change_state(self):
self.command.power_plugin = MockPowerPlugin(**self.options)
result = self.command.execute()
self.assertEqual('Failed to change state to Off on device '
'test_node', result[1].message)
self.assertEqual(-1, result[1].return_code)
def test_failure_to_change_state_with_exception(self):
self.command.power_plugin = MockPowerPluginException(**self.options)
result = self.command.execute()
self.assertEqual('Mock exception', result[0].message)
self.assertEqual(-1, result[0].return_code)
def test_resource_failure(self):
cmd = MockStepUpdateResource(**self.command_options)
cmd.plugin_name = 'mock'
result = cmd.execute()
self.assertEqual("Failed to inform the resource manager of the state "
"change for device ['test_node']", result[0].message)
self.assertEqual(-1, result[0].return_code)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pcinat/QuestradeAPI_PythonWrapper | src/questrade/api/streamer/StreamPublisher.py | 1 | 2023 | '''Stream Publisher
@summary: A publisher in the publish/subscribe design pattern. The class
keeps a weak reference to every instance created so that the static
onMessage callback can fan each message out to the observers of every
live StreamPublisher.
@see: http://www.questrade.com/api/documentation/streaming
@copyright: 2016
@author: Peter Cinat
@license: Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import weakref
class StreamPublisher(object):
_instances = set()
def __init__(self):
self.observers = []
self._instances.add(weakref.ref(self))
def register(self, observer):
        if observer not in self.observers:
self.observers.append(observer)
def unregister(self, observer):
if observer in self.observers:
self.observers.remove(observer)
def unregister_all(self):
if self.observers:
del self.observers[:]
def update_observers(self, payload, isBinary):
for observer in self.observers:
observer.update(payload, isBinary)
@staticmethod
def onMessage(payload, isBinary):
for i in StreamPublisher.get_instances():
            i.update_observers(payload, isBinary)
@classmethod
def get_instances(cls):
dead = set()
for ref in cls._instances:
obj = ref()
if obj is not None:
yield obj
else:
dead.add(ref)
cls._instances -= dead
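# Minimal usage sketch, kept commented out so importing this module stays
# side-effect free. The observer class is hypothetical; any object exposing
# update(payload, isBinary) will work:
#
# class EchoObserver(object):
#     def update(self, payload, isBinary):
#         print(payload)
#
# publisher = StreamPublisher()
# publisher.register(EchoObserver())
# StreamPublisher.onMessage('{"quotes": []}', False)  # fans out to every live publisher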
| apache-2.0 |
pahaz/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 88 | 9882 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
      - The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
      - The tags you want attached to the VPC. This is independent of the name parameter; note that if you pass a 'Name' key, it will override the Name of the VPC if the values differ.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
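# Tear the same VPC down again (a sketch based on the options above; name and
# cidr_block are still required so the module can locate the matching VPC)
- ec2_vpc_net:
    name: Module_dev2
    cidr_block: 10.10.0.0/16
    region: us-east-1
    state: absent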
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False in regards to the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return false.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
if multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
if tags is None:
tags = dict()
tags.update({'Name': name})
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
if cmp(tags, current_tags):
vpc.create_tags(vpc_obj.id, tags)
return True
else:
return False
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
if vpc_obj.dhcp_options_id != dhcp_id:
connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
return True
else:
return False
def get_vpc_values(vpc_obj):
if vpc_obj is not None:
vpc_values = vpc_obj.__dict__
if "region" in vpc_values:
vpc_values.pop("region")
if "item" in vpc_values:
vpc_values.pop("item")
if "connection" in vpc_values:
vpc_values.pop("connection")
return vpc_values
else:
return None
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
        # Note: boto doesn't currently provide an interface to ec2-describe-vpc-attribute,
        # which would be needed to detect the current status of the DNS options. For now we
        # just update the attributes on every run; this is not counted as a change.
try:
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)
vpc_obj = None
changed = True
except BotoServerError, e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
chrisdearman/micropython | tests/basics/set_binop.py | 16 | 1498 | # test set binary operations
sets = [set(), {1}, {1, 2}, {1, 2, 3}, {2, 3}, {2, 3, 5}, {5}, {7}]
for s in sets:
for t in sets:
print(sorted(s), '|', sorted(t), '=', sorted(s | t))
print(sorted(s), '^', sorted(t), '=', sorted(s ^ t))
print(sorted(s), '&', sorted(t), '=', sorted(s & t))
print(sorted(s), '-', sorted(t), '=', sorted(s - t))
u = s.copy()
u |= t
print(sorted(s), "|=", sorted(t), '-->', sorted(u))
u = s.copy()
u ^= t
print(sorted(s), "^=", sorted(t), '-->', sorted(u))
u = s.copy()
u &= t
print(sorted(s), "&=", sorted(t), "-->", sorted(u))
u = s.copy()
u -= t
print(sorted(s), "-=", sorted(t), "-->", sorted(u))
print(sorted(s), '==', sorted(t), '=', s == t)
print(sorted(s), '!=', sorted(t), '=', s != t)
print(sorted(s), '>', sorted(t), '=', s > t)
print(sorted(s), '>=', sorted(t), '=', s >= t)
print(sorted(s), '<', sorted(t), '=', s < t)
print(sorted(s), '<=', sorted(t), '=', s <= t)
print(set('abc') == 1)
# make sure inplace operators modify the set
s1 = s2 = set('abc')
s1 |= set('ad')
print(s1 is s2, len(s1))
s1 = s2 = set('abc')
s1 ^= set('ad')
print(s1 is s2, len(s1))
s1 = s2 = set('abc')
s1 &= set('ad')
print(s1 is s2, len(s1))
s1 = s2 = set('abc')
s1 -= set('ad')
print(s1 is s2, len(s1))
# unsupported operator
try:
set('abc') * 2
except TypeError:
print('TypeError')
| mit |
jantman/pelican-plugins | better_codeblock_line_numbering/better_codeblock_line_numbering.py | 68 | 4458 | """
Better Code-Block Line Numbering Plugin
--------------------------
Authored by Jacob Levernier, 2014
Released under the BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
For more information on this plugin, please see the attached Readme.md file.
"""
from pelican import signals # For making this plugin work with Pelican.
import os.path # For checking whether files are present in the filesystem.
import re # For using regular expressions.
def add_line_wrappers(data_passed_from_pelican):
"""A function to read through each page and post as it comes through from Pelican, find all instances of triple-backtick (```...```) code blocks, and add an HTML wrapper to each line of each of those code blocks"""
if data_passed_from_pelican._content: # If the item passed from Pelican has a "content" attribute (i.e., if it's not an image file or something else like that). NOTE: data_passed_from_pelican.content (without an underscore in front of 'content') seems to be read-only, whereas data_passed_from_pelican._content is able to be overwritten. This is somewhat explained in an IRC log from 2013-02-03 from user alexis to user webdesignhero_ at https://botbot.me/freenode/pelican/2013-02-01/?tz=America/Los_Angeles.
full_content_of_page_or_post = data_passed_from_pelican._content
else:
return # Exit the function, essentially passing over the (non-text) file.
all_instances_of_pre_elements = re.findall('<pre>.*?</pre>', full_content_of_page_or_post, re.DOTALL) # Use a regular expression to find every instance of '<pre>' followed by anything up to the first matching '</pre>'. re.DOTALL puts python's regular expression engine ('re') into a mode where a dot ('.') matches absolutely anything, including newline characters.
if(len(all_instances_of_pre_elements) > 0): # If the article/page HAS any <pre>...</pre> elements, go on. Otherwise, don't (to do so would inadvertantly wipe out the output content for that article/page).
updated_full_content_of_page_or_post = full_content_of_page_or_post # This just gives this an initial value before going into the loop below.
# Go through each <pre> element instance that we found above, and parse it:
for pre_element_to_parse in all_instances_of_pre_elements:
# Wrap each line of the <pre>...</pre> section with <span class=code-line>...</span>, following http://bililite.com/blog/2012/08/05/line-numbering-in-pre-elements/. We'll use these to add line numbers using CSS later.
# Note that below, '^' is the beginning of a string, '$' is the end of a string, and '\n' is a newline.
            replacement_text_with_beginning_of_each_line_wrapped_in_span = re.sub(r'(<pre.*?>|\n(?!</pre>))','\\1<span class="code-line">',pre_element_to_parse) # The (?!...) here is a Negative Lookahead (cf. http://www.regular-expressions.info/lookaround.html). This full regular expression says "Give me all code snippets that start with <pre ****> or start with a newline (\n), but NOT if the newline is followed immediately by '</pre>'. Take whatever you find, and replace it with what you found (\1) followed immediately by '<span class="code-line">'."
# http://stackoverflow.com/a/14625628 explains why we need to escape the backslash in the capture group reference (the '\1'). In short, python will recognize it as "\x01" if it's not escaped.
            replacement_text_with_full_line_wrapped_in_span = re.sub(r'((?<!</pre>)$|(?<!</pre>)\n)','</span>\\1',replacement_text_with_beginning_of_each_line_wrapped_in_span) # This regular expression says "Give me every end-of-string or newline that is not preceded by '</pre>' (the '(?<!...)' here is a negative lookbehind), and replace whatever you found with '</span>' followed by whatever you found (\1)."
updated_full_content_of_page_or_post = updated_full_content_of_page_or_post.replace(pre_element_to_parse,replacement_text_with_full_line_wrapped_in_span)
        # Replace the content of the page or post with our now-updated content (having gone through all instances of <pre> elements and updated them all, exiting the loop above).
data_passed_from_pelican._content = updated_full_content_of_page_or_post
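# Worked example of the transformation above, derived from the two regexes
# (note that the trailing newline before </pre> matters for the last line):
#
#   input:  '<pre>line1\nline2\n</pre>'
#   output: '<pre><span class="code-line">line1</span>\n<span class="code-line">line2</span>\n</pre>'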
# Make Pelican work (see http://docs.getpelican.com/en/3.3.0/plugins.html#how-to-create-plugins):
def register():
signals.content_object_init.connect(add_line_wrappers)
| agpl-3.0 |
tseaver/google-cloud-python | language/samples/v1/language_entities_gcs.py | 2 | 4040 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entities_gcs")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Analyzing Entities (GCS)
# description: Analyzing Entities in text file stored in Cloud Storage
# usage: python3 samples/v1/language_entities_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity.txt"]
# [START language_entities_gcs]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
def sample_analyze_entities(gcs_content_uri):
"""
Analyzing Entities in text file stored in Cloud Storage
Args:
gcs_content_uri Google Cloud Storage URI where the file content is located.
e.g. gs://[Your Bucket]/[Path to File]
"""
client = language_v1.LanguageServiceClient()
# gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt'
# Available types: PLAIN_TEXT, HTML
type_ = enums.Document.Type.PLAIN_TEXT
# Optional. If not specified, the language is automatically detected.
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entities(document, encoding_type=encoding_type)
    # Loop through entities returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Loop over the metadata associated with entity. For many known entities,
# the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
# Some entity types may have additional metadata, e.g. ADDRESS entities
# may have metadata for the address street_name, postal_code, et al.
for metadata_name, metadata_value in entity.metadata.items():
print(u"{}: {}".format(metadata_name, metadata_value))
# Loop over the mentions of this entity in the input document.
# The API currently supports proper noun mentions.
for mention in entity.mentions:
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
)
# Get the language of the text, which will be the same as
# the language specified in the request or, if not specified,
# the automatically-detected language.
print(u"Language of the text: {}".format(response.language))
# [END language_entities_gcs]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_content_uri",
type=str,
default="gs://cloud-samples-data/language/entity.txt",
)
args = parser.parse_args()
sample_analyze_entities(args.gcs_content_uri)
if __name__ == "__main__":
main()
| apache-2.0 |
gusai-francelabs/datafari | windows/python/Lib/test/test_tuple.py | 123 | 5408 | from test import test_support, seq_tests
import gc
class TupleTest(seq_tests.CommonTest):
type2test = tuple
def test_constructors(self):
super(TupleTest, self).test_constructors()
# calling built-in types without argument must return empty
self.assertEqual(tuple(), ())
t0_3 = (0, 1, 2, 3)
t0_3_bis = tuple(t0_3)
self.assertTrue(t0_3 is t0_3_bis)
self.assertEqual(tuple([]), ())
self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3))
self.assertEqual(tuple(''), ())
self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm'))
def test_truth(self):
super(TupleTest, self).test_truth()
self.assertTrue(not ())
self.assertTrue((42, ))
def test_len(self):
super(TupleTest, self).test_len()
self.assertEqual(len(()), 0)
self.assertEqual(len((0,)), 1)
self.assertEqual(len((0, 1, 2)), 3)
def test_iadd(self):
super(TupleTest, self).test_iadd()
u = (0, 1)
u2 = u
u += (2, 3)
self.assertTrue(u is not u2)
def test_imul(self):
super(TupleTest, self).test_imul()
u = (0, 1)
u2 = u
u *= 3
self.assertTrue(u is not u2)
def test_tupleresizebug(self):
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
self.assertEqual(list(tuple(f())), range(1000))
def test_hash(self):
# See SF bug 942952: Weakness in tuple hash
# The hash should:
# be non-commutative
# should spread-out closely spaced values
# should not exhibit cancellation in tuples like (x,(x,y))
# should be distinct from element hashes: hash(x)!=hash((x,))
# This test exercises those cases.
# For a pure random hash and N=50, the expected number of occupied
# buckets when tossing 252,600 balls into 2**32 buckets
# is 252,592.6, or about 7.4 expected collisions. The
# standard deviation is 2.73. On a box with 64-bit hash
# codes, no collisions are expected. Here we accept no
# more than 15 collisions. Any worse and the hash function
# is sorely suspect.
N=50
base = range(N)
xp = [(i, j) for i in base for j in base]
inps = base + [(i, j) for i in base for j in xp] + \
[(i, j) for i in xp for j in base] + xp + zip(base)
collisions = len(inps) - len(set(map(hash, inps)))
self.assertTrue(collisions <= 15)
def test_repr(self):
l0 = tuple()
l2 = (0, 1, 2)
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), repr(l0))
self.assertEqual(str(a2), repr(l2))
self.assertEqual(repr(a0), "()")
self.assertEqual(repr(a2), "(0, 1, 2)")
def _not_tracked(self, t):
# Nested tuples can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@test_support.cpython_only
def test_track_literals(self):
# Test GC-optimization of tuple literals
x, y, z = 1.5, "a", []
self._not_tracked(())
self._not_tracked((1,))
self._not_tracked((1, 2))
self._not_tracked((1, 2, "a"))
self._not_tracked((1, 2, (None, True, False, ()), int))
self._not_tracked((object(),))
self._not_tracked(((1, x), y, (2, 3)))
# Tuples with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(([],))
self._tracked(([1],))
self._tracked(({},))
self._tracked((set(),))
self._tracked((x, y, z))
def check_track_dynamic(self, tp, always_track):
x, y, z = 1.5, "a", []
check = self._tracked if always_track else self._not_tracked
check(tp())
check(tp([]))
check(tp(set()))
check(tp([1, x, y]))
check(tp(obj for obj in [1, x, y]))
check(tp(set([1, x, y])))
check(tp(tuple([obj]) for obj in [1, x, y]))
check(tuple(tp([obj]) for obj in [1, x, y]))
self._tracked(tp([z]))
self._tracked(tp([[x, y]]))
self._tracked(tp([{x: y}]))
self._tracked(tp(obj for obj in [x, y, z]))
self._tracked(tp(tuple([obj]) for obj in [x, y, z]))
self._tracked(tuple(tp([obj]) for obj in [x, y, z]))
@test_support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically constructed tuples.
self.check_track_dynamic(tuple, False)
@test_support.cpython_only
def test_track_subtypes(self):
# Tuple subtypes must always be tracked
class MyTuple(tuple):
pass
self.check_track_dynamic(MyTuple, True)
@test_support.cpython_only
def test_bug7466(self):
# Trying to untrack an unfinished tuple could crash Python
self._not_tracked(tuple(gc.collect() for i in range(101)))
def test_main():
test_support.run_unittest(TupleTest)
if __name__=="__main__":
test_main()
| apache-2.0 |
openstack/monasca-api | monasca_api/tests/test_rest.py | 2 | 2696 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base
from monasca_api.common.rest import exceptions
from monasca_api.common.rest import utils
class TestRestUtils(base.BaseTestCase):
def setUp(self):
super(TestRestUtils, self).setUp()
self.mock_json_patcher = mock.patch('monasca_api.common.rest.utils.json')
self.mock_json = self.mock_json_patcher.start()
def tearDown(self):
super(TestRestUtils, self).tearDown()
self.mock_json_patcher.stop()
def test_read_body_with_success(self):
self.mock_json.loads.return_value = ""
payload = mock.Mock()
utils.read_body(payload)
self.mock_json.loads.assert_called_once_with(payload.read.return_value)
def test_read_body_empty_content_in_payload(self):
self.mock_json.loads.return_value = ""
payload = mock.Mock()
payload.read.return_value = None
self.assertIsNone(utils.read_body(payload))
def test_read_body_json_loads_exception(self):
self.mock_json.loads.side_effect = Exception
payload = mock.Mock()
self.assertRaises(exceptions.DataConversionException,
utils.read_body, payload)
def test_read_body_unsupported_content_type(self):
unsupported_content_type = mock.Mock()
self.assertRaises(
exceptions.UnsupportedContentTypeException, utils.read_body, None,
unsupported_content_type)
def test_read_body_unreadable_content_error(self):
unreadable_content = mock.Mock()
unreadable_content.read.side_effect = Exception
self.assertRaises(
exceptions.UnreadableContentError,
utils.read_body, unreadable_content)
def test_as_json_success(self):
data = mock.Mock()
dumped_json = utils.as_json(data)
self.assertEqual(dumped_json, self.mock_json.dumps.return_value)
def test_as_json_with_exception(self):
data = mock.Mock()
self.mock_json.dumps.side_effect = Exception
self.assertRaises(exceptions.DataConversionException,
utils.as_json, data)
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_1_0/firewall_connection_statistic_broker.py | 16 | 42034 | from ..broker import Broker
class FirewallConnectionStatisticBroker(Broker):
controller = "firewall_connection_statistics"
def index(self, **kwargs):
"""Lists the available firewall connection statistics. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which firewall connection statistics information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which firewall connection statistics information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the firewall connection statistics with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the firewall connection statistics with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of firewall connection statistic methods. The listed methods will be called on each firewall connection statistic returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` FWCSID
:param sort: The data field(s) to use for sorting the output. Default is FWCSID. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each FirewallConnectionStatistic. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return firewall_connection_statistics: An array of the FirewallConnectionStatistic objects that match the specified input criteria.
:rtype firewall_connection_statistics: Array of FirewallConnectionStatistic
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified firewall connection statistic.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of firewall connection statistic methods. The listed methods will be called on each firewall connection statistic returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return firewall_connection_statistic: The firewall connection statistic identified by the specified FWCSID.
:rtype firewall_connection_statistic: FirewallConnectionStatistic
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
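# Minimal sketch of show(); the FWCSID value is a placeholder:
#
#   stat = broker.show(FWCSID=42, include=['device'])
#   print(stat.StartTime, stat.EndTime)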
def search(self, **kwargs):
"""Lists the available firewall connection statistics matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which firewall connection statistics information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which firewall connection statistics information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FWCSCurrentInUse: The number of firewall connections currently in use.
:type FWCSCurrentInUse: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FWCSCurrentInUse: The number of firewall connections currently in use.
:type FWCSCurrentInUse: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FWCSHigh: The highest recorded number of firewall connections (high-water mark).
:type FWCSHigh: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FWCSHigh: The highest recorded number of firewall connections (high-water mark).
:type FWCSHigh: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param fwIndex: The current index of the local interface of firewall.
:type fwIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param fwIndex: The current index of the local interface of firewall.
:type fwIndex: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the firewall connection statistics with this date and time as the lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the firewall connection statistics with this date and time as the upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of firewall connection statistic methods. The listed methods will be called on each firewall connection statistic returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear in the returned page, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` FWCSID
:param sort: The data field(s) to use for sorting the output. Default is FWCSID. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each FirewallConnectionStatistic. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO, used to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO, used to locate a row position within the records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against firewall connection statistics, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, EndTime, FWCSCurrentInUse, FWCSHigh, FWCSID, StartTime, fwIndex.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return firewall_connection_statistics: An array of the FirewallConnectionStatistic objects that match the specified input criteria.
:rtype firewall_connection_statistics: Array of FirewallConnectionStatistic
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
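# Sketch of search(): unlike index(), most field filters accept arrays, and a
# free-text 'query' (optionally wrapped in '/' for a regex) can be combined
# with them. All values below are placeholders:
#
#   stats = broker.search(DeviceID=[10, 11],
#                         query='/^fw/',
#                         sort=['FWCSID'], limit=50)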
def find(self, **kwargs):
"""Lists the available firewall connection statistics matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, EndTime, FWCSCurrentInUse, FWCSHigh, FWCSID, StartTime, fwIndex.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which firewall connection statistics information was collected. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FWCSCurrentInUse: The operator to apply to the field FWCSCurrentInUse. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FWCSCurrentInUse: the number of firewall connections currently in use. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_FWCSCurrentInUse: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FWCSCurrentInUse: If op_FWCSCurrentInUse is specified, the field named in this input will be compared to the value in FWCSCurrentInUse using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FWCSCurrentInUse must be specified if op_FWCSCurrentInUse is specified.
:type val_f_FWCSCurrentInUse: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FWCSCurrentInUse: If op_FWCSCurrentInUse is specified, this value will be compared to the value in FWCSCurrentInUse using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FWCSCurrentInUse must be specified if op_FWCSCurrentInUse is specified.
:type val_c_FWCSCurrentInUse: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FWCSHigh: The operator to apply to the field FWCSHigh. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FWCSHigh: the highest recorded number of firewall connections (high-water mark). For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_FWCSHigh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FWCSHigh: If op_FWCSHigh is specified, the field named in this input will be compared to the value in FWCSHigh using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FWCSHigh must be specified if op_FWCSHigh is specified.
:type val_f_FWCSHigh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FWCSHigh: If op_FWCSHigh is specified, this value will be compared to the value in FWCSHigh using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FWCSHigh must be specified if op_FWCSHigh is specified.
:type val_c_FWCSHigh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FWCSID: The operator to apply to the field FWCSID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FWCSID: The internal NetMRI identifier for this firewall connection statistics entry. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_FWCSID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FWCSID: If op_FWCSID is specified, the field named in this input will be compared to the value in FWCSID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FWCSID must be specified if op_FWCSID is specified.
:type val_f_FWCSID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FWCSID: If op_FWCSID is specified, this value will be compared to the value in FWCSID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FWCSID must be specified if op_FWCSID is specified.
:type val_c_FWCSID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_fwIndex: The operator to apply to the field fwIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. fwIndex: The current index of the local interface of the firewall. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_fwIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_fwIndex: If op_fwIndex is specified, the field named in this input will be compared to the value in fwIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_fwIndex must be specified if op_fwIndex is specified.
:type val_f_fwIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_fwIndex: If op_fwIndex is specified, this value will be compared to the value in fwIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_fwIndex must be specified if op_fwIndex is specified.
:type val_c_fwIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the firewall connection statistics with this date and time as the lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the firewall connection statistics with this date and time as the upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of firewall connection statistic methods. The listed methods will be called on each firewall connection statistic returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear in the returned page, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` FWCSID
:param sort: The data field(s) to use for sorting the output. Default is FWCSID. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each FirewallConnectionStatistic. Valid values are FWCSID, DeviceID, StartTime, EndTime, FWCSCurrentInUse, FWCSHigh, DataSourceID, fwIndex. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO, used to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO, used to locate a row position within the records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return firewall_connection_statistics: An array of the FirewallConnectionStatistic objects that match the specified input criteria.
:rtype firewall_connection_statistics: Array of FirewallConnectionStatistic
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
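# Sketch of the find() operator convention: each filtered field takes an
# op_<Field> operator plus either a constant value (val_c_<Field>) or a second
# field name to compare against (val_f_<Field>). All values are placeholders:
#
#   stats = broker.find(op_FWCSCurrentInUse='>',
#                       val_c_FWCSCurrentInUse='1000',
#                       op_StartTime='between',
#                       val_c_StartTime='2020-01-01,2020-02-01')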
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param FWCSID: The internal NetMRI identifier for this firewall connection statistics entry.
:type FWCSID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
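# The association helpers above (data_source, device, infradevice) each take
# a record's FWCSID and return the related object; a hedged example with a
# placeholder ID:
#
#   dev = broker.device(FWCSID=42)
#   src = broker.data_source(FWCSID=42)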
| apache-2.0 |
mancoast/CPythonPyc_test | crash/270_test_ftplib.py | 3 | 25025 | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import test_support
from test.test_support import HOST
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(RETR_DATA[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(object, asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error, err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return ''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return ''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = StringIO.StringIO(RETR_DATA)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler.rest, str(r))
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
sock = self.client.transfercmd('list')
self.assertIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
if ssl is not None:
tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
Psycojoker/baron | docs/conf.py | 2 | 9121 | # -*- coding: utf-8 -*-
#
# Baron documentation build configuration file, created by
# sphinx-quickstart on Sat May 10 02:16:20 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Baron'
copyright = u'2014, Laurent Peuch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Barondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Baron.tex', u'Baron Documentation',
u'Laurent Peuch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'baron', u'Baron Documentation',
[u'Laurent Peuch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Baron', u'Baron Documentation',
u'Laurent Peuch', 'Baron', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Baron'
epub_author = u'Laurent Peuch'
epub_publisher = u'Laurent Peuch'
epub_copyright = u'2014, Laurent Peuch'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| lgpl-3.0 |
PoornimaNayak/autotest-client-tests | linux-tools/patchutils/patchutils.py | 4 | 1259 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class patchutils(test.test):
"""
Autotest module for testing basic functionality
of patchutils
    @author Gopal Kalita <gokalita@in.ibm.com>
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./patchutils.sh'], cwd="%s/patchutils" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| gpl-2.0 |
sbesson/openmicroscopy | components/tools/OmeroWeb/test/integration/test_config.py | 3 | 4877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test server config in decorator.
"""
import json
from omero.testlib import ITest
from django.test import RequestFactory
from django.contrib.sessions.middleware import SessionMiddleware
from omeroweb.webclient import webclient_gateway # NOQA
from omero.gateway import BlitzGateway
from omeroweb.decorators import login_required
from omero.gateway.utils import propertiesToDict
import pytest
def default_view(request):
pass
# helper from http://codereview.stackexchange.com/questions/
# 21033/flatten-dictionary-in-python-functional-style
def flattenProperties(d):
"""
    Convert a nested dictionary to a flat map with dotted keys.
"""
def items():
for key, value in list(d.items()):
if isinstance(value, dict):
for subkey, subvalue in list(flattenProperties(value).items()):
yield key + "." + subkey, subvalue
else:
yield key, value
return dict(items())
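# Illustrative behaviour: flattenProperties({'a': {'b': 1, 'c': {'d': 2}}})
# yields {'a.b': 1, 'a.c.d': 2}.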
def test_flattenProperties():
d = {
'omero.prefix.str.1': 'mystring',
'omero.prefix.str.2': '1',
'omero.prefix.int.1': 1
}
dictprop = propertiesToDict(d, prefix='omero.prefix.')
flatprop = flattenProperties({'omero': {'prefix': dictprop}})
assert set(d.keys()) - set(flatprop.keys()) == set()
class TestConfig(ITest):
def setup_method(self, method):
# prepare session
self.r = RequestFactory().get('/rand')
middleware = SessionMiddleware()
middleware.process_request(self.r)
self.r.session.save()
self.rs = self.root.sf.getConfigService()
self.conn = BlitzGateway(client_obj=self.new_client())
def teardown_method(self, method):
self.conn.close()
self.r.session.flush()
def testDefaultConfig(self):
""" Test loading default config """
default = self.rs.getClientConfigDefaults()
login_required(default_view).load_server_settings(self.conn, self.r)
s = {"omero": {"client": self.r.session.get('server_settings', {})}}
# compare keys in default and config loaded by decorator
a = [x for x in set(flattenProperties(s).keys()) if x not in (
set(default.keys()))]
assert a == ['omero.client.email']
def testDefaultConfigConversion(self):
default = self.rs.getClientConfigDefaults()
# bool
key1 = 'omero.client.ui.tree.orphans.enabled'
self.rs.setConfigValue(key1, default[key1])
key11 = 'omero.client.ui.tree.orphans.name'
self.rs.setConfigValue(key11, default[key11])
# digit
key2 = 'omero.client.viewer.roi_limit'
self.rs.setConfigValue(key2, default[key2])
login_required(default_view).load_server_settings(self.conn, self.r)
ss = self.r.session['server_settings']
assert isinstance(ss['ui']['tree']['orphans']['enabled'], bool)
assert ss['ui']['tree']['orphans']['enabled'] == bool(default[key1])
assert isinstance(ss['ui']['tree']['orphans']['name'], str)
assert ss['ui']['tree']['orphans']['name'] == default[key11]
assert isinstance(ss['viewer']['roi_limit'], int)
assert ss['viewer']['roi_limit'] == json.loads(default[key2])
@pytest.mark.parametrize("prop", ["colleagues.label", "leaders.label",
"everyone.label"])
@pytest.mark.parametrize("label", ["foo"])
def testUpgradeDropdownMenuConfig(self, prop, label):
""" Test to set and get DropdownMenuConfig """
d = self.rs.getClientConfigDefaults()
key = "omero.client.ui.menu.dropdown.%s" % prop
try:
self.rs.setConfigValue(key, label)
# test load_server_settings directly
login_required(default_view).load_server_settings(
self.conn, self.r)
s = self.r.session.get('server_settings', {})
prop = prop.replace(".label", "")
assert s['ui']['menu']['dropdown'][prop]['label'] == label
finally:
self.rs.setConfigValue(key, d[key])
| gpl-2.0 |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/conch/error.py | 67 | 2667 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An error to represent bad things happening in Conch.
Maintainer: Paul Swartz
"""
from twisted.cred.error import UnauthorizedLogin
class ConchError(Exception):
def __init__(self, value, data = None):
Exception.__init__(self, value, data)
self.value = value
self.data = data
class NotEnoughAuthentication(Exception):
"""
This is thrown if the authentication is valid, but is not enough to
successfully verify the user. i.e. don't retry this type of
authentication, try another one.
"""
class ValidPublicKey(UnauthorizedLogin):
"""
Raised by public key checkers when they receive public key credentials
that don't contain a signature at all, but are valid in every other way.
(e.g. the public key matches one in the user's authorized_keys file).
Protocol code (eg
L{SSHUserAuthServer<twisted.conch.ssh.userauth.SSHUserAuthServer>}) which
attempts to log in using
L{ISSHPrivateKey<twisted.cred.credentials.ISSHPrivateKey>} credentials
should be prepared to handle a failure of this type by telling the user to
re-authenticate using the same key and to include a signature with the new
attempt.
See U{http://www.ietf.org/rfc/rfc4252.txt} section 7 for more details.
"""
class IgnoreAuthentication(Exception):
"""
This is thrown to let the UserAuthServer know it doesn't need to handle the
authentication anymore.
"""
class MissingKeyStoreError(Exception):
"""
Raised if an SSHAgentServer starts receiving data without its factory
providing a keys dict on which to read/write key data.
"""
class UserRejectedKey(Exception):
"""
The user interactively rejected a key.
"""
class InvalidEntry(Exception):
"""
An entry in a known_hosts file could not be interpreted as a valid entry.
"""
class HostKeyChanged(Exception):
"""
The host key of a remote host has changed.
@ivar offendingEntry: The entry which contains the persistent host key that
disagrees with the given host key.
@type offendingEntry: L{twisted.conch.interfaces.IKnownHostEntry}
@ivar path: a reference to the known_hosts file that the offending entry
was loaded from
@type path: L{twisted.python.filepath.FilePath}
@ivar lineno: The line number of the offending entry in the given path.
@type lineno: L{int}
"""
def __init__(self, offendingEntry, path, lineno):
Exception.__init__(self)
self.offendingEntry = offendingEntry
self.path = path
self.lineno = lineno
| mit |
tensorflow/models | research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py | 1 | 8166 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for create_cococameratraps_tfexample_main."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import tempfile
import unittest
import numpy as np
from PIL import Image
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
IMAGE_HEIGHT = 360
IMAGE_WIDTH = 480
def _write_random_images_to_directory(self, directory, num_frames):
for frame_num in range(num_frames):
img = np.random.randint(0, high=256,
size=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3),
dtype=np.uint8)
pil_image = Image.fromarray(img)
fname = 'im_' + str(frame_num) + '.jpg'
pil_image.save(os.path.join(directory, fname), 'JPEG')
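  # _create_json_file below emits a minimal COCO Camera Traps style dict: an
  # 'images' list with sequence/location metadata, an 'annotations' list
  # (optionally with bboxes), and a two-entry 'categories' list.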
def _create_json_file(self, directory, num_frames, keep_bboxes=False):
json_dict = {'images': [], 'annotations': []}
json_dict['categories'] = [{'id': 0, 'name': 'empty'},
{'id': 1, 'name': 'animal'}]
for idx in range(num_frames):
im = {'id': 'im_' + str(idx),
'file_name': 'im_' + str(idx) + '.jpg',
'height': self.IMAGE_HEIGHT,
'width': self.IMAGE_WIDTH,
'seq_id': 'seq_1',
'seq_num_frames': num_frames,
'frame_num': idx,
'location': 'loc_' + str(idx),
'date_captured': str(datetime.datetime.now())
}
json_dict['images'].append(im)
ann = {'id': 'ann' + str(idx),
'image_id': 'im_' + str(idx),
'category_id': 1,
}
if keep_bboxes:
ann['bbox'] = [0.0 * self.IMAGE_WIDTH,
0.1 * self.IMAGE_HEIGHT,
0.5 * self.IMAGE_WIDTH,
0.5 * self.IMAGE_HEIGHT]
json_dict['annotations'].append(ann)
json_path = os.path.join(directory, 'test_file.json')
with tf.io.gfile.GFile(json_path, 'w') as f:
json.dump(json_dict, f)
return json_path
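  # Note: the bbox written above is in absolute pixels as [x, y, width,
  # height]; the expectations in assert_expected_example_bbox assume the
  # pipeline normalizes it to [ymin, xmin, ymax, xmax] = [0.1, 0.0, 0.6, 0.5].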
def assert_expected_example_bbox(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_beam_pipeline(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
def test_beam_pipeline_bbox(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames, keep_bboxes=True)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord,
keep_bboxes=True)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example_bbox(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
tejal29/pants | tests/python/pants_test/tasks/test_jar_publish_integration.py | 1 | 9359 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pytest
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_rmtree
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.tasks.test_base import is_exe
def shared_artifacts(version, extra_jar=None):
published_file_list = ['ivy-{0}.xml'.format(version),
'hello-greet-{0}.jar'.format(version),
'hello-greet-{0}.pom'.format(version),
'hello-greet-{0}-sources.jar'.format(version)]
if extra_jar:
published_file_list.append(extra_jar)
return {'com/pants/testproject/publish/hello-greet/{0}/'.format(version): published_file_list}
def publish_extra_config(unique_config):
return {
'jar-publish': {
'publish_extras': {
'extra_test_jar_example': unique_config,
},
},
'backends': {
'packages': [
'example.pants_publish_plugin',
'internal_backend.repositories',
],
},
}
class JarPublishIntegrationTest(PantsRunIntegrationTest):
SCALADOC = is_exe('scaladoc')
JAVADOC = is_exe('javadoc')
# This is where all pushdb properties files will end up.
@property
def pushdb_root(self):
return os.path.join(get_buildroot(), 'testprojects', 'ivy', 'pushdb')
def setUp(self):
safe_rmtree(self.pushdb_root)
def tearDown(self):
safe_rmtree(self.pushdb_root)
@pytest.mark.skipif('not JarPublishIntegrationTest.SCALADOC',
reason='No scaladoc binary on the PATH.')
def test_scala_publish(self):
unique_artifacts = {'com/pants/testproject/publish/jvm-example-lib/0.0.1-SNAPSHOT':
['ivy-0.0.1-SNAPSHOT.xml',
'jvm-example-lib-0.0.1-SNAPSHOT.jar',
'jvm-example-lib-0.0.1-SNAPSHOT.pom',
'jvm-example-lib-0.0.1-SNAPSHOT-sources.jar'],
'com/pants/testproject/publish/hello/welcome/0.0.1-SNAPSHOT':
['ivy-0.0.1-SNAPSHOT.xml',
'welcome-0.0.1-SNAPSHOT.jar',
'welcome-0.0.1-SNAPSHOT.pom',
'welcome-0.0.1-SNAPSHOT-sources.jar'],}
self.publish_test('testprojects/src/scala/com/pants/testproject/publish:jvm-run-example-lib',
dict(unique_artifacts.items() + shared_artifacts('0.0.1-SNAPSHOT').items()),
['com.pants.testproject.publish/hello-greet/publish.properties',
'com.pants.testproject.publish/jvm-example-lib/publish.properties',
'com.pants.testproject.publish.hello/welcome/publish.properties'],
extra_options=['--doc-scaladoc-skip'],
expected_primary_artifact_count=3)
@pytest.mark.skipif('not JarPublishIntegrationTest.JAVADOC',
reason='No javadoc binary on the PATH.')
def test_java_publish(self):
self.publish_test('testprojects/src/java/com/pants/testproject/publish/hello/greet',
shared_artifacts('0.0.1-SNAPSHOT'),
['com.pants.testproject.publish/hello-greet/publish.properties'],)
def test_named_snapshot(self):
name = "abcdef0123456789"
self.publish_test('testprojects/src/java/com/pants/testproject/publish/hello/greet',
shared_artifacts(name),
['com.pants.testproject.publish/hello-greet/publish.properties'],
extra_options=['--publish-named-snapshot=%s' % name])
# Collect all the common factors for running a publish_extras test, and execute the test.
def publish_extras_runner(self, extra_config=None, artifact_name=None, success_expected=True):
self.publish_test('testprojects/src/java/com/pants/testproject/publish/hello/greet',
shared_artifacts('0.0.1-SNAPSHOT', artifact_name),
['com.pants.testproject.publish/hello-greet/publish.properties'],
extra_options=['--doc-javadoc-skip'],
extra_config=extra_config,
extra_env={'WRAPPER_SRCPATH': 'examples/src/python'},
success_expected=success_expected)
#
# Run through all the permutations of the config parameters for publish_extras.
#
def test_publish_extras_name_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
'classifier': 'classy',
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT-classy.jar')
def test_publish_extras_name(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT.jar')
def test_publish_extras_name_extension(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
'extension': 'zip'
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT.zip')
def test_publish_extras_extension(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'extension': 'zip'
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT.zip')
def test_publish_extras_extension_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'classifier': 'classy',
'extension': 'zip'
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT-classy.zip')
def test_publish_extras_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'classifier': 'classy',
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT-classy.jar')
# This test doesn't specify a proper set of parameters that uniquely name the extra artifact, and
# should fail with an error from pants.
def test_publish_extras_invalid_args(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'extension': 'jar',
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT.jar',
success_expected=False)
def publish_test(self, target, artifacts, pushdb_files, extra_options=None, extra_config=None,
extra_env=None, expected_primary_artifact_count=1, success_expected=True):
"""Tests that publishing the given target results in the expected output.
:param target: Target to test.
:param artifacts: A map from directories to a list of expected filenames.
:param extra_options: Extra command-line options to the pants run.
:param extra_config: Extra pants.ini configuration for the pants run.
:param expected_primary_artifact_count: Number of artifacts we expect to be published.
:param extra_env: Extra environment variables for the pants run."""
with temporary_dir() as publish_dir:
options = ['--publish-local=%s' % publish_dir,
'--no-publish-dryrun',
'--publish-force']
if extra_options:
options.extend(extra_options)
yes = 'y' * expected_primary_artifact_count
pants_run = self.run_pants(['publish', target] + options, config=extra_config,
stdin_data=yes, extra_env=extra_env)
if success_expected:
self.assert_success(pants_run, "'pants goal publish' expected success, but failed instead.")
else:
self.assert_failure(pants_run,
"'pants goal publish' expected failure, but succeeded instead.")
return
# New pushdb directory should be created for all artifacts.
for pushdb_file in pushdb_files:
pushdb_dir = os.path.dirname(os.path.join(self.pushdb_root, pushdb_file))
self.assertTrue(os.path.exists(pushdb_dir))
# But because we are doing local publishes, no pushdb files are created
for pushdb_file in pushdb_files:
self.assertFalse(os.path.exists(os.path.join(self.pushdb_root, pushdb_file)))
for directory, artifact_list in artifacts.items():
for artifact in artifact_list:
artifact_path = os.path.join(publish_dir, directory, artifact)
self.assertTrue(os.path.exists(artifact_path))
| apache-2.0 |
danakj/chromium | tools/android/loading/cloud/common/google_bigquery_helper.py | 7 | 4013 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import uuid
from googleapiclient import (discovery, errors)
import common.google_error_helper as google_error_helper
# Name of the dataset.
BIGQUERY_DATASET = 'clovis_dataset'
# Name of the table used as a template for new tables.
BIGQUERY_TABLE_TEMPLATE = 'report'
def GetBigQueryService(credentials):
"""Returns the BigQuery service."""
return discovery.build('bigquery', 'v2', credentials=credentials)
def GetBigQueryTableID(clovis_report_task):
"""Returns the ID of the BigQuery table associated with the task.
This ID is appended at the end of the table name.
Args:
clovis_report_task: (ClovisTask) The task, must be a 'report' task.
Returns:
str: The table ID.
"""
assert (clovis_report_task.Action() == 'report')
# Name the table after the last path component of the trace bucket.
trace_bucket = clovis_report_task.ActionParams()['trace_bucket']
table_id = os.path.basename(os.path.normpath(trace_bucket))
task_name = clovis_report_task.BackendParams().get('task_name')
if task_name is not None:
table_id += '_' + task_name
# BigQuery table names can contain only alpha numeric characters and
# underscores.
return ''.join(c for c in table_id if c.isalnum() or c == '_')
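# Illustrative example (hypothetical values): a trace_bucket of
# 'gs://clovis/my-traces/' with task_name 'run-1' gives a base name of
# 'my-traces_run-1', which is sanitized to the table ID 'mytraces_run1'.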
def GetBigQueryTableURL(project_name, table_id):
"""Returns the full URL for the BigQuery table associated with table_id."""
return 'https://bigquery.cloud.google.com/table/%s:%s.%s_%s' % (
project_name, BIGQUERY_DATASET, BIGQUERY_TABLE_TEMPLATE, table_id)
def InsertInTemplatedBigQueryTable(bigquery_service, project_name, table_id,
rows, logger):
"""Inserts rows in the BigQuery table corresponding to table_id.
Assumes that the BigQuery dataset and table template already exist.
Args:
bigquery_service: The BigQuery service.
project_name: (str) Name of the Google Cloud project.
table_id: (str) table_id as returned by GetBigQueryTableID().
rows: (list) Rows to insert in the table.
logger: (logging.Logger) The logger.
Returns:
dict: The BigQuery service response.
"""
rows_data = [{'json': row, 'insertId': str(uuid.uuid4())} for row in rows]
body = {'rows': rows_data, 'templateSuffix':'_'+table_id}
logger.info('BigQuery API request:\n' + str(body))
response = bigquery_service.tabledata().insertAll(
projectId=project_name, datasetId=BIGQUERY_DATASET,
tableId=BIGQUERY_TABLE_TEMPLATE, body=body).execute()
logger.info('BigQuery API response:\n' + str(response))
return response
def DoesBigQueryTableExist(bigquery_service, project_name, table_id, logger):
"""Returns wether the BigQuery table identified by table_id exists.
Raises a HttpError exception if the call to BigQuery API fails.
Args:
bigquery_service: The BigQuery service.
project_name: (str) Name of the Google Cloud project.
table_id: (str) table_id as returned by GetBigQueryTableID().
Returns:
bool: True if the table exists.
"""
table_name = BIGQUERY_TABLE_TEMPLATE + '_' + table_id
logger.info('Getting table information for %s.' % table_name)
try:
table = bigquery_service.tables().get(projectId=project_name,
datasetId=BIGQUERY_DATASET,
tableId=table_name).execute()
return bool(table)
except errors.HttpError as http_error:
error_content = google_error_helper.GetErrorContent(http_error)
error_reason = google_error_helper.GetErrorReason(error_content)
if error_reason == google_error_helper.REASON_NOT_FOUND:
return False
else:
logger.error('BigQuery API error (reason: "%s"):\n%s' % (
error_reason, http_error))
if error_content:
logger.error('Error details:\n%s' % error_content)
raise # Re-raise the exception.
return False
| bsd-3-clause |
assamite/itm_project | returns/first/itmProject/compressor/utils.py | 2 | 4498 | '''Various utility functions
'''
from collections import Counter
from heapq import heappush, heappop, heapify
from struct import pack, unpack
import numpy as np
import re
def count_chars(s):
    '''Count the number of occurrences of each char in str s'''
return Counter(s)
def count_digits(iterable, unsign = True, split=""):
"""Count number of each digit (and minus symbols) in iterable consisting of
integers. If unsign is True, will first make all integers unsigned by adding
-min(iterable) to each element. Adds split-string between each integer
before passing it to count_chars.
    Returns both the constructed string (each integer concatenated according
    to the given parameters) and the individual character counts.
"""
add = -int(min(iterable)) if unsign else 0
digits = reduce(lambda x,y: x+split+str(int(y)+add), iterable, "").strip()
return digits, count_chars(digits)
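# Illustrative call: count_digits([3, -1, 2]) shifts each element by
# -min(iterable) = 1, builds the string "403", and returns it together with
# its per-character counts.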
def digits2encode(iterable, unsign = True, split = ""):
'''Get Huffman encoding of the given iterable of integers and the encoded
binary string.'''
s, counts = count_digits(iterable, unsign, split)
enc = _encode2dict(encode(counts))
return reduce(lambda x,y: x+enc[y], s, ""), enc
def str2encode(s):
'''Get Huffman encoding of the given str and the encoded binary string.'''
counts = count_chars(s)
enc = _encode2dict(encode(counts))
return reduce(lambda x,y: x+enc[y], s, ""), enc
def dig2enc(iterable):
'''Get Huffman encoding of the given iterable of numbers and the encoded
binary string. TEST version.'''
digits = reduce(lambda x,y: x+(str(y) if y < 0 else "+"+str(y)), iterable, "").strip()
counts = count_chars(digits)
enc = _encode2dict(encode(counts))
return reduce(lambda x,y: x+enc[y], digits, ""), enc, digits
def nums2bin(iterable, filepath=None):
    '''Convert an iterable of numbers to bytes that contain the Huffman coded
    binary string, and optionally save it to a file.
    Returns the Huffman codes, the bytes, and the number of zero bits appended
    to pad the last byte.
'''
huff, enc, digits = dig2enc(iterable)
    # Zero bits needed to pad the bit string to a whole number of bytes.
    add = 8 - (len(huff) % 8) if len(huff) % 8 != 0 else 0
if add != 0:
for i in xrange(add): huff += '0'
bts = []
# Read 8 bits a time, convert it to unsigned integer
for i in xrange(0, len(huff), 8):
by = int(huff[i:i+8], 2)
bts.append(by)
if filepath is not None:
with open(filepath,'w') as f:
f.write(pack('{}B'.format(len(bts)),*bts))
return enc, bts, add
def bin2str(filepath, enc, nbts, add):
'''Convert Huffman coded binary file into readable format.
Args:
filepath: path to binary file
enc: Huffman code mappings key:char, value:encoding
bts: number of bytes in binary file
add: number of zero bits appended to end of the file to get full bytes
'''
# Get bytes and concatenate their binary representations
bts = unpack('{}B'.format(nbts), open(filepath,'r').read())
huff_bin = reduce(lambda x,y: x+'{:08b}'.format(y), bts, "")
if add != 0 and add != 8: # Remove extra bits
huff_bin = huff_bin[:-add]
huff_s = ""
it = enc.items()
while len(huff_bin) > 0:
# Get the index of the encoding which appears at the start of the Huffman binary
i = np.where(map(lambda x: huff_bin.startswith(x[1]), it))[0][0]
# Add character corresponding to the encoding
huff_s += it[i][0]
# Remove encoding at the start of Huffman binary.
huff_bin = huff_bin[len(it[i][1]):]
return huff_s
def bin2nums(filepath, enc, bts, add):
'''Convert Huffman coded binary file into number list.
'''
hf = bin2str(filepath, enc, bts, add)
return [float(x) for x in re.findall('[+-]\d+[.]\d+', hf)]
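# Round-trip sketch (hypothetical file name): enc, bts, add =
# nums2bin(values, 'huff.bin') followed by bin2nums('huff.bin', enc,
# len(bts), add) recovers the values. Note that the regex in bin2nums only
# matches signed decimals with a fractional part, so only float inputs
# survive the round trip.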
def _encode2dict(enc):
d ={}
for e in enc:
d[e[0]] = e[1]
return d
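# Illustrative usage of encode() below: encode({'a': 5, 'b': 2, 'c': 1})
# returns [['a', '1'], ['b', '01'], ['c', '00']] -- the most frequent symbol
# gets the shortest code (exact codes depend on heap tie-breaking).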
def encode(symb2freq):
"""Huffman encode the given dict mapping symbols to weights"""
heap = [[wt, [sym, ""]] for sym, wt in symb2freq.items()]
heapify(heap)
while len(heap) > 1:
lo = heappop(heap)
hi = heappop(heap)
for pair in lo[1:]:
pair[1] = '0' + pair[1]
for pair in hi[1:]:
pair[1] = '1' + pair[1]
heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
return sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p)) | gpl-2.0 |
NetEaseGame/AutomatorX | scripts/monkey_playback.py | 9 | 2161 | #!/usr/bin/env monkeyrunner
# Copyright 2010, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from com.android.monkeyrunner import MonkeyRunner
# The format of the file we are parsing is very carefully constructed.
# Each line corresponds to a single command. The line is split into 2
# parts with a | character. Text to the left of the pipe denotes
# which command to run. The text to the right of the pipe is a python
# dictionary (it can be evaled into existence) that specifies the
# arguments for the command. In most cases, this directly maps to the
# keyword argument dictionary that could be passed to the underlying
# command.
# Lookup table to map command strings to functions that implement that
# command.
CMD_MAP = {
'TOUCH': lambda dev, arg: dev.touch(**arg),
'DRAG': lambda dev, arg: dev.drag(**arg),
'PRESS': lambda dev, arg: dev.press(**arg),
'TYPE': lambda dev, arg: dev.type(**arg),
'WAIT': lambda dev, arg: MonkeyRunner.sleep(**arg)
}
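# For reference, a recorded script in this format might look like the
# following (hypothetical coordinates and text):
#
#   TOUCH|{'x': 160, 'y': 200, 'type': 'downAndUp'}
#   WAIT|{'seconds': 1.0}
#   TYPE|{'message': 'hello'}
#   PRESS|{'name': 'KEYCODE_BACK', 'type': 'downAndUp'}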
# Process a single file for the specified device.
def process_file(fp, device):
for line in fp:
(cmd, rest) = line.split('|')
try:
# Parse the pydict
rest = eval(rest)
except:
print 'unable to parse options'
continue
if cmd not in CMD_MAP:
print 'unknown command: ' + cmd
continue
CMD_MAP[cmd](device, rest)
def main():
file = sys.argv[1]
fp = open(file, 'r')
device = MonkeyRunner.waitForConnection()
process_file(fp, device)
    fp.close()
if __name__ == '__main__':
main()
| apache-2.0 |
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/core/history.py | 7 | 30621 | """ History related magics and functionality """
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import atexit
import datetime
import os
import re
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
import threading
# Our own packages
from IPython.config.configurable import Configurable
from IPython.external.decorator import decorator
from IPython.utils.decorators import undoc
from IPython.utils.path import locate_profile
from IPython.utils import py3compat
from IPython.utils.traitlets import (
Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
)
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@undoc
class DummyDB(object):
"""Dummy DB that will act as a black hole for history.
Only used in the absence of sqlite"""
def execute(*args, **kwargs):
return []
def commit(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
pass
def __exit__(self, *args, **kwargs):
pass
@decorator
def needs_sqlite(f, self, *a, **kw):
"""Decorator: return an empty list in the absence of sqlite."""
if sqlite3 is None or not self.enabled:
return []
else:
return f(self, *a, **kw)
if sqlite3 is not None:
DatabaseError = sqlite3.DatabaseError
else:
@undoc
class DatabaseError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
@decorator
def catch_corrupt_db(f, self, *a, **kw):
"""A decorator which wraps HistoryAccessor method calls to catch errors from
a corrupt SQLite database, move the old database out of the way, and create
a new one.
"""
try:
return f(self, *a, **kw)
except DatabaseError:
if os.path.isfile(self.hist_file):
# Try to move the file out of the way
base,ext = os.path.splitext(self.hist_file)
newpath = base + '-corrupt' + ext
os.rename(self.hist_file, newpath)
self.init_db()
print("ERROR! History file wasn't a valid SQLite database.",
"It was moved to %s" % newpath, "and a new file created.")
return []
else:
# The hist_file is probably :memory: or something else.
raise
class HistoryAccessor(Configurable):
"""Access the history database without adding to it.
This is intended for use by standalone history tools. IPython shells use
HistoryManager, below, which is a subclass of this."""
# String holding the path to the history file
hist_file = Unicode(config=True,
help="""Path to file to use for SQLite history database.
By default, IPython will put the history database in the IPython
profile directory. If you would rather share one history among
profiles, you can set this value in each, so that they are consistent.
Due to an issue with fcntl, SQLite is known to misbehave on some NFS
mounts. If you see IPython hanging, try setting this to something on a
local disk, e.g::
ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
""")
enabled = Bool(True, config=True,
help="""enable the SQLite history
set enabled=False to disable the SQLite history,
in which case there will be no stored history, no SQLite connection,
and no background saving thread. This may be necessary in some
threaded environments where IPython is embedded.
"""
)
connection_options = Dict(config=True,
help="""Options for configuring the SQLite connection
These options are passed as keyword args to sqlite3.connect
        when establishing database connections.
"""
)
# The SQLite database
db = Any()
def _db_changed(self, name, old, new):
"""validate the db, since it can be an Instance of two different types"""
connection_types = (DummyDB,)
if sqlite3 is not None:
connection_types = (DummyDB, sqlite3.Connection)
if not isinstance(new, connection_types):
msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
(self.__class__.__name__, new)
raise TraitError(msg)
def __init__(self, profile='default', hist_file=u'', **traits):
"""Create a new history accessor.
Parameters
----------
profile : str
The name of the profile from which to open history.
hist_file : str
Path to an SQLite history database stored by IPython. If specified,
hist_file overrides profile.
config : :class:`~IPython.config.loader.Config`
Config object. hist_file can also be set through this.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryAccessor, self).__init__(**traits)
# defer setting hist_file from kwarg until after init,
# otherwise the default kwarg value would clobber any value
# set by config
if hist_file:
self.hist_file = hist_file
if self.hist_file == u'':
# No one has set the hist_file, yet.
self.hist_file = self._get_hist_file_name(profile)
if sqlite3 is None and self.enabled:
warn("IPython History requires SQLite, your history will not be saved")
self.enabled = False
self.init_db()
def _get_hist_file_name(self, profile='default'):
"""Find the history file for the given profile name.
This is overridden by the HistoryManager subclass, to use the shell's
active profile.
Parameters
----------
profile : str
The name of a profile which has a history file.
"""
return os.path.join(locate_profile(profile), 'history.sqlite')
@catch_corrupt_db
def init_db(self):
"""Connect to the database, and create tables if necessary."""
if not self.enabled:
self.db = DummyDB()
return
# use detect_types so that timestamps return datetime objects
kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
kwargs.update(self.connection_options)
self.db = sqlite3.connect(self.hist_file, **kwargs)
self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
self.db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
# Output history is optional, but ensure the table's there so it can be
# enabled later.
self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
self.db.commit()
def writeout_cache(self):
"""Overridden by HistoryManager to dump the cache before certain
database lookups."""
pass
## -------------------------------
## Methods for retrieving history:
## -------------------------------
def _run_sql(self, sql, params, raw=True, output=False):
"""Prepares and runs an SQL query for the history database.
Parameters
----------
sql : str
Any filtering expressions to go after SELECT ... FROM ...
params : tuple
Parameters passed to the SQL query (to replace "?")
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
toget = 'source_raw' if raw else 'source'
sqlfrom = "history"
if output:
sqlfrom = "history LEFT JOIN output_history USING (session, line)"
toget = "history.%s, output_history.output" % toget
cur = self.db.execute("SELECT session, line, %s FROM %s " %\
(toget, sqlfrom) + sql, params)
if output: # Regroup into 3-tuples, and parse JSON
return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
return cur
@needs_sqlite
@catch_corrupt_db
def get_session_info(self, session):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
query = "SELECT * from sessions where session == ?"
return self.db.execute(query, (session,)).fetchone()
@catch_corrupt_db
def get_last_session_id(self):
"""Get the last session ID currently in the database.
Within IPython, this should be the same as the value stored in
:attr:`HistoryManager.session_number`.
"""
for record in self.get_tail(n=1, include_latest=True):
return record[0]
@catch_corrupt_db
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
"""Get the last n lines from the history database.
Parameters
----------
n : int
The number of lines to get
raw, output : bool
See :meth:`get_range`
include_latest : bool
If False (default), n+1 lines are fetched, and the latest one
is discarded. This is intended to be used where the function
is called by a user command, which it should not return.
Returns
-------
Tuples as :meth:`get_range`
"""
self.writeout_cache()
if not include_latest:
n += 1
cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
(n,), raw=raw, output=output)
if not include_latest:
return reversed(list(cur)[1:])
return reversed(list(cur))
@catch_corrupt_db
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
n : None or int
If an integer is given, it defines the limit of
returned entries.
unique : bool
When it is true, return only unique entries.
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
sqlform = "WHERE %s GLOB ?" % tosearch
params = (pattern,)
if unique:
sqlform += ' GROUP BY {0}'.format(tosearch)
if n is not None:
sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
params += (n,)
elif unique:
sqlform += " ORDER BY session, line"
cur = self._run_sql(sqlform, params, raw=raw, output=output)
if n is not None:
return reversed(list(cur))
return cur
@catch_corrupt_db
    def get_range(self, session, start=1, stop=None, raw=True, output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if stop:
lineclause = "line >= ? AND line < ?"
params = (session, start, stop)
else:
lineclause = "line>=?"
params = (session, start)
return self._run_sql("WHERE session==? AND %s" % lineclause,
params, raw=raw, output=output)
def get_range_by_str(self, rangestr, raw=True, output=False):
"""Get lines of history from a string of ranges, as used by magic
commands %hist, %save, %macro, etc.
Parameters
----------
rangestr : str
A string specifying ranges, e.g. "5 ~2/1-4". See
:func:`magic_history` for full details.
raw, output : bool
As :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
for sess, s, e in extract_hist_ranges(rangestr):
for line in self.get_range(sess, s, e, raw=raw, output=output):
yield line
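    # Illustrative call: get_range_by_str("~1/1-5") yields the first five
    # inputs of the previous session; see extract_hist_ranges below for the
    # range syntax.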
class HistoryManager(HistoryAccessor):
"""A class to organize all history-related functionality in one place.
"""
# Public interface
# An instance of the IPython shell we are attached to
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
# Lists to hold processed and raw history. These start with a blank entry
# so that we can index them starting from 1
input_hist_parsed = List([""])
input_hist_raw = List([""])
# A list of directories visited during session
dir_hist = List()
def _dir_hist_default(self):
try:
return [py3compat.getcwd()]
except OSError:
return []
# A dict of output history, keyed with ints from the shell's
# execution count.
output_hist = Dict()
# The text/plain repr of outputs.
output_hist_reprs = Dict()
# The number of the current session in the history database
session_number = Integer()
db_log_output = Bool(False, config=True,
help="Should the history database include output? (default: no)"
)
db_cache_size = Integer(0, config=True,
help="Write to database every x commands (higher values save disk access & power).\n"
"Values of 1 or less effectively disable caching."
)
# The input and output caches
db_input_cache = List()
db_output_cache = List()
# History saving in separate thread
save_thread = Instance('IPython.core.history.HistorySavingThread')
try: # Event is a function returning an instance of _Event...
save_flag = Instance(threading._Event)
except AttributeError: # ...until Python 3.3, when it's a class.
save_flag = Instance(threading.Event)
# Private interface
# Variables used to store the three last inputs from the user. On each new
# history update, we populate the user's namespace with these, shifted as
# necessary.
_i00 = Unicode(u'')
_i = Unicode(u'')
_ii = Unicode(u'')
_iii = Unicode(u'')
# A regex matching all forms of the exit command, so that we don't store
# them in the history (it's annoying to rewind the first entry and land on
# an exit call).
_exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
def __init__(self, shell=None, config=None, **traits):
"""Create a new history manager associated with a shell instance.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryManager, self).__init__(shell=shell, config=config,
**traits)
self.save_flag = threading.Event()
self.db_input_cache_lock = threading.Lock()
self.db_output_cache_lock = threading.Lock()
if self.enabled and self.hist_file != ':memory:':
self.save_thread = HistorySavingThread(self)
self.save_thread.start()
self.new_session()
def _get_hist_file_name(self, profile=None):
"""Get default history file name based on the Shell's profile.
The profile parameter is ignored, but must exist for compatibility with
the parent class."""
profile_dir = self.shell.profile_dir.location
return os.path.join(profile_dir, 'history.sqlite')
@needs_sqlite
def new_session(self, conn=None):
"""Get a new session number."""
if conn is None:
conn = self.db
with conn:
cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
NULL, "") """, (datetime.datetime.now(),))
self.session_number = cur.lastrowid
def end_session(self):
"""Close the database session, filling in the end time and line count."""
self.writeout_cache()
with self.db:
self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
session==?""", (datetime.datetime.now(),
len(self.input_hist_parsed)-1, self.session_number))
self.session_number = 0
def name_session(self, name):
"""Give the current session a name in the history database."""
with self.db:
self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
(name, self.session_number))
def reset(self, new_session=True):
"""Clear the session history, releasing all object references, and
optionally open a new session."""
self.output_hist.clear()
# The directory history can't be completely empty
self.dir_hist[:] = [py3compat.getcwd()]
if new_session:
if self.session_number:
self.end_session()
self.input_hist_parsed[:] = [""]
self.input_hist_raw[:] = [""]
self.new_session()
# ------------------------------
# Methods for retrieving history
# ------------------------------
def get_session_info(self, session=0):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is the previous session.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
if session <= 0:
session += self.session_number
return super(HistoryManager, self).get_session_info(session=session)
def _get_range_session(self, start=1, stop=None, raw=True, output=False):
"""Get input and output history from the current session. Called by
get_range, and takes similar parameters."""
input_hist = self.input_hist_raw if raw else self.input_hist_parsed
n = len(input_hist)
if start < 0:
start += n
if not stop or (stop > n):
stop = n
elif stop < 0:
stop += n
for i in range(start, stop):
if output:
line = (input_hist[i], self.output_hist_reprs.get(i))
else:
line = input_hist[i]
yield (0, i, line)
    def get_range(self, session=0, start=1, stop=None, raw=True, output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is previous session.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if session <= 0:
session += self.session_number
if session==self.session_number: # Current session
return self._get_range_session(start, stop, raw, output)
return super(HistoryManager, self).get_range(session, start, stop, raw,
output)
## ----------------------------
## Methods for storing history:
## ----------------------------
def store_inputs(self, line_num, source, source_raw=None):
"""Store source and raw input in history and create input cache
variables ``_i*``.
Parameters
----------
line_num : int
The prompt number of this input.
source : str
Python input.
source_raw : str, optional
If given, this is the raw input without any IPython transformations
applied to it. If not given, ``source`` is used.
"""
if source_raw is None:
source_raw = source
source = source.rstrip('\n')
source_raw = source_raw.rstrip('\n')
# do not store exit/quit commands
if self._exit_re.match(source_raw.strip()):
return
self.input_hist_parsed.append(source)
self.input_hist_raw.append(source_raw)
with self.db_input_cache_lock:
self.db_input_cache.append((line_num, source, source_raw))
# Trigger to flush cache and write to DB.
if len(self.db_input_cache) >= self.db_cache_size:
self.save_flag.set()
# update the auto _i variables
self._iii = self._ii
self._ii = self._i
self._i = self._i00
self._i00 = source_raw
# hackish access to user namespace to create _i1,_i2... dynamically
new_i = '_i%s' % line_num
to_main = {'_i': self._i,
'_ii': self._ii,
'_iii': self._iii,
new_i : self._i00 }
if self.shell is not None:
self.shell.push(to_main, interactive=False)
def store_output(self, line_num):
"""If database output logging is enabled, this saves all the
outputs from the indicated prompt number to the database. It's
called by run_cell after code has been executed.
Parameters
----------
line_num : int
The line number from which to save outputs
"""
if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
return
output = self.output_hist_reprs[line_num]
with self.db_output_cache_lock:
self.db_output_cache.append((line_num, output))
if self.db_cache_size <= 1:
self.save_flag.set()
def _writeout_input_cache(self, conn):
with conn:
for line in self.db_input_cache:
conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
(self.session_number,)+line)
def _writeout_output_cache(self, conn):
with conn:
for line in self.db_output_cache:
conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
(self.session_number,)+line)
@needs_sqlite
def writeout_cache(self, conn=None):
"""Write any entries in the cache to the database."""
if conn is None:
conn = self.db
with self.db_input_cache_lock:
try:
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
self.new_session(conn)
print("ERROR! Session/line number was not unique in",
"database. History logging moved to new session",
self.session_number)
try:
# Try writing to the new session. If this fails, don't
# recurse
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
pass
finally:
self.db_input_cache = []
with self.db_output_cache_lock:
try:
self._writeout_output_cache(conn)
except sqlite3.IntegrityError:
print("!! Session/line number for output was not unique",
"in database. Output will not be stored.")
finally:
self.db_output_cache = []
class HistorySavingThread(threading.Thread):
"""This thread takes care of writing history to the database, so that
the UI isn't held up while that happens.
It waits for the HistoryManager's save_flag to be set, then writes out
the history cache. The main thread is responsible for setting the flag when
the cache size reaches a defined threshold."""
daemon = True
stop_now = False
enabled = True
def __init__(self, history_manager):
super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
self.history_manager = history_manager
self.enabled = history_manager.enabled
atexit.register(self.stop)
@needs_sqlite
def run(self):
# We need a separate db connection per thread:
try:
self.db = sqlite3.connect(self.history_manager.hist_file,
**self.history_manager.connection_options
)
while True:
self.history_manager.save_flag.wait()
if self.stop_now:
return
self.history_manager.save_flag.clear()
self.history_manager.writeout_cache(self.db)
except Exception as e:
print(("The history saving thread hit an unexpected error (%s)."
"History will not be written to the database.") % repr(e))
def stop(self):
"""This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method."""
self.stop_now = True
self.history_manager.save_flag.set()
self.join()
# To match, e.g. ~5/8-~2/3
range_re = re.compile(r"""
((?P<startsess>~?\d+)/)?
(?P<start>\d+)?
((?P<sep>[\-:])
((?P<endsess>~?\d+)/)?
(?P<end>\d+))?
$""", re.VERBOSE)
def extract_hist_ranges(ranges_str):
"""Turn a string of history ranges into 3-tuples of (session, start, stop).
Examples
--------
>>> list(extract_hist_ranges("~8/5-~7/4 2"))
[(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
"""
for range_str in ranges_str.split():
rmatch = range_re.match(range_str)
if not rmatch:
continue
start = rmatch.group("start")
if start:
start = int(start)
end = rmatch.group("end")
# If no end specified, get (a, a + 1)
end = int(end) if end else start + 1
else: # start not specified
if not rmatch.group('startsess'): # no startsess
continue
start = 1
end = None # provide the entire session hist
if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
end += 1
startsess = rmatch.group("startsess") or "0"
endsess = rmatch.group("endsess") or startsess
startsess = int(startsess.replace("~","-"))
endsess = int(endsess.replace("~","-"))
assert endsess >= startsess, "start session must be earlier than end session"
if endsess == startsess:
yield (startsess, start, end)
continue
# Multiple sessions in one range:
yield (startsess, start, None)
for sess in range(startsess+1, endsess):
yield (sess, 1, None)
yield (endsess, 1, end)
def _format_lineno(session, line):
"""Helper function to format line numbers properly."""
if session == 0:
return str(line)
return "%s#%s" % (session, line)
| mit |
credits-currency/credits | contrib/spendfrom/spendfrom.py | 1 | 10055 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcredits received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a creditsd or Credits-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting CRE values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Credits/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Credits")
return os.path.expanduser("~/.credits")
def read_bitcoin_config(dbdir):
"""Read the credits.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "credits.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the creditsd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(creditsd):
info = creditsd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
creditsd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = creditsd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(creditsd):
address_summary = dict()
address_to_account = dict()
for info in creditsd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = creditsd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = creditsd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
    # Feel free to improve this; it is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
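# A hedged usage sketch (not part of the original script; the txids and
# amounts below are made up):
def _select_coins_sketch():
    demo = [{"txid": "aa"*32, "vout": 0, "amount": Decimal("0.5")},
            {"txid": "bb"*32, "vout": 1, "amount": Decimal("0.7")}]
    # The first input alone covers the 0.4 needed, so only it is selected
    # and the change is 0.5 - 0.4 = 0.1.
    (outputs, change) = select_coins(Decimal("0.4"), demo)
    assert len(outputs) == 1 and change == Decimal("0.1")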
def create_tx(creditsd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(creditsd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f CRE available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to creditsd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = creditsd.createrawtransaction(inputs, outputs)
signed_rawtx = creditsd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(creditsd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = creditsd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(creditsd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = creditsd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(creditsd, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # the implicit fee: inputs not sent back out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of credits.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
creditsd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(creditsd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(creditsd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(creditsd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(creditsd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = creditsd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit |
phw/picard | test/test_similarity.py | 5 | 2100 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2013, 2018-2019 Laurent Monin
# Copyright (C) 2018 Wieland Hoffmann
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from test.picardtestcase import PicardTestCase
from picard.similarity import (
similarity,
similarity2,
)
class SimilarityTest(PicardTestCase):
def test_correct(self):
self.assertEqual(similarity("K!", "K!"), 1.0)
self.assertEqual(similarity("BBB", "AAA"), 0.0)
self.assertAlmostEqual(similarity("ABC", "ABB"), 0.7, 1)
class Similarity2Test(PicardTestCase):
def test_1(self):
a = b = "a b c"
self.assertEqual(similarity2(a, b), 1.0)
def test_2(self):
a = "a b c"
b = "A,B•C"
self.assertEqual(similarity2(a, b), 1.0)
def test_3(self):
a = "a b c"
b = ",A, B •C•"
self.assertEqual(similarity2(a, b), 1.0)
def test_4(self):
a = "a b c"
b = "c a b"
self.assertEqual(similarity2(a, b), 1.0)
def test_5(self):
a = "a b c"
b = "a b d"
self.assertAlmostEqual(similarity2(a, b), 0.6, 1)
def test_6(self):
a = "a b c"
b = "a f d"
self.assertAlmostEqual(similarity2(a, b), 0.3, 1)
def test_7(self):
a = "abc"
b = "def"
self.assertEqual(similarity2(a, b), 0.0)
| gpl-2.0 |
ds-hwang/chromium-crosswalk | build/gyp_chromium.py | 12 | 12953 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is wrapper for Chromium that adds some support for how GYP
is invoked by Chromium beyond what can be done in the gclient hooks.
"""
import argparse
import gc
import glob
import gyp_environment
import os
import re
import shlex
import subprocess
import string
import sys
import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
import gyp
# Assume this file is in a one-level-deep subdirectory of the source root.
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add paths so that pymod_do_main(...) can import files.
sys.path.insert(1, os.path.join(chrome_src, 'android_webview', 'tools'))
sys.path.insert(1, os.path.join(chrome_src, 'build', 'android', 'gyp'))
sys.path.insert(1, os.path.join(chrome_src, 'chrome', 'tools', 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'chromecast', 'tools', 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'ios', 'chrome', 'tools', 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'native_client', 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'native_client_sdk', 'src',
'build_tools'))
sys.path.insert(1, os.path.join(chrome_src, 'remoting', 'tools', 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'third_party', 'liblouis'))
sys.path.insert(1, os.path.join(chrome_src, 'third_party', 'WebKit',
'Source', 'build', 'scripts'))
sys.path.insert(1, os.path.join(chrome_src, 'build'))
sys.path.insert(1, os.path.join(chrome_src, 'tools'))
sys.path.insert(1, os.path.join(chrome_src, 'tools', 'generate_shim_headers'))
sys.path.insert(1, os.path.join(chrome_src, 'tools', 'grit'))
# On Windows, Psyco shortens warm runs of build/gyp_chromium by about
# 20 seconds on a z600 machine with 12 GB of RAM, from 90 down to 70
# seconds. Conversely, memory usage of build/gyp_chromium with Psyco
# maxes out at about 158 MB vs. 132 MB without it.
#
# Psyco uses native libraries, so we need to load a different
# installation depending on which OS we are running under. It has not
# been tested whether using Psyco on our Mac and Linux builds is worth
# it (the GYP running time is a lot shorter, so the JIT startup cost
# may not be worth it).
if sys.platform == 'win32':
try:
sys.path.insert(0, os.path.join(chrome_src, 'third_party', 'psyco_win32'))
import psyco
except:
psyco = None
else:
psyco = None
def GetSupplementalFiles():
"""Returns a list of the supplemental files that are included in all GYP
sources."""
return glob.glob(os.path.join(chrome_src, '*', 'supplement.gypi'))
def ProcessGypDefinesItems(items):
"""Converts a list of strings to a list of key-value pairs."""
result = []
for item in items:
tokens = item.split('=', 1)
# Some GYP variables have hyphens, which we don't support.
if len(tokens) == 2:
result += [(tokens[0], tokens[1])]
else:
# No value supplied, treat it as a boolean and set it. Note that we
# use the string '1' here so we have a consistent definition whether
# you do 'foo=1' or 'foo'.
result += [(tokens[0], '1')]
return result
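# Illustrative behavior (hypothetical define strings): bare names are treated
# as booleans and set to '1'.
#
#   ProcessGypDefinesItems(['OS=android', 'component=shared_library', 'clang'])
#   # -> [('OS', 'android'), ('component', 'shared_library'), ('clang', '1')]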
def GetGypVars(supplemental_files):
"""Returns a dictionary of all GYP vars."""
# Find the .gyp directory in the user's home directory.
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
if home_dot_gyp:
include_gypi = os.path.join(home_dot_gyp, "include.gypi")
if os.path.exists(include_gypi):
supplemental_files += [include_gypi]
# GYP defines from the supplemental.gypi files.
supp_items = []
for supplement in supplemental_files:
with open(supplement, 'r') as f:
try:
file_data = eval(f.read(), {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(supplement)
raise
variables = file_data.get('variables', [])
for v in variables:
supp_items += [(v, str(variables[v]))]
# GYP defines from the environment.
env_items = ProcessGypDefinesItems(
shlex.split(os.environ.get('GYP_DEFINES', '')))
# GYP defines from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-D', dest='defines', action='append', default=[])
cmdline_input_items = parser.parse_known_args()[0].defines
cmdline_items = ProcessGypDefinesItems(cmdline_input_items)
vars_dict = dict(supp_items + env_items + cmdline_items)
return vars_dict
def GetOutputDirectory():
"""Returns the output directory that GYP will use."""
# Handle command line generator flags.
parser = argparse.ArgumentParser()
parser.add_argument('-G', dest='genflags', default=[], action='append')
genflags = parser.parse_known_args()[0].genflags
# Handle generator flags from the environment.
genflags += shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', ''))
needle = 'output_dir='
for item in genflags:
if item.startswith(needle):
return item[len(needle):]
return 'out'
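# Illustrative behavior (hypothetical flag values): with
# GYP_GENERATOR_FLAGS="output_dir=out_cros" this returns 'out_cros'; with no
# output_dir flag it falls back to 'out'.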
def additional_include_files(supplemental_files, args=[]):
"""
Returns a list of additional (.gypi) files to include, without duplicating
ones that are already specified on the command line. The list of supplemental
include files is passed in as an argument.
"""
# Determine the include files specified on the command line.
# This doesn't cover all the different option formats you can use,
# but it's mainly intended to avoid duplicating flags on the automatic
# makefile regeneration which only uses this format.
specified_includes = set()
for arg in args:
if arg.startswith('-I') and len(arg) > 2:
specified_includes.add(os.path.realpath(arg[2:]))
result = []
def AddInclude(path):
if os.path.realpath(path) not in specified_includes:
result.append(path)
if os.environ.get('GYP_INCLUDE_FIRST') != None:
AddInclude(os.path.join(chrome_src, os.environ.get('GYP_INCLUDE_FIRST')))
# Always include common.gypi.
AddInclude(os.path.join(script_dir, 'common.gypi'))
# Optionally add supplemental .gypi files if present.
for supplement in supplemental_files:
AddInclude(supplement)
if os.environ.get('GYP_INCLUDE_LAST') != None:
AddInclude(os.path.join(chrome_src, os.environ.get('GYP_INCLUDE_LAST')))
return result
def main():
# Disabling garbage collection saves about 1 second out of 16 on a Linux
# z620 workstation. Since this is a short-lived process it's not a problem to
  # leak a few cyclic references in order to spare the CPU cycles for
# scanning the heap.
gc.disable()
args = sys.argv[1:]
use_analyzer = len(args) and args[0] == '--analyzer'
if use_analyzer:
args.pop(0)
os.environ['GYP_GENERATORS'] = 'analyzer'
args.append('-Gconfig_path=' + args.pop(0))
args.append('-Ganalyzer_output_path=' + args.pop(0))
if int(os.environ.get('GYP_CHROMIUM_NO_ACTION', 0)):
print 'Skipping gyp_chromium due to GYP_CHROMIUM_NO_ACTION env var.'
sys.exit(0)
# Use the Psyco JIT if available.
if psyco:
psyco.profile()
print "Enabled Psyco JIT."
# Fall back on hermetic python if we happen to get run under cygwin.
# TODO(bradnelson): take this out once this issue is fixed:
# http://code.google.com/p/gyp/issues/detail?id=177
if sys.platform == 'cygwin':
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
python_dir = sorted(glob.glob(os.path.join(depot_tools_path,
'python2*_bin')))[-1]
env = os.environ.copy()
env['PATH'] = python_dir + os.pathsep + env.get('PATH', '')
cmd = [os.path.join(python_dir, 'python.exe')] + sys.argv
sys.exit(subprocess.call(cmd, env=env))
# This could give false positives since it doesn't actually do real option
# parsing. Oh well.
gyp_file_specified = any(arg.endswith('.gyp') for arg in args)
gyp_environment.SetEnvironment()
# If we didn't get a file, check an env var, and then fall back to
# assuming 'all.gyp' from the same directory as the script.
if not gyp_file_specified:
gyp_file = os.environ.get('CHROMIUM_GYP_FILE')
if gyp_file:
# Note that CHROMIUM_GYP_FILE values can't have backslashes as
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
args.append(os.path.join(script_dir, 'all.gyp'))
supplemental_includes = GetSupplementalFiles()
gyp_vars_dict = GetGypVars(supplemental_includes)
# There shouldn't be a circular dependency relationship between .gyp files,
# but in Chromium's .gyp files, on non-Mac platforms, circular relationships
# currently exist. The check for circular dependencies is currently
# bypassed on other platforms, but is left enabled on iOS, where a violation
# of the rule causes Xcode to misbehave badly.
# TODO(mark): Find and kill remaining circular dependencies, and remove this
# option. http://crbug.com/35878.
# TODO(tc): Fix circular dependencies in ChromiumOS then add linux2 to the
# list.
if gyp_vars_dict.get('OS') != 'ios':
args.append('--no-circular-check')
# libtool on Mac warns about duplicate basenames in static libraries, so
# they're disallowed in general by gyp. We are lax on this point, so disable
# this check other than on Mac. GN does not use static libraries as heavily,
# so over time this restriction will mostly go away anyway, even on Mac.
# https://code.google.com/p/gyp/issues/detail?id=384
if sys.platform != 'darwin':
args.append('--no-duplicate-basename-check')
# We explicitly don't support the make gyp generator (crbug.com/348686). Be
# nice and fail here, rather than choking in gyp.
if re.search(r'(^|,|\s)make($|,|\s)', os.environ.get('GYP_GENERATORS', '')):
print 'Error: make gyp generator not supported (check GYP_GENERATORS).'
sys.exit(1)
# We explicitly don't support the native msvs gyp generator. Be nice and
# fail here, rather than generating broken projects.
if re.search(r'(^|,|\s)msvs($|,|\s)', os.environ.get('GYP_GENERATORS', '')):
print 'Error: msvs gyp generator not supported (check GYP_GENERATORS).'
print 'Did you mean to use the `msvs-ninja` generator?'
sys.exit(1)
# If CHROMIUM_GYP_SYNTAX_CHECK is set to 1, it will invoke gyp with --check
  # to enforce syntax checking.
syntax_check = os.environ.get('CHROMIUM_GYP_SYNTAX_CHECK')
if syntax_check and int(syntax_check):
args.append('--check')
# TODO(dmikurube): Remove these checks and messages after a while.
if ('linux_use_tcmalloc' in gyp_vars_dict or
'android_use_tcmalloc' in gyp_vars_dict):
print '*****************************************************************'
print '"linux_use_tcmalloc" and "android_use_tcmalloc" are deprecated!'
print '-----------------------------------------------------------------'
print 'You specify "linux_use_tcmalloc" or "android_use_tcmalloc" in'
print 'your GYP_DEFINES. Please switch them into "use_allocator" now.'
print 'See http://crbug.com/345554 for the details.'
print '*****************************************************************'
# Automatically turn on crosscompile support for platforms that need it.
# (The Chrome OS build sets CC_host / CC_target which implicitly enables
# this mode.)
if all(('ninja' in os.environ.get('GYP_GENERATORS', ''),
gyp_vars_dict.get('OS') in ['android', 'ios'],
'GYP_CROSSCOMPILE' not in os.environ)):
os.environ['GYP_CROSSCOMPILE'] = '1'
if gyp_vars_dict.get('OS') == 'android':
args.append('--check')
args.extend(
['-I' + i for i in additional_include_files(supplemental_includes, args)])
args.extend(['-D', 'gyp_output_dir=' + GetOutputDirectory()])
if not use_analyzer:
print 'Updating projects from gyp files...'
sys.stdout.flush()
# Off we go...
gyp_rc = gyp.main(args)
if not use_analyzer:
vs2013_runtime_dll_dirs = vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
if vs2013_runtime_dll_dirs:
x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
vs_toolchain.CopyVsRuntimeDlls(
os.path.join(chrome_src, GetOutputDirectory()),
(x86_runtime, x64_runtime))
sys.exit(gyp_rc)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
uw-it-aca/myuw | myuw/management/commands/notice_mapping.py | 1 | 2987 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import csv
import logging
import json
from django.core.management.base import BaseCommand, CommandError
logger = logging.getLogger("commands")
item_format = (" \"myuw_category\": \"{}\",\n" +
" \"location_tags\": {},\n" +
" \"critical\": {}")
class Command(BaseCommand):
    help = 'Builds NOTICE_MAPPING based on the spreadsheet from ' +\
        'https://docs.google.com/a/uw.edu/spreadsheet/ccc?key=' +\
        '0AkNIKfyuX9lwdEYtR2JfRUlqUXBRazBqNWNldk9YV2c&usp=drive_web#gid=0\n' +\
        'Workflow: 1. download the spreadsheet as map.csv;\n' +\
        '2. transfer the file if it is not local;\n' +\
        '3. Run: python manage.py notice_mapping ' +\
        'map.csv notice_categorization.py;\n' +\
        '4. Move the mapping file into myuw/dao/;\n' +\
        '5. Add and commit the notice_categorization.py'
def add_arguments(self, parser):
parser.add_argument('spreadsheet-csv-path')
parser.add_argument('outfile')
def handle(self, *args, **options):
seen_category_keys = set()
try:
csv_path = options['spreadsheet-csv-path']
seen_category_keys = set()
categories = []
reader = csv.reader(open(csv_path, 'r', encoding='utf8'),
delimiter=',')
next(reader)
for row in reader:
try:
myuw_id = row[2].replace(" ", "")
if myuw_id is None or len(myuw_id) == 0:
continue
if myuw_id in seen_category_keys:
continue
seen_category_keys.add(myuw_id)
# row[3]: myuw_category
# row[4]: critical
item = item_format.format(row[3],
self._get_location_tags(row[5]),
len(row[4]) > 0)
categories.append(" \"{0}\": {1}\n{2}\n{3}".format(
myuw_id.lower(), "{", item, " }"))
except Exception as ex:
logger.error("{} in line: {}".format(str(ex), row))
output_string = "NOTICE_CATEGORIES = {0}{1}\n{2}".format(
"{\n", ",\n".join(categories), "}\n")
outfile = options['outfile']
f = open(outfile, 'w')
f.write(output_string)
f.close()
except IndexError as e:
raise CommandError(e)
except Exception as ex:
raise CommandError(ex)
def _get_location_tags(self, tag_string):
tags = []
tag_pieces = tag_string.split(",")
for piece in tag_pieces:
piece = piece.strip()
            if piece != "?" and len(piece) > 0:
tags.append(piece)
return tags
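# Illustrative behavior (hypothetical cell value): a tag string like
# "Seattle, ?, Tacoma" yields ["Seattle", "Tacoma"]; each piece is stripped
# of surrounding whitespace and "?" placeholders are dropped.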
| apache-2.0 |
Teagan42/home-assistant | homeassistant/components/automation/device.py | 24 | 1041 | """Offer device oriented automation."""
import voluptuous as vol
from homeassistant.components.device_automation import (
TRIGGER_BASE_SCHEMA,
async_get_device_automation_platform,
)
from homeassistant.const import CONF_DOMAIN
# mypy: allow-untyped-defs, no-check-untyped-defs
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
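# A minimal config this schema would accept might look like the following
# (values are illustrative; the full set of keys is defined by the device
# automation platform for the given domain):
#
#   {"platform": "device", "domain": "light",
#    "device_id": "abc123", "type": "turned_on"}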
async def async_validate_trigger_config(hass, config):
"""Validate config."""
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "trigger"
)
if hasattr(platform, "async_validate_trigger_config"):
return await getattr(platform, "async_validate_trigger_config")(hass, config)
return platform.TRIGGER_SCHEMA(config)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for trigger."""
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "trigger"
)
return await platform.async_attach_trigger(hass, config, action, automation_info)
| apache-2.0 |
zasdfgbnm/tensorflow | tensorflow/python/ops/distributions/student_t.py | 5 | 13087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Student's t distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"StudentT",
"StudentTWithAbsDfSoftplusScale",
]
@tf_export("distributions.StudentT")
class StudentT(distribution.Distribution):
"""Student's t-distribution.
This distribution has parameters: degree of freedom `df`, location `loc`,
and `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z
where,
y = (x - mu) / sigma
Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1))
```
where:
* `loc = mu`,
* `scale = sigma`, and,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The StudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that `scale` has semantics more similar to standard deviation than
variance. However it is not actually the std. deviation; the Student's
t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`.
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Student t distribution.
single_dist = tf.distributions.StudentT(df=3)
# Evaluate the pdf at 1, returning a scalar Tensor.
single_dist.prob(1.)
# Define a batch of two scalar valued Student t's.
# The first has degrees of freedom 2, mean 1, and scale 11.
# The second 3, 2 and 22.
multi_dist = tf.distributions.StudentT(df=[2, 3],
loc=[1, 2.],
scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
multi_dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
multi_dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two Student's t distributions.
# Both have df 2 and mean 1, but different scales.
dist = tf.distributions.StudentT(df=2, loc=1, scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
# pylint: enable=line-too-long
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentT"):
"""Construct Student's t distributions.
The distributions have degree of freedom `df`, mean `loc`, and scale
`scale`.
The parameters `df`, `loc`, and `scale` must be shaped in a way that
supports broadcasting (e.g. `df + loc + scale` is a valid operation).
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values.
loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
scale: Floating-point `Tensor`. The scaling factor(s) for the
distribution(s). Note that `scale` is not technically the standard
deviation of this distribution but has semantics more similar to
standard deviation than variance.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[df, loc, scale]):
with ops.control_dependencies([check_ops.assert_positive(df)]
if validate_args else []):
self._df = array_ops.identity(df, name="df")
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype(
(self._df, self._loc, self._scale))
super(StudentT, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._df, self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("df", "loc", "scale"), (
[ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 3)))
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self._df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self._loc
@property
def scale(self):
"""Scaling factors of these Student's t distribution(s)."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.df),
array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale)))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
array_ops.broadcast_static_shape(self.df.get_shape(),
self.loc.get_shape()),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=math_ops.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# The sampling method comes from the fact that if:
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
[n],
0.5 * df,
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, salt="student_t"))
samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
return samples * self.scale + self.loc # Abs(scale) not wanted.
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_unnormalized_prob(self, x):
y = (x - self.loc) / self.scale # Abs(scale) superfluous.
return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df)
def _log_normalization(self):
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
0.5 * np.log(np.pi) +
math_ops.lgamma(0.5 * self.df) -
math_ops.lgamma(0.5 * (self.df + 1.)))
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _cdf(self, x):
# Take Abs(scale) to make subsequent where work correctly.
y = (x - self.loc) / math_ops.abs(self.scale)
x_t = self.df / (y**2. + self.df)
neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
def _entropy(self):
v = array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)[..., array_ops.newaxis]
u = v * self.df[..., array_ops.newaxis]
beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +
0.5 * (self.df + 1.) *
(math_ops.digamma(0.5 * (self.df + 1.)) -
math_ops.digamma(0.5 * self.df)))
@distribution_util.AppendDocstring(
"""The mean of Student's T equals `loc` if `df > 1`, otherwise it is
`NaN`. If `self.allow_nan_stats=True`, then an exception will be raised
rather than returning `NaN`.""")
def _mean(self):
mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
mean,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="mean not defined for components of df <= 1"),
],
mean)
@distribution_util.AppendDocstring("""
The variance for Student's T equals
```
df / (df - 2), when df > 2
infinity, when 1 < df <= 2
NaN, when df <= 1
```
""")
def _variance(self):
# We need to put the tf.where inside the outer tf.where to ensure we never
# hit a NaN in the gradient.
denom = array_ops.where(math_ops.greater(self.df, 2.),
self.df - 2.,
array_ops.ones_like(self.df))
# Abs(scale) superfluous.
var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
math_ops.square(self.scale) * self.df / denom)
# When 1 < df <= 2, variance is infinite.
inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
result_where_defined = array_ops.where(
self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
var,
array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
result_where_defined,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="variance not defined for components of df <= 1"),
],
result_where_defined)
def _mode(self):
return array_ops.identity(self.loc)
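# A minimal NumPy sketch (illustrative, not part of the public API) of the
# sampling identity used in `_sample_n` above: if X ~ Normal(0, 1) and
# Z ~ Chi2(df), then X / sqrt(Z / df) ~ StudentT(df).
def _numpy_student_t_sampling_sketch(df=5., n=200000, seed=0):
  rng = np.random.RandomState(seed)
  x = rng.standard_normal(n)     # X ~ Normal(0, 1)
  z = rng.chisquare(df, size=n)  # Z ~ Chi2(df)
  samples = x / np.sqrt(z / df)  # Y ~ StudentT(df)
  # For df > 2 the sample variance should approach df / (df - 2).
  return samples.var(), df / (df - 2.)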
class StudentTWithAbsDfSoftplusScale(StudentT):
"""StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`."""
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentTWithAbsDfSoftplusScale"):
parameters = locals()
with ops.name_scope(name, values=[df, scale]):
super(StudentTWithAbsDfSoftplusScale, self).__init__(
df=math_ops.floor(math_ops.abs(df)),
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
| apache-2.0 |
evilp/android_kernel_hp_phobos | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display the processing path of packets and the time spent at each stage.
# It helps us investigate networking and network device behavior.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
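# Typical invocation (illustrative; via the record/report wrappers that ship
# alongside perf's script directory):
# perf script record netdev-times
# perf script report netdev-times tx dev=eth0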
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
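# e.g. diff_msec(0, 2500000) == 2.5; inputs are nanosecond timestamps and the
# result is the interval in milliseconds.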
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
aleksandra-tarkowska/openmicroscopy | components/tools/OmeroPy/test/integration/test_tickets2000.py | 9 | 15420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Integration test focused on the omero.api.IShare interface
against a running server.
Copyright 2008-2014 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import time
import library as lib
import pytest
import omero
from omero.rtypes import rbool, rstring, rtime, rlong, rint
from omero_model_DatasetI import DatasetI
from omero_model_ProjectI import ProjectI
from omero_model_ExperimenterI import ExperimenterI
from omero_model_ExperimenterGroupI import ExperimenterGroupI
class TestTickets2000(lib.ITest):
def test1064(self):
admin = self.client.sf.getAdminService()
admin.getEventContext()
def test1067(self):
admin = self.root.sf.getAdminService()
test_group0 = self.new_group()
test_user = self.new_user(group=test_group0)
        # Non-member group
groups = list()
gr1 = self.new_group()
groups.append(gr1)
exp = admin.lookupExperimenter(test_user.omeName.val)
contained_grs = admin.containedGroups(exp.id.val)
        # if the GroupExperimenterMap contains the test group, it should be removed
for gr in contained_grs:
if gr.id.val == gr1.id.val:
admin.removeGroups(exp, groups)
admin.addGroups(exp, groups)
        # throws an exception because gr1 is not on the GroupExperimenterMap
admin.setDefaultGroup(exp, gr1)
def test1027(self):
self.client.sf.getAdminService().getEventContext().sessionUuid
self.client.sf.getAdminService().lookupLdapAuthExperimenters()
def test1069(self):
unique = rstring(self.uuid())
project = self.make_project(name=unique, description="NOTME")
self.index(project)
search = self.client.sf.createSearchService()
search.onlyType("Project")
search.bySomeMustNone([unique.val], [], ["NOTME"])
assert not search.hasNext()
search.bySomeMustNone([unique.val], [], ["NOTME", "SOMETHINGELSE"])
assert not search.hasNext()
search.bySomeMustNone([unique.val], [], [])
assert search.hasNext()
def test1071(self):
uuid = self.ctx.sessionUuid
pojos = self.sf.getContainerService()
# projects
pr1 = self.make_project(name='test1071-pr1-%s' % (uuid))
pr2 = self.make_project(name='test1071-pr2-%s' % (uuid))
# datasets
ds1 = self.new_dataset(name='test1071-ds1-%s' % (uuid))
ds2 = self.new_dataset(name='test1071-ds2-%s' % (uuid))
ds3 = self.make_dataset(name='test1071-ds3-%s' % (uuid))
# images
im2 = self.make_image(name='test1071-im2-%s' % (uuid))
# links
#
# im2 -> ds3
# +-> ds2 --> pr2
# | \
# \-> ds1 --> pr1
#
self.link(pr1, ds1)
self.link(pr1, ds2)
self.link(pr2, ds2)
self.link(ds1, im2)
self.link(ds2, im2)
self.link(ds3, im2)
# test:
hier = pojos.findContainerHierarchies(
"Project", [long(im2.id.val)], None)
assert 3 == len(hier), \
"len of hier != 3: %s" % [type(x) for x in hier]
for c in hier:
if c.id.val == pr1.id.val and isinstance(c, ProjectI):
assert c.sizeOfDatasetLinks() == 2, "length 2 != " + \
str(c.sizeOfDatasetLinks())
for pdl in c.copyDatasetLinks():
assert pdl.child.sizeOfImageLinks() == 1
for dil in pdl.child.copyImageLinks():
assert dil.child.id.val == im2.id.val
elif c.id.val == pr2.id.val and isinstance(c, ProjectI):
assert c.sizeOfDatasetLinks() == 1
elif c.id.val == ds3.id.val and isinstance(c, DatasetI):
assert c.sizeOfImageLinks() == 1
def test1071_1(self):
common_group = self.new_group(perms="rwrw--")
c1 = self.new_client(common_group)
c2 = self.new_client(common_group)
c1_uuid = c1.sf.getAdminService().getEventContext().sessionUuid
c2_pojos = c2.sf.getContainerService()
c2_uuid = c2.sf.getAdminService().getEventContext().sessionUuid
# projects
pr1 = self.make_project(name='test1071-pr1-%s' % (c1_uuid), client=c1)
pr2 = self.make_project(name='test1071-pr2-%s' % (c2_uuid), client=c2)
# datasets
ds1 = self.make_dataset(name='test1071-ds1-%s' % (c1_uuid), client=c1)
ds2 = self.make_dataset(name='test1071-ds2-%s' % (c2_uuid), client=c2)
# images
im2 = self.make_image(name='test1071-im2-%s' % (c2_uuid), client=c2)
# links
# im2 owned by u2
#
# im2 -> ds2 --> pr2 (owned by u2)
# |
# \-> ds1 --> pr1 (owned by u1)
#
self.link(pr1, ds1, client=c1)
self.link(pr2, ds2, client=c2)
self.link(ds2, im2, client=c2)
self.link(ds1, im2, client=c1)
# test:
hier = c2_pojos.findContainerHierarchies(
"Project", [long(im2.id.val)], None)
assert 2 == len(hier), "size of hier != 2: %s" % \
[type(x) for x in hier]
for c in hier:
if c.id.val == pr1.id.val and isinstance(c, ProjectI):
assert 1 == c.sizeOfDatasetLinks()
for pdl in c.copyDatasetLinks():
assert 1 == pdl.child.sizeOfImageLinks()
for dil in pdl.child.copyImageLinks():
assert dil.child.id.val == im2.id.val
elif c.id.val == pr2.id.val and isinstance(c, ProjectI):
assert 1 == c.sizeOfDatasetLinks()
elif c.id.val == ds2.id.val and isinstance(c, DatasetI):
assert 1 == c.sizeOfImageLinks()
def test1072(self):
# create two users where both are in the same active group
admin = self.root.sf.getAdminService()
uuid = admin.getEventContext().sessionUuid
new_gr = self.new_group(perms="rwr---")
c1, test_user = self.new_client_and_user(new_gr)
c2, test_user2 = self.new_client_and_user(new_gr)
# login as user1
pr1 = self.new_project(name='test1072-pr1-%s' % (uuid))
ds1 = self.new_dataset(name='test1072-ds1-%s' % (uuid))
self.link(pr1, ds1, client=c1)
# login as user2
pojos = c2.sf.getContainerService()
assert c2.sf.getAdminService().getEventContext()
# print c1.sf.getAdminService().getEventContext()
p = omero.sys.ParametersI()
p.grp(rlong(c2.sf.getAdminService().getEventContext().groupId))
pojos.loadContainerHierarchy("Project", None, p)
def test1088(self):
admin = self.root.sf.getAdminService()
q = self.root.sf.getQueryService()
cx = admin.getEventContext()
p = omero.sys.Parameters()
p.map = {}
p.map["uid"] = rlong(cx.userId)
p.map['start'] = rtime(1218529874000)
p.map['end'] = rtime(1221121874000)
sql1 = "select el from EventLog el left outer join " \
"fetch el.event ev where el.entityType in " \
"('ome.model.core.Pixels', 'ome.model.core.Image', " \
"'ome.model.containers.Dataset', " \
"'ome.model.containers.Project') " \
"and ev.id in (select id from Event where " \
"experimenter.id=:uid and time > :start and time < :end)"
sql2 = "select el from EventLog el left outer join "\
"fetch el.event ev where el.entityType in " \
"('ome.model.core.Pixels', 'ome.model.core.Image', " \
"'ome.model.containers.Dataset', " \
"'ome.model.containers.Project') " \
"and ev.experimenter.id=:uid " \
"and ev.time > :start and ev.time < :end"
# Much of the timing code here was already commented out; to fix
# flake8 warnings, the whole lot is now commented out, with just the
# two queries repeated below.
# import time
# sql1_start = time.time()
# l = q.findAllByQuery(sql1, p)
# sql1_stop = time.time()
# # print "\nSQL1: %s objects in %s seconds" % (str(len(l)),
# # str(sql1_stop - sql1_start))
q.findAllByQuery(sql1, p)
# sql2_start = time.time()
# l = q.findAllByQuery(sql2, p)
# sql2_stop = time.time()
# # print "SQL2: %s objects in %s seconds\n" % (str(len(l)),
# # str(sql2_stop - sql2_start))
q.findAllByQuery(sql2, p)
def test1109(self):
uuid = self.uuid()
admin = self.root.sf.getAdminService()
# Replace defaultGroup with something new
defaultGroup = self.new_group()
# create data
# group1
new_gr1 = ExperimenterGroupI()
new_gr1.name = rstring("group1_%s" % uuid)
new_gr1.ldap = rbool(False)
gid = admin.createGroup(new_gr1)
# new user1
new_exp = ExperimenterI()
new_exp.omeName = rstring("user_%s" % uuid)
new_exp.firstName = rstring("New")
new_exp.lastName = rstring("Test")
new_exp.ldap = rbool(False)
new_exp.email = rstring("newtest@emaildomain.com")
listOfGroups = list()
# defaultGroup = admin.lookupGroup("default") Removed in 4.2
listOfGroups.append(admin.getGroup(gid))
listOfGroups.append(admin.lookupGroup("user"))
eid = admin.createExperimenter(new_exp, defaultGroup, listOfGroups)
# test
exp = admin.getExperimenter(eid)
# print "exp: ", exp.id.val, " his default group is: ",
# admin.getDefaultGroup(exp.id.val).id.val
gr1 = admin.getGroup(2)
indefault = admin.containedExperimenters(gr1.id.val)
# print "members of group %s %i" % (gr1.name.val, gr1.id.val)
for m in indefault:
if m.id.val == exp.id.val:
assert m.copyGroupExperimenterMap()[0].parent.id.val == \
admin.getDefaultGroup(exp.id.val).id.val
# print "exp: id=", m.id.val, "; GEM[0]: ",
# type(m.copyGroupExperimenterMap()[0].parent),
# m.copyGroupExperimenterMap()[0].parent.id.val
gr2 = admin.getGroup(gid)
members2 = admin.containedExperimenters(gr2.id.val)
# print "members of group %s %i" % (gr2.name.val, gr2.id.val)
for m in members2:
if m.id.val == exp.id.val:
copied_id = m.copyGroupExperimenterMap()[0].parent.id.val
got_id = admin.getDefaultGroup(exp.id.val).id.val
contained = admin.containedGroups(m.id.val)
assert copied_id == got_id, \
"""
%s != %s. Groups for experimenter %s = %s
(graph) or %s (contained)
""" % (
copied_id, got_id, exp.id.val,
[x.parent.id.val for x in
m.copyGroupExperimenterMap()],
[y.id.val for y in contained])
# print "exp: id=", m.id.val, "; GEM[0]: ",
# type(m.copyGroupExperimenterMap()[0].parent),
# m.copyGroupExperimenterMap()[0].parent.id.val
def test1163(self):
uuid = self.uuid()
new_gr1 = self.new_group(perms="rw----")
client_share1, new_exp_obj = self.new_client_and_user(new_gr1)
search1 = client_share1.sf.createSearchService()
# create image and index
img = self.make_image(name='test1154-img-%s' % uuid,
client=client_share1)
self.index(img)
# search
search1.onlyType('Image')
search1.addOrderByAsc("name")
search1.byFullText("test*")
assert search1.hasNext()
res = search1.results()
assert 1 == len(res)
@pytest.mark.broken(ticket="11543")
def test1184(self):
uuid = self.uuid()
client = self.new_client(perms="rw----")
query = client.sf.getQueryService()
update = client.sf.getUpdateService()
admin = client.sf.getAdminService()
cont = client.sf.getContainerService()
ds = self.new_dataset(name='test1184-ds-%s' % (uuid))
for i in range(1, 2001):
img = self.new_image(name='img1184-%s' % (uuid))
ds.linkImage(img)
ds = update.saveAndReturnObject(ds)
c = cont.getCollectionCount(
ds.__class__.__name__, ("imageLinks"), [ds.id.val], None)
assert c[ds.id.val] == 2000
page = 1
p = omero.sys.Parameters()
p.map = {}
p.map["eid"] = rlong(admin.getEventContext().userId)
p.map["oid"] = rlong(ds.id.val)
if page is not None:
f = omero.sys.Filter()
f.limit = rint(24)
f.offset = rint((int(page) - 1) * 24)
p.theFilter = f
sql = "select im from Image im join fetch im.details.owner " \
"join fetch im.details.group left outer join fetch " \
"im.datasetLinks dil left outer join fetch dil.parent d " \
"where d.id = :oid and im.details.owner.id=:eid " \
"order by im.id asc"
start = time.time()
res = query.findAllByQuery(sql, p)
assert 24 == len(res)
end = time.time()
elapsed = end - start
assert elapsed < 3.0,\
"Expected the test to complete in < 3 seconds, took: %f" % elapsed
def test1183(self):
# Annotation added before
p = self.new_project(name="ticket1183")
p.linkAnnotation(omero.model.CommentAnnotationI())
p = self.update.saveAndReturnObject(p)
p.description = rstring("desc")
p = self.update.saveAndReturnObject(p)
# Annotation added after
p = self.make_project(name="ticket1183")
p.description = rstring("desc")
p.linkAnnotation(omero.model.CommentAnnotationI())
p = self.update.saveAndReturnObject(p)
p = self.update.saveAndReturnObject(p)
# Unloading annotation after save
p = self.new_project(name="ticket1183")
p.linkAnnotation(omero.model.CommentAnnotationI())
p = self.update.saveAndReturnObject(p)
for l in p.copyAnnotationLinks():
l.child.unload()
p.description = rstring("desc")
p = self.update.saveAndReturnObject(p)
p = self.update.saveAndReturnObject(p)
# Unloaded annotation to save (before)
c = omero.model.CommentAnnotationI()
c = self.update.saveAndReturnObject(c)
c.unload()
p = self.new_project(name="ticket1183")
p.linkAnnotation(c)
p = self.update.saveAndReturnObject(p)
p.description = rstring("desc")
p = self.update.saveAndReturnObject(p)
p = self.update.saveAndReturnObject(p)
# Unloaded annotation to save (after)
c = omero.model.CommentAnnotationI()
c = self.update.saveAndReturnObject(c)
c.unload()
p = self.make_project(name="ticket1183")
p.description = rstring("desc")
p.linkAnnotation(c)
p = self.update.saveAndReturnObject(p)
p = self.update.saveAndReturnObject(p)
| gpl-2.0 |
whn09/tensorflow | tensorflow/python/tools/strip_unused_lib.py | 37 | 4314 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a GraphDef.
Args:
input_graph_def: A graph with nodes we want to prune.
input_node_names: A list of the nodes we use as inputs.
output_node_names: A list of the output nodes.
placeholder_type_enum: The AttrValue enum for the placeholder data type, or
a list that specifies one value per input node name.
Returns:
A GraphDef with all unnecessary ops removed.
"""
# Here we replace the nodes we're going to override as inputs with
# placeholders so that any unused nodes that are inputs to them are
# automatically stripped out by extract_sub_graph().
inputs_replaced_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in input_node_names:
placeholder_node = node_def_pb2.NodeDef()
placeholder_node.op = "Placeholder"
placeholder_node.name = node.name
if isinstance(placeholder_type_enum, list):
input_node_index = input_node_names.index(node.name)
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum[
input_node_index]))
else:
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum))
if "_output_shapes" in node.attr:
placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[
"_output_shapes"])
inputs_replaced_graph_def.node.extend([placeholder_node])
else:
inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])
output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
output_node_names)
return output_graph_def
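# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a graph containing nodes named 'input' and
# 'output' (both names hypothetical); `graph_def` would normally be a
# frozen graph loaded from disk, not the empty stand-in used here.
def _example_strip_unused():
  from tensorflow.core.framework import types_pb2
  graph_def = graph_pb2.GraphDef()  # stand-in for a loaded frozen graph
  # Re-feed the graph at 'input' as a float placeholder and keep only the
  # ops required to compute 'output'.
  return strip_unused(graph_def,
                      input_node_names=['input'],
                      output_node_names=['output'],
                      placeholder_type_enum=types_pb2.DT_FLOAT)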
def strip_unused_from_files(input_graph, input_binary, output_graph,
output_binary, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a graph file."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
output_graph_def = strip_unused(input_graph_def,
input_node_names.split(","),
output_node_names.split(","),
placeholder_type_enum)
if output_binary:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
else:
with gfile.GFile(output_graph, "w") as f:
f.write(text_format.MessageToString(output_graph_def))
print("%d ops in the final graph." % len(output_graph_def.node))
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons/crm_partner_assign/__openerp__.py | 244 | 2369 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner Assignation & Geolocation',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
This is the module used by OpenERP SA to redirect customers to its partners, based on geolocation.
======================================================================================================
This module lets you geolocate Leads, Opportunities and Partners based on their addresses.
Once the coordinates of a Lead/Opportunity are known, it can be automatically assigned
to an appropriate local partner, based on the distance and the weight assigned to that partner.
""",
'author': 'OpenERP SA',
'depends': ['base_geolocalize', 'crm', 'account', 'portal'],
'data': [
'security/ir.model.access.csv',
'res_partner_view.xml',
'wizard/crm_forward_to_partner_view.xml',
'wizard/crm_channel_interested_view.xml',
'crm_lead_view.xml',
'crm_partner_assign_data.xml',
'crm_portal_view.xml',
'portal_data.xml',
'report/crm_lead_report_view.xml',
'report/crm_partner_report_view.xml',
],
'demo': [
'res_partner_demo.xml',
'crm_lead_demo.xml'
],
'test': ['test/partner_assign.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tedder/ansible | lib/ansible/modules/windows/win_iis_webapppool.py | 28 | 7155 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configure IIS Web Application Pools
description:
- Creates, removes and configures an IIS Web Application Pool.
options:
attributes:
description:
- This field is a free form dictionary value for the application pool
attributes.
- These attributes are based on the naming standard at
U(https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005),
see the examples section for more details on how to set this.
- You can also set the attributes of child elements like cpu and
processModel, see the examples to see how it is done.
- While you can use the numeric values for enums it is recommended to use
the enum name itself, e.g. use SpecificUser instead of 3 for
processModel.identityType.
- managedPipelineMode may be either "Integrated" or "Classic".
- startMode may be either "OnDemand" or "AlwaysRunning".
- Use C(state) module parameter to modify the state of the app pool.
- When trying to set 'processModel.password' and you receive a 'Value
does fall within the expected range' error, you have a corrupted
keystore. Please follow
U(http://structuredsight.com/2014/10/26/im-out-of-range-youre-out-of-range/)
to help fix your host.
name:
description:
- Name of the application pool.
type: str
required: yes
state:
description:
- The state of the application pool.
- If C(absent) will ensure the app pool is removed.
- If C(present) will ensure the app pool is configured and exists.
- If C(restarted) will ensure the app pool exists and will restart, this
is never idempotent.
- If C(started) will ensure the app pool exists and is started.
- If C(stopped) will ensure the app pool exists and is stopped.
type: str
choices: [ absent, present, restarted, started, stopped ]
default: present
seealso:
- module: win_iis_virtualdirectory
- module: win_iis_webapplication
- module: win_iis_webbinding
- module: win_iis_website
author:
- Henrik Wallström (@henrikwallstrom)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Return information about an existing application pool
win_iis_webapppool:
name: DefaultAppPool
state: present
- name: Create a new application pool in 'Started' state
win_iis_webapppool:
name: AppPool
state: started
- name: Stop an application pool
win_iis_webapppool:
name: AppPool
state: stopped
- name: Restart an application pool (non-idempotent)
win_iis_webapppool:
name: AppPool
state: restarted
- name: Change application pool attributes using new dict style
win_iis_webapppool:
name: AppPool
attributes:
managedRuntimeVersion: v4.0
autoStart: no
- name: Creates an application pool, sets attributes and starts it
win_iis_webapppool:
name: AnotherAppPool
state: started
attributes:
managedRuntimeVersion: v4.0
autoStart: no
# In the below example we are setting attributes in child element processModel
# https://www.iis.net/configreference/system.applicationhost/applicationpools/add/processmodel
- name: Manage child element and set identity of application pool
win_iis_webapppool:
name: IdentityAppPool
state: started
attributes:
managedPipelineMode: Classic
processModel.identityType: SpecificUser
processModel.userName: '{{ansible_user}}'
processModel.password: '{{ansible_password}}'
processModel.loadUserProfile: true
- name: Manage a timespan attribute
win_iis_webapppool:
name: TimespanAppPool
state: started
attributes:
# Timespan with full string "day:hour:minute:second.millisecond"
recycling.periodicRestart.time: "00:00:05:00.000000"
recycling.periodicRestart.schedule: ["00:10:00", "05:30:00"]
# Shortened timespan "hour:minute:second"
processModel.pingResponseTime: "00:03:00"
'''
RETURN = r'''
attributes:
description: Application Pool attributes that were set and processed by this
module invocation.
returned: success
type: dict
sample:
enable32BitAppOnWin64: "true"
managedRuntimeVersion: "v4.0"
managedPipelineMode: "Classic"
info:
description: Information on current state of the Application Pool. See
https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005
for the full list of return attributes based on your IIS version.
returned: success
type: complex
sample:
contains:
attributes:
description: Key value pairs showing the current Application Pool attributes.
returned: success
type: dict
sample:
autoStart: true
managedRuntimeLoader: "webengine4.dll"
managedPipelineMode: "Classic"
name: "DefaultAppPool"
CLRConfigFile: ""
passAnonymousToken: true
applicationPoolSid: "S-1-5-82-1352790163-598702362-1775843902-1923651883-1762956711"
queueLength: 1000
managedRuntimeVersion: "v4.0"
state: "Started"
enableConfigurationOverride: true
startMode: "OnDemand"
enable32BitAppOnWin64: true
cpu:
description: Key value pairs showing the current Application Pool cpu attributes.
returned: success
type: dict
sample:
action: "NoAction"
limit: 0
resetInterval:
Days: 0
Hours: 0
failure:
description: Key value pairs showing the current Application Pool failure attributes.
returned: success
type: dict
sample:
autoShutdownExe: ""
orphanActionExe: ""
rapidFailProtextionInterval:
Days: 0
Hours: 0
name:
description: Name of Application Pool that was processed by this module invocation.
returned: success
type: str
sample: "DefaultAppPool"
processModel:
description: Key value pairs showing the current Application Pool processModel attributes.
returned: success
type: dict
sample:
identityType: "ApplicationPoolIdentity"
logonType: "LogonBatch"
pingInterval:
Days: 0
Hours: 0
recycling:
description: Key value pairs showing the current Application Pool recycling attributes.
returned: success
type: dict
sample:
disallowOverlappingRotation: false
disallowRotationOnConfigChange: false
logEventOnRecycle: "Time,Requests,Schedule,Memory,IsapiUnhealthy,OnDemand,ConfigChange,PrivateMemory"
state:
description: Current runtime state of the pool as the module completed.
returned: success
type: str
sample: "Started"
'''
| gpl-3.0 |
pschmitt/home-assistant | homeassistant/components/mqtt/const.py | 4 | 1178 | """Constants used by multiple MQTT modules."""
from homeassistant.const import CONF_PAYLOAD
ATTR_DISCOVERY_HASH = "discovery_hash"
ATTR_DISCOVERY_PAYLOAD = "discovery_payload"
ATTR_DISCOVERY_TOPIC = "discovery_topic"
ATTR_PAYLOAD = "payload"
ATTR_QOS = "qos"
ATTR_RETAIN = "retain"
ATTR_TOPIC = "topic"
CONF_BROKER = "broker"
CONF_BIRTH_MESSAGE = "birth_message"
CONF_DISCOVERY = "discovery"
CONF_QOS = ATTR_QOS
CONF_RETAIN = ATTR_RETAIN
CONF_STATE_TOPIC = "state_topic"
CONF_WILL_MESSAGE = "will_message"
DEFAULT_PREFIX = "homeassistant"
DEFAULT_BIRTH_WILL_TOPIC = DEFAULT_PREFIX + "/status"
DEFAULT_DISCOVERY = False
DEFAULT_QOS = 0
DEFAULT_PAYLOAD_AVAILABLE = "online"
DEFAULT_PAYLOAD_NOT_AVAILABLE = "offline"
DEFAULT_RETAIN = False
DEFAULT_BIRTH = {
ATTR_TOPIC: DEFAULT_BIRTH_WILL_TOPIC,
CONF_PAYLOAD: DEFAULT_PAYLOAD_AVAILABLE,
ATTR_QOS: DEFAULT_QOS,
ATTR_RETAIN: DEFAULT_RETAIN,
}
DEFAULT_WILL = {
ATTR_TOPIC: DEFAULT_BIRTH_WILL_TOPIC,
CONF_PAYLOAD: DEFAULT_PAYLOAD_NOT_AVAILABLE,
ATTR_QOS: DEFAULT_QOS,
ATTR_RETAIN: DEFAULT_RETAIN,
}
MQTT_CONNECTED = "mqtt_connected"
MQTT_DISCONNECTED = "mqtt_disconnected"
PROTOCOL_311 = "3.1.1"
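# Illustrative expansion (not part of the original module): given that
# CONF_PAYLOAD is "payload" in homeassistant.const, DEFAULT_BIRTH above is
# equivalent to:
#   {"topic": "homeassistant/status", "payload": "online", "qos": 0,
#    "retain": False}
# and DEFAULT_WILL is the same dict with payload "offline".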
| apache-2.0 |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/etc/apps/headphones/lib/unidecode/x04e.py | 128 | 4646 | data = (
'Yi ', # 0x00
'Ding ', # 0x01
'Kao ', # 0x02
'Qi ', # 0x03
'Shang ', # 0x04
'Xia ', # 0x05
'[?] ', # 0x06
'Mo ', # 0x07
'Zhang ', # 0x08
'San ', # 0x09
'Shang ', # 0x0a
'Xia ', # 0x0b
'Ji ', # 0x0c
'Bu ', # 0x0d
'Yu ', # 0x0e
'Mian ', # 0x0f
'Gai ', # 0x10
'Chou ', # 0x11
'Chou ', # 0x12
'Zhuan ', # 0x13
'Qie ', # 0x14
'Pi ', # 0x15
'Shi ', # 0x16
'Shi ', # 0x17
'Qiu ', # 0x18
'Bing ', # 0x19
'Ye ', # 0x1a
'Cong ', # 0x1b
'Dong ', # 0x1c
'Si ', # 0x1d
'Cheng ', # 0x1e
'Diu ', # 0x1f
'Qiu ', # 0x20
'Liang ', # 0x21
'Diu ', # 0x22
'You ', # 0x23
'Liang ', # 0x24
'Yan ', # 0x25
'Bing ', # 0x26
'Sang ', # 0x27
'Gun ', # 0x28
'Jiu ', # 0x29
'Ge ', # 0x2a
'Ya ', # 0x2b
'Qiang ', # 0x2c
'Zhong ', # 0x2d
'Ji ', # 0x2e
'Jie ', # 0x2f
'Feng ', # 0x30
'Guan ', # 0x31
'Chuan ', # 0x32
'Chan ', # 0x33
'Lin ', # 0x34
'Zhuo ', # 0x35
'Zhu ', # 0x36
'Ha ', # 0x37
'Wan ', # 0x38
'Dan ', # 0x39
'Wei ', # 0x3a
'Zhu ', # 0x3b
'Jing ', # 0x3c
'Li ', # 0x3d
'Ju ', # 0x3e
'Pie ', # 0x3f
'Fu ', # 0x40
'Yi ', # 0x41
'Yi ', # 0x42
'Nai ', # 0x43
'Shime ', # 0x44
'Jiu ', # 0x45
'Jiu ', # 0x46
'Zhe ', # 0x47
'Yao ', # 0x48
'Yi ', # 0x49
'[?] ', # 0x4a
'Zhi ', # 0x4b
'Wu ', # 0x4c
'Zha ', # 0x4d
'Hu ', # 0x4e
'Fa ', # 0x4f
'Le ', # 0x50
'Zhong ', # 0x51
'Ping ', # 0x52
'Pang ', # 0x53
'Qiao ', # 0x54
'Hu ', # 0x55
'Guai ', # 0x56
'Cheng ', # 0x57
'Cheng ', # 0x58
'Yi ', # 0x59
'Yin ', # 0x5a
'[?] ', # 0x5b
'Mie ', # 0x5c
'Jiu ', # 0x5d
'Qi ', # 0x5e
'Ye ', # 0x5f
'Xi ', # 0x60
'Xiang ', # 0x61
'Gai ', # 0x62
'Diu ', # 0x63
'Hal ', # 0x64
'[?] ', # 0x65
'Shu ', # 0x66
'Twul ', # 0x67
'Shi ', # 0x68
'Ji ', # 0x69
'Nang ', # 0x6a
'Jia ', # 0x6b
'Kel ', # 0x6c
'Shi ', # 0x6d
'[?] ', # 0x6e
'Ol ', # 0x6f
'Mai ', # 0x70
'Luan ', # 0x71
'Cal ', # 0x72
'Ru ', # 0x73
'Xue ', # 0x74
'Yan ', # 0x75
'Fu ', # 0x76
'Sha ', # 0x77
'Na ', # 0x78
'Gan ', # 0x79
'Sol ', # 0x7a
'El ', # 0x7b
'Cwul ', # 0x7c
'[?] ', # 0x7d
'Gan ', # 0x7e
'Chi ', # 0x7f
'Gui ', # 0x80
'Gan ', # 0x81
'Luan ', # 0x82
'Lin ', # 0x83
'Yi ', # 0x84
'Jue ', # 0x85
'Liao ', # 0x86
'Ma ', # 0x87
'Yu ', # 0x88
'Zheng ', # 0x89
'Shi ', # 0x8a
'Shi ', # 0x8b
'Er ', # 0x8c
'Chu ', # 0x8d
'Yu ', # 0x8e
'Yu ', # 0x8f
'Yu ', # 0x90
'Yun ', # 0x91
'Hu ', # 0x92
'Qi ', # 0x93
'Wu ', # 0x94
'Jing ', # 0x95
'Si ', # 0x96
'Sui ', # 0x97
'Gen ', # 0x98
'Gen ', # 0x99
'Ya ', # 0x9a
'Xie ', # 0x9b
'Ya ', # 0x9c
'Qi ', # 0x9d
'Ya ', # 0x9e
'Ji ', # 0x9f
'Tou ', # 0xa0
'Wang ', # 0xa1
'Kang ', # 0xa2
'Ta ', # 0xa3
'Jiao ', # 0xa4
'Hai ', # 0xa5
'Yi ', # 0xa6
'Chan ', # 0xa7
'Heng ', # 0xa8
'Mu ', # 0xa9
'[?] ', # 0xaa
'Xiang ', # 0xab
'Jing ', # 0xac
'Ting ', # 0xad
'Liang ', # 0xae
'Xiang ', # 0xaf
'Jing ', # 0xb0
'Ye ', # 0xb1
'Qin ', # 0xb2
'Bo ', # 0xb3
'You ', # 0xb4
'Xie ', # 0xb5
'Dan ', # 0xb6
'Lian ', # 0xb7
'Duo ', # 0xb8
'Wei ', # 0xb9
'Ren ', # 0xba
'Ren ', # 0xbb
'Ji ', # 0xbc
'La ', # 0xbd
'Wang ', # 0xbe
'Yi ', # 0xbf
'Shi ', # 0xc0
'Ren ', # 0xc1
'Le ', # 0xc2
'Ding ', # 0xc3
'Ze ', # 0xc4
'Jin ', # 0xc5
'Pu ', # 0xc6
'Chou ', # 0xc7
'Ba ', # 0xc8
'Zhang ', # 0xc9
'Jin ', # 0xca
'Jie ', # 0xcb
'Bing ', # 0xcc
'Reng ', # 0xcd
'Cong ', # 0xce
'Fo ', # 0xcf
'San ', # 0xd0
'Lun ', # 0xd1
'Sya ', # 0xd2
'Cang ', # 0xd3
'Zi ', # 0xd4
'Shi ', # 0xd5
'Ta ', # 0xd6
'Zhang ', # 0xd7
'Fu ', # 0xd8
'Xian ', # 0xd9
'Xian ', # 0xda
'Tuo ', # 0xdb
'Hong ', # 0xdc
'Tong ', # 0xdd
'Ren ', # 0xde
'Qian ', # 0xdf
'Gan ', # 0xe0
'Yi ', # 0xe1
'Di ', # 0xe2
'Dai ', # 0xe3
'Ling ', # 0xe4
'Yi ', # 0xe5
'Chao ', # 0xe6
'Chang ', # 0xe7
'Sa ', # 0xe8
'[?] ', # 0xe9
'Yi ', # 0xea
'Mu ', # 0xeb
'Men ', # 0xec
'Ren ', # 0xed
'Jia ', # 0xee
'Chao ', # 0xef
'Yang ', # 0xf0
'Qian ', # 0xf1
'Zhong ', # 0xf2
'Pi ', # 0xf3
'Wan ', # 0xf4
'Wu ', # 0xf5
'Jian ', # 0xf6
'Jie ', # 0xf7
'Yao ', # 0xf8
'Feng ', # 0xf9
'Cang ', # 0xfa
'Ren ', # 0xfb
'Wang ', # 0xfc
'Fen ', # 0xfd
'Di ', # 0xfe
'Fang ', # 0xff
)
| gpl-2.0 |
drpaneas/linuxed.gr | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 551 | 2512 | # urllib3/filepost.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
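# --- Illustrative usage (not part of the original module) ---
# Field names and file contents below are made up; the three-tuple value
# (filename, data, mime_type) is one of the shapes RequestField.from_tuples
# accepts.
def _example_encode_multipart():
    body, content_type = encode_multipart_formdata(
        fields={
            'name': 'value',
            'attachment': ('report.txt', b'hello world', 'text/plain'),
        },
        boundary='boundary123')
    # content_type == 'multipart/form-data; boundary=boundary123'
    return body, content_type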
| mit |
saurabh6790/medsyn-lib1 | webnotes/widgets/report_dump.py | 34 | 2776 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
import json
import copy
@webnotes.whitelist()
def get_data(doctypes, last_modified):
from startup.report_data_map import data_map
import datetime
out = {}
doctypes = json.loads(doctypes)
last_modified = json.loads(last_modified)
start = datetime.datetime.now()
for d in doctypes:
args = copy.deepcopy(data_map[d])
dt = d.find("[") != -1 and d[:d.find("[")] or d
out[dt] = {}
if args.get("from"):
modified_table = "item."
else:
modified_table = ""
conditions = order_by = ""
table = args.get("from") or ("`tab%s`" % dt)
if d in last_modified:
if not args.get("conditions"):
args['conditions'] = []
args['conditions'].append(modified_table + "modified > '" + last_modified[d] + "'")
out[dt]["modified_names"] = webnotes.conn.sql_list("""select %sname from %s
where %smodified > %s""" % (modified_table, table, modified_table, "%s"), last_modified[d])
if args.get("force_index"):
conditions = " force index (%s) " % args["force_index"]
if args.get("conditions"):
conditions += " where " + " and ".join(args["conditions"])
if args.get("order_by"):
order_by = " order by " + args["order_by"]
out[dt]["data"] = [list(t) for t in webnotes.conn.sql("""select %s from %s %s %s""" \
% (",".join(args["columns"]), table, conditions, order_by))]
# last modified
modified_table = table
if "," in table:
modified_table = " ".join(table.split(",")[0].split(" ")[:-1])
tmp = webnotes.conn.sql("""select `modified`
from %s order by modified desc limit 1""" % modified_table)
out[dt]["last_modified"] = tmp and tmp[0][0] or ""
out[dt]["columns"] = map(lambda c: c.split(" as ")[-1], args["columns"])
if args.get("links"):
out[dt]["links"] = args["links"]
for d in out:
unused_links = []
# only compress full dumps (not partial)
if out[d].get("links") and (d not in last_modified):
for link_key in out[d]["links"]:
link = out[d]["links"][link_key]
if link[0] in out and (link[0] not in last_modified):
# make a map of link ids
# to index
link_map = {}
doctype_data = out[link[0]]
col_idx = doctype_data["columns"].index(link[1])
for row_idx in xrange(len(doctype_data["data"])):
row = doctype_data["data"][row_idx]
link_map[row[col_idx]] = row_idx
for row in out[d]["data"]:
col_idx = out[d]["columns"].index(link_key)
# replace by id
if row[col_idx]:
row[col_idx] = link_map.get(row[col_idx])
else:
unused_links.append(link_key)
for link in unused_links:
del out[d]["links"][link]
return out
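# Illustrative shape of the returned payload (keys depend on the entries
# in startup.report_data_map and on which doctypes were requested):
# {
#     "Item": {
#         "columns": ["name", "item_name", ...],
#         "data": [[...], [...]],            # rows as lists of values
#         "last_modified": "2013-01-01 00:00:00",
#         "links": {"item_group": ["Item Group", "name"]},
#         "modified_names": [...],           # only for partial (delta) dumps
#     }
# }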
| mit |
marymhayes/wats4000-final-project | node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 1446 | 65937 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'true',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
nmercier/linux-cross-gcc | linux/lib/python2.7/encodings/shift_jis.py | 816 | 1039 | #
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jis',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
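# A quick round-trip sketch (assumption: illustrative only; this codec is
# normally reached through the standard codecs registry, not this module):
#
#   u'\u65e5\u672c\u8a9e'.encode('shift_jis')    # -> '\x93\xfa\x96{\x8c\xea'
#   '\x93\xfa\x96{\x8c\xea'.decode('shift_jis')  # -> u'\u65e5\u672c\u8a9e'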
| bsd-3-clause |
mlechner/green_flask | index.py | 1 | 2248 | from flask import Flask
from flask import request
from flask import render_template
from sqlalchemy import *
from sqlalchemy.orm import *
import geojson
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
#from geoalchemy import *
#from geoalchemy.spatialite import SQLiteComparator
#from pysqlite2 import dbapi2 as sqlite
#import pysqlite2
app = Flask(__name__)
#engine = create_engine('sqlite:////tmp/green_flask.db', module=sqlite, echo=True)
engine = create_engine('sqlite:///green_flask.db')
#connection = engine.raw_connection().connection
#connection.enable_load_extension(True)
metadata = MetaData(engine)
Session = sessionmaker(bind=engine)
session = Session()
#session.execute("select load_extension('/usr/local/lib/libspatialite.so')")
Base = declarative_base(metadata=metadata)
class Geom(Base):
__tablename__ = 'area'
id = Column(Integer, primary_key=True)
name = Column(Unicode, nullable=True)
#geom = GeometryColumn(MultiPolygon(2))
geom = Column(Text, nullable=True)
#GeometryDDL(Area,__table__)
geocoll = session.query(Geom).all()
@app.route('/')
def hello_world():
    return "Marco's Flask testing"
@app.route('/areas', methods=['GET'])
def areas_get():
    # geocoll is a plain list of Geom rows, which geojson cannot serialize
    # directly; rebuild a FeatureCollection from the stored geometry strings.
    features = [geojson.Feature(id=g.id, geometry=geojson.loads(g.geom))
                for g in geocoll if g.geom]
    return geojson.dumps(geojson.FeatureCollection(features))
@app.route('/area/get/<int:id>', methods=['GET'])
def area_get(id):
return 'Area %d returned.' % id
@app.route('/area/add', methods=['GET', 'POST'])
def area_add():
r = request
if request.method == 'POST':
mygeo = geojson.loads(r.data)
if mygeo.type == 'FeatureCollection':
for feature in mygeo.features:
f = Geom()
f.id = feature.id
f.geom = geojson.dumps(feature.geometry)
                geocoll.append(f)
            # Echo the parsed collection back to the client.
            geostr = geojson.dumps(mygeo)
            return geostr
else:
return 'type is not FeatureCollection but %s' %(mygeo.type)
else:
return 'New area added using GET.'
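# Example request against the handler above (assumption: the app is running
# locally on Flask's default port 5000):
#   curl -X POST http://localhost:5000/area/add \
#        -H 'Content-Type: application/json' \
#        -d '{"type": "FeatureCollection", "features": []}'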
@app.route('/area/edit/<int:id>', methods=['GET', 'POST'])
def area_edit(id):
return 'Area %d edited:' % id
@app.route('/map')
def map():
return render_template('map.html')
if __name__ == '__main__':
metadata.create_all()
app.run(host='0.0.0.0')
| mit |
nvie/GitPython | git/test/test_fun.py | 1 | 9348 | from io import BytesIO
from stat import S_IFDIR, S_IFREG, S_IFLNK
from unittest.case import skipIf
from git.compat import PY3
from git.index import IndexFile
from git.index.fun import (
aggressive_tree_merge
)
from git.objects.fun import (
traverse_tree_recursive,
traverse_trees_recursive,
tree_to_stream,
tree_entries_from_data
)
from git.test.lib import (
TestBase,
with_rw_repo
)
from git.util import bin_to_hex
from gitdb.base import IStream
from gitdb.typ import str_tree_type
class TestFun(TestBase):
def _assert_index_entries(self, entries, trees):
index = IndexFile.from_tree(self.rorepo, *[self.rorepo.tree(bin_to_hex(t).decode('ascii')) for t in trees])
assert entries
assert len(index.entries) == len(entries)
for entry in entries:
assert (entry.path, entry.stage) in index.entries
# END assert entry matches fully
def test_aggressive_tree_merge(self):
# head tree with additions, removals and modification compared to its predecessor
odb = self.rorepo.odb
HC = self.rorepo.commit("6c1faef799095f3990e9970bc2cb10aa0221cf9c")
H = HC.tree
B = HC.parents[0].tree
# entries from single tree
trees = [H.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# from multiple trees
trees = [B.binsha, H.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# three way, no conflict
tree = self.rorepo.tree
B = tree("35a09c0534e89b2d43ec4101a5fb54576b577905")
H = tree("4fe5cfa0e063a8d51a1eb6f014e2aaa994e5e7d4")
M = tree("1f2b19de3301e76ab3a6187a49c9c93ff78bafbd")
trees = [B.binsha, H.binsha, M.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# three-way, conflict in at least one file, both modified
B = tree("a7a4388eeaa4b6b94192dce67257a34c4a6cbd26")
H = tree("f9cec00938d9059882bb8eabdaf2f775943e00e5")
M = tree("44a601a068f4f543f73fd9c49e264c931b1e1652")
trees = [B.binsha, H.binsha, M.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# too many trees
self.failUnlessRaises(ValueError, aggressive_tree_merge, odb, trees * 2)
def mktree(self, odb, entries):
"""create a tree from the given tree entries and safe it to the database"""
sio = BytesIO()
tree_to_stream(entries, sio.write)
sio.seek(0)
istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
return istream.binsha
@with_rw_repo('0.1.6')
def test_three_way_merge(self, rwrepo):
def mkfile(name, sha, executable=0):
return (sha, S_IFREG | 0o644 | executable * 0o111, name)
def mkcommit(name, sha):
return (sha, S_IFDIR | S_IFLNK, name)
def assert_entries(entries, num_entries, has_conflict=False):
assert len(entries) == num_entries
assert has_conflict == (len([e for e in entries if e.stage != 0]) > 0)
mktree = self.mktree
shaa = b"\1" * 20
shab = b"\2" * 20
shac = b"\3" * 20
odb = rwrepo.odb
# base tree
bfn = 'basefile'
fbase = mkfile(bfn, shaa)
tb = mktree(odb, [fbase])
# non-conflicting new files, same data
fa = mkfile('1', shab)
th = mktree(odb, [fbase, fa])
fb = mkfile('2', shac)
tm = mktree(odb, [fbase, fb])
# two new files, same base file
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3)
        # both delete the same file, and each adds its own
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('2', shac)
tm = mktree(odb, [fb])
# two new files
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2)
# same file added in both, differently
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('1', shac)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# same file added, different mode
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkcommit('1', shab)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# same file added in both
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('1', shab)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 1)
# modify same base file, differently
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
fb = mkfile(bfn, shac)
tm = mktree(odb, [fb])
# conflict, 3 versions on 3 stages
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3, True)
# change mode on same base file, by making one a commit, the other executable
# no content change ( this is totally unlikely to happen in the real world )
fa = mkcommit(bfn, shaa)
th = mktree(odb, [fa])
fb = mkfile(bfn, shaa, executable=1)
tm = mktree(odb, [fb])
# conflict, 3 versions on 3 stages, because of different mode
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3, True)
for is_them in range(2):
# only we/they change contents
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
trees = [tb, th, tb]
if is_them:
trees = [tb, tb, th]
entries = aggressive_tree_merge(odb, trees)
assert len(entries) == 1 and entries[0].binsha == shab
# only we/they change the mode
fa = mkcommit(bfn, shaa)
th = mktree(odb, [fa])
trees = [tb, th, tb]
if is_them:
trees = [tb, tb, th]
entries = aggressive_tree_merge(odb, trees)
assert len(entries) == 1 and entries[0].binsha == shaa and entries[0].mode == fa[1]
# one side deletes, the other changes = conflict
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
tm = mktree(odb, [])
trees = [tb, th, tm]
if is_them:
trees = [tb, tm, th]
# as one is deleted, there are only 2 entries
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# END handle ours, theirs
def _assert_tree_entries(self, entries, num_trees):
for entry in entries:
assert len(entry) == num_trees
paths = set(e[2] for e in entry if e)
# only one path per set of entries
assert len(paths) == 1
# END verify entry
def test_tree_traversal(self):
        # low level tree traversal
odb = self.rorepo.odb
H = self.rorepo.tree('29eb123beb1c55e5db4aa652d843adccbd09ae18') # head tree
M = self.rorepo.tree('e14e3f143e7260de9581aee27e5a9b2645db72de') # merge tree
B = self.rorepo.tree('f606937a7a21237c866efafcad33675e6539c103') # base tree
B_old = self.rorepo.tree('1f66cfbbce58b4b552b041707a12d437cc5f400a') # old base tree
# two very different trees
entries = traverse_trees_recursive(odb, [B_old.binsha, H.binsha], '')
self._assert_tree_entries(entries, 2)
oentries = traverse_trees_recursive(odb, [H.binsha, B_old.binsha], '')
assert len(oentries) == len(entries)
self._assert_tree_entries(oentries, 2)
# single tree
is_no_tree = lambda i, d: i.type != 'tree'
entries = traverse_trees_recursive(odb, [B.binsha], '')
assert len(entries) == len(list(B.traverse(predicate=is_no_tree)))
self._assert_tree_entries(entries, 1)
# two trees
entries = traverse_trees_recursive(odb, [B.binsha, H.binsha], '')
self._assert_tree_entries(entries, 2)
        # three trees
entries = traverse_trees_recursive(odb, [B.binsha, H.binsha, M.binsha], '')
self._assert_tree_entries(entries, 3)
def test_tree_traversal_single(self):
max_count = 50
count = 0
odb = self.rorepo.odb
for commit in self.rorepo.commit("29eb123beb1c55e5db4aa652d843adccbd09ae18").traverse():
if count >= max_count:
break
count += 1
entries = traverse_tree_recursive(odb, commit.tree.binsha, '')
assert entries
# END for each commit
@skipIf(PY3, 'odd types returned ... maybe figure it out one day')
def test_tree_entries_from_data_with_failing_name_decode_py2(self):
r = tree_entries_from_data(b'100644 \x9f\0aaa')
assert r == [('aaa', 33188, u'\udc9f')], r
@skipIf(not PY3, 'odd types returned ... maybe figure it out one day')
def test_tree_entries_from_data_with_failing_name_decode_py3(self):
r = tree_entries_from_data(b'100644 \x9f\0aaa')
assert r == [(b'aaa', 33188, '\udc9f')], r
| bsd-3-clause |
STRML/OpenBazaar | node/multisig.py | 3 | 6383 | import logging
import random
import re
import urllib2
import obelisk
# Create new private key:
#
# $ sx newkey > key1
#
# Show private secret:
#
# $ cat key1 | sx wif-to-secret
#
# Show compressed public key:
#
# $ cat key1 | sx pubkey
#
# You will need 3 keys for buyer, seller and arbiter
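# A minimal construction sketch (assumption: the *.pub files hold the hex
# output of `sx pubkey`, decoded here to the raw bytes Multisig expects):
#
#   pubkeys = [open(name).read().strip().decode('hex')
#              for name in ('key1.pub', 'key2.pub', 'key3.pub')]
#   ms = Multisig(client, 2, pubkeys)
#   print ms.address   # 2-of-3 P2SH address to fund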
def build_output_info_list(unspent_rows):
unspent_infos = []
for row in unspent_rows:
assert len(row) == 4
outpoint = obelisk.OutPoint()
outpoint.hash = row[0]
outpoint.index = row[1]
value = row[3]
unspent_infos.append(
obelisk.OutputInfo(outpoint, value))
return unspent_infos
class Multisig(object):
def __init__(self, client, number_required, pubkeys):
if number_required > len(pubkeys):
raise Exception("number_required > len(pubkeys)")
self.client = client
self.number_required = number_required
self.pubkeys = pubkeys
self.log = logging.getLogger(self.__class__.__name__)
@property
def script(self):
result = chr(80 + self.number_required)
for pubkey in self.pubkeys:
result += chr(33) + pubkey
result += chr(80 + len(self.pubkeys))
# checkmultisig
result += "\xae"
return result
@property
def address(self):
raw_addr = obelisk.hash_160(self.script)
return obelisk.hash_160_to_bc_address(raw_addr, addrtype=0x05)
def create_unsigned_transaction(self, destination, finished_cb):
def fetched(ec, history):
if ec is not None:
self.log.error("Error fetching history: %s", ec)
return
self._fetched(history, destination, finished_cb)
self.client.fetch_history(self.address, fetched)
def _fetched(self, history, destination, finished_cb):
unspent = [row[:4] for row in history if row[4] is None]
tx = self._build_actual_tx(unspent, destination)
finished_cb(tx)
@staticmethod
def _build_actual_tx(unspent, destination):
# Send all unspent outputs (everything in the address) minus the fee
tx = obelisk.Transaction()
total_amount = 0
for row in unspent:
assert len(row) == 4
outpoint = obelisk.OutPoint()
outpoint.hash = row[0]
outpoint.index = row[1]
value = row[3]
total_amount += value
add_input(tx, outpoint)
# Constrain fee so we don't get negative amount to send
fee = min(total_amount, 10000)
send_amount = total_amount - fee
add_output(tx, destination, send_amount)
return tx
def sign_all_inputs(self, tx, secret):
signatures = []
key = obelisk.EllipticCurveKey()
key.set_secret(secret)
for i, _ in enumerate(tx.inputs):
sighash = generate_signature_hash(tx, i, self.script)
# Add sighash::all to end of signature.
signature = key.sign(sighash) + "\x01"
signatures.append(signature.encode('hex'))
return signatures
@staticmethod
def make_request(*args):
opener = urllib2.build_opener()
opener.addheaders = [(
'User-agent',
'Mozilla/5.0' + str(random.randrange(1000000))
)]
try:
return opener.open(*args).read().strip()
except Exception as e:
try:
p = e.read().strip()
except Exception:
p = e
raise Exception(p)
@staticmethod
def eligius_pushtx(tx):
print 'FINAL TRANSACTION: %s' % tx
s = Multisig.make_request(
'http://eligius.st/~wizkid057/newstats/pushtxn.php',
'transaction=' + tx + '&send=Push'
)
strings = re.findall('string[^"]*"[^"]*"', s)
for string in strings:
quote = re.findall('"[^"]*"', string)[0]
if len(quote) >= 5:
return quote[1:-1]
@staticmethod
def broadcast(tx):
raw_tx = tx.serialize().encode("hex")
Multisig.eligius_pushtx(raw_tx)
# gateway_broadcast(raw_tx)
# bci_pushtx(raw_tx)
def add_input(tx, prevout):
tx_input = obelisk.TxIn()
tx_input.previous_output.hash = prevout.hash
tx_input.previous_output.index = prevout.index
tx.inputs.append(tx_input)
def add_output(tx, address, value):
output = obelisk.TxOut()
output.value = value
output.script = obelisk.output_script(address)
tx.outputs.append(output)
def generate_signature_hash(parent_tx, input_index, script_code):
tx = obelisk.copy_tx(parent_tx)
if input_index >= len(tx.inputs):
return None
for tx_input in tx.inputs:
tx_input.script = ""
tx.inputs[input_index].script = script_code
raw_tx = tx.serialize() + "\x01\x00\x00\x00"
return obelisk.Hash(raw_tx)
class Escrow(object):
def __init__(self, client, buyer_pubkey, seller_pubkey, arbit_pubkey):
pubkeys = (buyer_pubkey, seller_pubkey, arbit_pubkey)
self.multisig = Multisig(client, 2, pubkeys)
# 1. BUYER: Deposit funds for seller
@property
def deposit_address(self):
return self.multisig.address
# 2. BUYER: Send unsigned tx to seller
def initiate(self, destination_address, finished_cb):
self.multisig.create_unsigned_transaction(
destination_address, finished_cb)
# ...
# 3. BUYER: Release funds by sending signature to seller
def release_funds(self, tx, secret):
return self.multisig.sign_all_inputs(tx, secret)
# 4. SELLER: Claim your funds by generating a signature.
def claim_funds(self, tx, secret, buyer_sigs):
seller_sigs = self.multisig.sign_all_inputs(tx, secret)
return Escrow.complete(tx, buyer_sigs, seller_sigs,
self.multisig.script)
@staticmethod
def complete(tx, buyer_sigs, seller_sigs, script_code):
for i, _ in enumerate(tx.inputs):
sigs = (buyer_sigs[i], seller_sigs[i])
script = "\x00"
for sig in sigs:
script += chr(len(sig)) + sig
script += "\x4c"
assert len(script_code) < 255
script += chr(len(script_code)) + script_code
tx.inputs[i].script = script
return tx
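# A minimal end-to-end sketch of the escrow flow (assumption: `client` is a
# connected obelisk client and the pubkeys/secrets are placeholders obtained
# as described at the top of this file):
#
#   escrow = Escrow(client, buyer_pub, seller_pub, arbit_pub)
#   print 'Fund this address:', escrow.deposit_address        # step 1
#
#   def on_unsigned_tx(tx):
#       buyer_sigs = escrow.release_funds(tx, buyer_secret)   # step 3
#       signed_tx = escrow.claim_funds(tx, seller_secret, buyer_sigs)  # step 4
#       Multisig.broadcast(signed_tx)
#
#   escrow.initiate(seller_payout_address, on_unsigned_tx)    # step 2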
| mit |
prarthitm/edxplatform | common/lib/xmodule/xmodule/poll_module.py | 3 | 7883 | """Poll module is an ungraded xmodule that students use
to answer sets of poll questions.
On the client side we show:
If the student has not yet answered - the question with its set of choices.
If the student has answered - the question with statistics for each answer.
"""
import cgi
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from lxml import etree
from openedx.core.djangolib.markup import Text
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.stringify import stringify_children
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, String, Dict, Boolean, List
log = logging.getLogger(__name__)
_ = lambda text: text
class PollFields(object):
# Name of poll to use in links to this poll
display_name = String(
help=_("The display name for this component."),
scope=Scope.settings
)
voted = Boolean(
help=_("Whether this student has voted on the poll"),
scope=Scope.user_state,
default=False
)
poll_answer = String(
help=_("Student answer"),
scope=Scope.user_state,
default=''
)
poll_answers = Dict(
help=_("Poll answers from all students"),
scope=Scope.user_state_summary
)
# List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
answers = List(
help=_("Poll answers from xml"),
scope=Scope.content,
default=[]
)
question = String(
help=_("Poll question"),
scope=Scope.content,
default=''
)
class PollModule(PollFields, XModule):
"""Poll Module"""
js = {
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/poll/poll.js'),
resource_string(__name__, 'js/src/poll/poll_main.js')
]
}
css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
js_module_name = "Poll"
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request data parameters
Returns:
json string
"""
if dispatch in self.poll_answers and not self.voted:
            # FIXME: fix this when xblock supports mutable types.
            # For now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[dispatch] += 1
self.poll_answers = temp_poll_answers
self.voted = True
self.poll_answer = dispatch
return json.dumps({'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values()),
'callback': {'objectName': 'Conditional'}
})
elif dispatch == 'get_state':
return json.dumps({'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values())
})
elif dispatch == 'reset_poll' and self.voted and \
self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
self.voted = False
            # FIXME: fix this when xblock supports mutable types.
            # For now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[self.poll_answer] -= 1
self.poll_answers = temp_poll_answers
self.poll_answer = ''
return json.dumps({'status': 'success'})
else: # return error message
return json.dumps({'error': 'Unknown Command!'})
def get_html(self):
"""Renders parameters to template."""
params = {
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'configuration_json': self.dump_poll(),
}
self.content = self.system.render_template('poll.html', params)
return self.content
def dump_poll(self):
"""Dump poll information.
Returns:
string - Serialize json.
"""
        # FIXME: hack to work around `default={}` being cached when the
        # poll_answers field is defined.
if self.poll_answers is None:
self.poll_answers = {}
answers_to_json = OrderedDict()
        # FIXME: fix this when xblock supports mutable types.
        # For now we use this hack.
temp_poll_answers = self.poll_answers
# Fill self.poll_answers, prepare data for template context.
for answer in self.answers:
# Set default count for answer = 0.
if answer['id'] not in temp_poll_answers:
temp_poll_answers[answer['id']] = 0
answers_to_json[answer['id']] = cgi.escape(answer['text'])
self.poll_answers = temp_poll_answers
return json.dumps({
'answers': answers_to_json,
'question': cgi.escape(self.question),
# to show answered poll after reload:
'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers if self.voted else {},
'total': sum(self.poll_answers.values()) if self.voted else 0,
'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()
})
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
_tag_name = 'poll_question'
_child_tag_name = 'answer'
module_class = PollModule
resources_dir = None
@classmethod
def definition_from_xml(cls, xml_object, system):
"""Pull out the data into dictionary.
Args:
xml_object: xml from file.
system: `system` object.
Returns:
(definition, children) - tuple
definition - dict:
{
'answers': <List of answers>,
'question': <Question string>
}
"""
        # Check for presence of required tags in the xml.
if len(xml_object.xpath(cls._child_tag_name)) == 0:
raise ValueError("Poll_question definition must include \
at least one 'answer' tag")
xml_object_copy = deepcopy(xml_object)
answers = []
for element_answer in xml_object_copy.findall(cls._child_tag_name):
answer_id = element_answer.get('id', None)
if answer_id:
answers.append({
'id': answer_id,
'text': stringify_children(element_answer)
})
xml_object_copy.remove(element_answer)
definition = {
'answers': answers,
'question': stringify_children(xml_object_copy)
}
children = []
return (definition, children)
def definition_to_xml(self, resource_fs):
"""Return an xml element representing to this definition."""
poll_str = u'<{tag_name}>{text}</{tag_name}>'.format(
tag_name=self._tag_name, text=self.question)
xml_object = etree.fromstring(poll_str)
xml_object.set('display_name', self.display_name)
def add_child(xml_obj, answer):
# Escape answer text before adding to xml tree.
answer_text = unicode(Text(answer['text']))
child_str = u'<{tag_name} id="{id}">{text}</{tag_name}>'.format(
tag_name=self._child_tag_name, id=answer['id'],
text=answer_text)
child_node = etree.fromstring(child_str)
xml_object.append(child_node)
for answer in self.answers:
add_child(xml_object, answer)
return xml_object
| agpl-3.0 |
CountZer0/PipelineConstructionSet | python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/OpenMayaFX.py | 3 | 33487 | from . import OpenMaya
import _OpenMayaFX
import weakref
from __builtin__ import property as _swig_property
from __builtin__ import object as _object
class MDynSweptLine(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def length(*args, **kwargs):
pass
def normal(*args, **kwargs):
pass
def tangent(*args, **kwargs):
pass
def vertex(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MnSolver(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def addNObject(*args, **kwargs):
pass
def createNSolver(*args, **kwargs):
pass
def makeAllCollide(*args, **kwargs):
pass
def removeAllCollisions(*args, **kwargs):
pass
def removeNObject(*args, **kwargs):
pass
def setAirDensity(*args, **kwargs):
pass
def setDisabled(*args, **kwargs):
pass
def setGravity(*args, **kwargs):
pass
def setGravityDir(*args, **kwargs):
pass
def setMaxIterations(*args, **kwargs):
pass
def setStartTime(*args, **kwargs):
pass
def setSubsteps(*args, **kwargs):
pass
def setWindDir(*args, **kwargs):
pass
def setWindNoiseIntensity(*args, **kwargs):
pass
def setWindSpeed(*args, **kwargs):
pass
def solve(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class new_instancemethod(_object):
"""
instancemethod(function, instance, class)
Create an instance method object.
"""
def __call__(*args, **kwargs):
"""
x.__call__(...) <==> x(...)
"""
pass
def __cmp__(*args, **kwargs):
"""
x.__cmp__(y) <==> cmp(x,y)
"""
pass
def __delattr__(*args, **kwargs):
"""
x.__delattr__('name') <==> del x.name
"""
pass
def __get__(*args, **kwargs):
"""
descr.__get__(obj[, type]) -> value
"""
pass
def __getattribute__(*args, **kwargs):
"""
x.__getattribute__('name') <==> x.name
"""
pass
def __hash__(*args, **kwargs):
"""
x.__hash__() <==> hash(x)
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __setattr__(*args, **kwargs):
"""
x.__setattr__('name', value) <==> x.name = value
"""
pass
__func__ = None
__self__ = None
im_class = None
im_func = None
im_self = None
__new__ = None
class MFnNObjectData(OpenMaya.MFnData):
def __init__(self, *args):
pass
def __repr__(self):
pass
def create(*args, **kwargs):
pass
def getClothObjectPtr(*args, **kwargs):
pass
def getCollide(*args, **kwargs):
pass
def getParticleObjectPtr(*args, **kwargs):
pass
def getRigidObjectPtr(*args, **kwargs):
pass
def isCached(*args, **kwargs):
pass
def setCached(*args, **kwargs):
pass
def setObjectPtr(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MnObject(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MRenderLineArray(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def assign(*args, **kwargs):
pass
def deleteArray(*args, **kwargs):
pass
def length(*args, **kwargs):
pass
def renderLine(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MRenderLine(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def assign(*args, **kwargs):
pass
def getColor(*args, **kwargs):
pass
def getFlatness(*args, **kwargs):
pass
def getIncandescence(*args, **kwargs):
pass
def getLine(*args, **kwargs):
pass
def getParameter(*args, **kwargs):
pass
def getTransparency(*args, **kwargs):
pass
def getTwist(*args, **kwargs):
pass
def getWidth(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MFnNIdData(OpenMaya.MFnData):
def __init__(self, *args):
pass
def __repr__(self):
pass
def create(*args, **kwargs):
pass
def getObjectPtr(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnDynSweptGeometryData(OpenMaya.MFnData):
def __init__(self, *args):
pass
def __repr__(self):
pass
def create(*args, **kwargs):
pass
def lineCount(*args, **kwargs):
pass
def sweptLine(*args, **kwargs):
pass
def sweptTriangle(*args, **kwargs):
pass
def triangleCount(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnInstancer(OpenMaya.MFnDagNode):
def __init__(self, *args):
pass
def __repr__(self):
pass
def allInstances(*args, **kwargs):
pass
def instancesForParticle(*args, **kwargs):
pass
def particleCount(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MDynSweptTriangle(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def area(*args, **kwargs):
pass
def normal(*args, **kwargs):
pass
def normalToPoint(*args, **kwargs):
pass
def uvPoint(*args, **kwargs):
pass
def vertex(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MDynamicsUtil(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def evalDynamics2dTexture(*args, **kwargs):
pass
def hasValidDynamics2dTexture(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MHairSystem(_object):
def __init__(self, *args):
pass
def __repr__(self):
pass
def className(*args, **kwargs):
pass
def getCollisionObject(*args, **kwargs):
pass
def getFollicle(*args, **kwargs):
pass
def registerCollisionSolverCollide(*args, **kwargs):
pass
def registerCollisionSolverPreFrame(*args, **kwargs):
pass
def registeringCallableScript(*args, **kwargs):
pass
def setRegisteringCallableScript(*args, **kwargs):
pass
def unregisterCollisionSolverCollide(*args, **kwargs):
pass
def unregisterCollisionSolverPreFrame(*args, **kwargs):
pass
__dict__ = None
__weakref__ = None
thisown = None
__swig_destroy__ = None
class MFnPfxGeometry(OpenMaya.MFnDagNode):
def __init__(self, *args):
pass
def __repr__(self):
pass
def getBoundingBox(*args, **kwargs):
pass
def getLineData(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnFluid(OpenMaya.MFnDagNode):
def __init__(self, *args):
pass
def __repr__(self):
pass
def create2D(*args, **kwargs):
pass
def create3D(*args, **kwargs):
pass
def density(*args, **kwargs):
pass
def emitIntoArrays(*args, **kwargs):
pass
def falloff(*args, **kwargs):
pass
def fuel(*args, **kwargs):
pass
def getColorMode(*args, **kwargs):
pass
def getColors(*args, **kwargs):
pass
def getCoordinateMode(*args, **kwargs):
pass
def getCoordinates(*args, **kwargs):
pass
def getDensityMode(*args, **kwargs):
pass
def getDimensions(*args, **kwargs):
pass
def getFalloffMode(*args, **kwargs):
pass
def getForceAtPoint(*args, **kwargs):
pass
def getFuelMode(*args, **kwargs):
pass
def getResolution(*args, **kwargs):
pass
def getTemperatureMode(*args, **kwargs):
pass
def getVelocity(*args, **kwargs):
pass
def getVelocityMode(*args, **kwargs):
pass
def gridSize(*args, **kwargs):
pass
def index(*args, **kwargs):
pass
def pressure(*args, **kwargs):
pass
def setColorMode(*args, **kwargs):
pass
def setCoordinateMode(*args, **kwargs):
pass
def setDensityMode(*args, **kwargs):
pass
def setFalloffMode(*args, **kwargs):
pass
def setFuelMode(*args, **kwargs):
pass
def setSize(*args, **kwargs):
pass
def setTemperatureMode(*args, **kwargs):
pass
def setVelocityMode(*args, **kwargs):
pass
def temperature(*args, **kwargs):
pass
def toGridIndex(*args, **kwargs):
pass
def updateGrid(*args, **kwargs):
pass
def velocityGridSizes(*args, **kwargs):
pass
def voxelCenterPosition(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
kCenterGradient = 7
kConstant = 0
kDynamicColorGrid = 2
kDynamicGrid = 2
kFixed = 0
kGradient = 3
kGrid = 1
kNegXGradient = 4
kNegYGradient = 5
kNegZGradient = 6
kNoFalloffGrid = 0
kStaticColorGrid = 1
kStaticFalloffGrid = 1
kStaticGrid = 1
kUseShadingColor = 0
kXGradient = 1
kYGradient = 2
kZGradient = 3
kZero = 0
class MFnParticleSystem(OpenMaya.MFnDagNode):
def __init__(self, *args):
pass
def __repr__(self):
pass
def acceleration(*args, **kwargs):
pass
def age(*args, **kwargs):
pass
def betterIllum(*args, **kwargs):
pass
def castsShadows(*args, **kwargs):
pass
def count(*args, **kwargs):
pass
def create(*args, **kwargs):
pass
def deformedParticleShape(*args, **kwargs):
pass
def disableCloudAxis(*args, **kwargs):
pass
def emission(*args, **kwargs):
pass
def emit(*args, **kwargs):
pass
def evaluateDynamics(*args, **kwargs):
pass
def flatShaded(*args, **kwargs):
pass
def getPerParticleAttribute(*args, **kwargs):
pass
def hasEmission(*args, **kwargs):
pass
def hasLifespan(*args, **kwargs):
pass
def hasOpacity(*args, **kwargs):
pass
def hasRgb(*args, **kwargs):
pass
def isDeformedParticleShape(*args, **kwargs):
pass
def isPerParticleDoubleAttribute(*args, **kwargs):
pass
def isPerParticleIntAttribute(*args, **kwargs):
pass
def isPerParticleVectorAttribute(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def lifespan(*args, **kwargs):
pass
def mass(*args, **kwargs):
pass
def opacity(*args, **kwargs):
pass
def originalParticleShape(*args, **kwargs):
pass
def particleIds(*args, **kwargs):
pass
def particleName(*args, **kwargs):
pass
def position(*args, **kwargs):
pass
def position0(*args, **kwargs):
pass
def position1(*args, **kwargs):
pass
def primaryVisibility(*args, **kwargs):
pass
def radius(*args, **kwargs):
pass
def radius0(*args, **kwargs):
pass
def radius1(*args, **kwargs):
pass
def receiveShadows(*args, **kwargs):
pass
def renderType(*args, **kwargs):
pass
def rgb(*args, **kwargs):
pass
def saveInitialState(*args, **kwargs):
pass
def setCount(*args, **kwargs):
pass
def setPerParticleAttribute(*args, **kwargs):
pass
def surfaceShading(*args, **kwargs):
pass
def tailSize(*args, **kwargs):
pass
def threshold(*args, **kwargs):
pass
def velocity(*args, **kwargs):
pass
def visibleInReflections(*args, **kwargs):
pass
def visibleInRefractions(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
kBlobby = 2
kCloud = 0
kMultiPoint = 3
kMultiStreak = 4
kNumeric = 5
kPoints = 6
kSpheres = 7
kSprites = 8
kStreak = 9
kTube = 1
class MFnField(OpenMaya.MFnDagNode):
def __init__(self, *args):
pass
def __repr__(self):
pass
def attenuation(*args, **kwargs):
pass
def falloffCurve(*args, **kwargs):
pass
def getForceAtPoint(*args, **kwargs):
pass
def isFalloffCurveConstantOne(*args, **kwargs):
pass
def magnitude(*args, **kwargs):
pass
def maxDistance(*args, **kwargs):
pass
def perVertex(*args, **kwargs):
pass
def setAttenuation(*args, **kwargs):
pass
def setMagnitude(*args, **kwargs):
pass
def setMaxDistance(*args, **kwargs):
pass
def setPerVertex(*args, **kwargs):
pass
def setUseMaxDistance(*args, **kwargs):
pass
def useMaxDistance(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnNewtonField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def minDistance(*args, **kwargs):
pass
def setMinDistance(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MnRigid(MnObject):
def __init__(self, *args):
pass
def __repr__(self):
pass
def createNRigid(*args, **kwargs):
pass
def getBounce(*args, **kwargs):
pass
def getFriction(*args, **kwargs):
pass
def getInverseMass(*args, **kwargs):
pass
def getNumVertices(*args, **kwargs):
pass
def getPositions(*args, **kwargs):
pass
def getThickness(*args, **kwargs):
pass
def getVelocities(*args, **kwargs):
pass
def setBounce(*args, **kwargs):
pass
def setCollisionFlags(*args, **kwargs):
pass
def setFriction(*args, **kwargs):
pass
def setPositions(*args, **kwargs):
pass
def setThickness(*args, **kwargs):
pass
def setTopology(*args, **kwargs):
pass
def setVelocities(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MnParticle(MnObject):
def __init__(self, *args):
pass
def __repr__(self):
pass
def createNParticle(*args, **kwargs):
pass
def getBounce(*args, **kwargs):
pass
def getFriction(*args, **kwargs):
pass
def getInverseMass(*args, **kwargs):
pass
def getNumVertices(*args, **kwargs):
pass
def getPositions(*args, **kwargs):
pass
def getThickness(*args, **kwargs):
pass
def getVelocities(*args, **kwargs):
pass
def setBounce(*args, **kwargs):
pass
def setCollide(*args, **kwargs):
pass
def setDamping(*args, **kwargs):
pass
def setDisableGravity(*args, **kwargs):
pass
def setDragAndLift(*args, **kwargs):
pass
def setFriction(*args, **kwargs):
pass
def setIncompressibility(*args, **kwargs):
pass
def setInverseMass(*args, **kwargs):
pass
def setLiquidRadiusScale(*args, **kwargs):
pass
def setLiquidSimulation(*args, **kwargs):
pass
def setMaxIterations(*args, **kwargs):
pass
def setMaxSelfCollisionIterations(*args, **kwargs):
pass
def setPositions(*args, **kwargs):
pass
def setRestDensity(*args, **kwargs):
pass
def setSelfCollide(*args, **kwargs):
pass
def setSelfCollideWidth(*args, **kwargs):
pass
def setSelfCollisionSoftness(*args, **kwargs):
pass
def setSurfaceTension(*args, **kwargs):
pass
def setThickness(*args, **kwargs):
pass
def setTopology(*args, **kwargs):
pass
def setVelocities(*args, **kwargs):
pass
def setViscosity(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MnCloth(MnObject):
def __init__(self, *args):
pass
def __repr__(self):
pass
def createNCloth(*args, **kwargs):
pass
def getBounce(*args, **kwargs):
pass
def getFriction(*args, **kwargs):
pass
def getInverseMass(*args, **kwargs):
pass
def getNumVertices(*args, **kwargs):
pass
def getPositions(*args, **kwargs):
pass
def getThickness(*args, **kwargs):
pass
def getVelocities(*args, **kwargs):
pass
def setAddCrossLinks(*args, **kwargs):
pass
def setAirTightness(*args, **kwargs):
pass
def setBendAngleDropoff(*args, **kwargs):
pass
def setBendAngleScale(*args, **kwargs):
pass
def setBendResistance(*args, **kwargs):
pass
def setBendRestAngleFromPositions(*args, **kwargs):
pass
def setBounce(*args, **kwargs):
pass
def setCollisionFlags(*args, **kwargs):
pass
def setComputeRestAngles(*args, **kwargs):
pass
def setComputeRestLength(*args, **kwargs):
pass
def setDamping(*args, **kwargs):
pass
def setDisableGravity(*args, **kwargs):
pass
def setDragAndLift(*args, **kwargs):
pass
def setFriction(*args, **kwargs):
pass
def setIncompressibility(*args, **kwargs):
pass
def setInputMeshAttractAndRigidStrength(*args, **kwargs):
pass
def setInputMeshAttractDamping(*args, **kwargs):
pass
def setInputMeshAttractPositions(*args, **kwargs):
pass
def setInverseMass(*args, **kwargs):
pass
def setLinksRestLengthFromPositions(*args, **kwargs):
pass
def setMaxIterations(*args, **kwargs):
pass
def setMaxSelfCollisionIterations(*args, **kwargs):
pass
def setPositions(*args, **kwargs):
pass
def setPressure(*args, **kwargs):
pass
def setPressureDamping(*args, **kwargs):
pass
def setPumpRate(*args, **kwargs):
pass
def setRestitutionAngle(*args, **kwargs):
pass
def setRestitutionTension(*args, **kwargs):
pass
def setSealHoles(*args, **kwargs):
pass
def setSelfCollideWidth(*args, **kwargs):
pass
def setSelfCollisionFlags(*args, **kwargs):
pass
def setSelfCollisionSoftness(*args, **kwargs):
pass
def setSelfCrossoverPush(*args, **kwargs):
pass
def setSelfTrappedCheck(*args, **kwargs):
pass
def setShearResistance(*args, **kwargs):
pass
def setStartPressure(*args, **kwargs):
pass
def setStretchAndCompressionResistance(*args, **kwargs):
pass
def setTangentialDrag(*args, **kwargs):
pass
def setThickness(*args, **kwargs):
pass
def setTopology(*args, **kwargs):
pass
def setTrackVolume(*args, **kwargs):
pass
def setVelocities(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnAirField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def componentOnly(*args, **kwargs):
pass
def direction(*args, **kwargs):
pass
def enableSpread(*args, **kwargs):
pass
def inheritRotation(*args, **kwargs):
pass
def inheritVelocity(*args, **kwargs):
pass
def setComponentOnly(*args, **kwargs):
pass
def setDirection(*args, **kwargs):
pass
def setEnableSpread(*args, **kwargs):
pass
def setInheritRotation(*args, **kwargs):
pass
def setInheritVelocity(*args, **kwargs):
pass
def setSpeed(*args, **kwargs):
pass
def setSpread(*args, **kwargs):
pass
def speed(*args, **kwargs):
pass
def spread(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnVolumeAxisField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def detailTurbulence(*args, **kwargs):
pass
def direction(*args, **kwargs):
pass
def directionalSpeed(*args, **kwargs):
pass
def invertAttenuation(*args, **kwargs):
pass
def setDirection(*args, **kwargs):
pass
def setDirectionalSpeed(*args, **kwargs):
pass
def setInvertAttenuation(*args, **kwargs):
pass
def setSpeedAlongAxis(*args, **kwargs):
pass
def setSpeedAroundAxis(*args, **kwargs):
pass
def setSpeedAwayFromAxis(*args, **kwargs):
pass
def setSpeedAwayFromCenter(*args, **kwargs):
pass
def setTurbulence(*args, **kwargs):
pass
def setTurbulenceFrequency(*args, **kwargs):
pass
def setTurbulenceOffset(*args, **kwargs):
pass
def setTurbulenceSpeed(*args, **kwargs):
pass
def speedAlongAxis(*args, **kwargs):
pass
def speedAroundAxis(*args, **kwargs):
pass
def speedAwayFromAxis(*args, **kwargs):
pass
def speedAwayFromCenter(*args, **kwargs):
pass
def turbulence(*args, **kwargs):
pass
def turbulenceFrequency(*args, **kwargs):
pass
def turbulenceOffset(*args, **kwargs):
pass
def turbulenceSpeed(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnVortexField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def axis(*args, **kwargs):
pass
def setAxis(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnGravityField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def direction(*args, **kwargs):
pass
def setDirection(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnRadialField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def radialType(*args, **kwargs):
pass
def setType(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnTurbulenceField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def frequency(*args, **kwargs):
pass
def phase(*args, **kwargs):
pass
def setFrequency(*args, **kwargs):
pass
def setPhase(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnUniformField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def direction(*args, **kwargs):
pass
def setDirection(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
class MFnDragField(MFnField):
def __init__(self, *args):
pass
def __repr__(self):
pass
def direction(*args, **kwargs):
pass
def setDirection(*args, **kwargs):
pass
def setUseDirection(*args, **kwargs):
pass
def useDirection(*args, **kwargs):
pass
def className(*args, **kwargs):
pass
thisown = None
__swig_destroy__ = None
def MFnDragField_className(*args, **kwargs):
pass
def MFnInstancer_swigregister(*args, **kwargs):
pass
def MHairSystem_setRegisteringCallableScript(*args, **kwargs):
pass
def MnObject_swigregister(*args, **kwargs):
pass
def MHairSystem_className(*args, **kwargs):
pass
def MFnParticleSystem_className(*args, **kwargs):
pass
def MDynamicsUtil_evalDynamics2dTexture(*args, **kwargs):
pass
def MDynamicsUtil_swigregister(*args, **kwargs):
pass
def MFnVortexField_className(*args, **kwargs):
pass
def MFnVolumeAxisField_className(*args, **kwargs):
pass
def MFnNIdData_swigregister(*args, **kwargs):
pass
def MFnGravityField_className(*args, **kwargs):
pass
def MFnDynSweptGeometryData_swigregister(*args, **kwargs):
pass
def MFnDynSweptGeometryData_className(*args, **kwargs):
pass
def MHairSystem_unregisterCollisionSolverCollide(*args, **kwargs):
pass
def MFnAirField_className(*args, **kwargs):
pass
def MDynSweptTriangle_className(*args, **kwargs):
pass
def _swig_repr(self):
pass
def MnSolver_swigregister(*args, **kwargs):
pass
def MFnInstancer_className(*args, **kwargs):
pass
def MRenderLine_className(*args, **kwargs):
pass
def MnRigid_swigregister(*args, **kwargs):
pass
def MHairSystem_unregisterCollisionSolverPreFrame(*args, **kwargs):
pass
def MFnUniformField_className(*args, **kwargs):
pass
def MFnDragField_swigregister(*args, **kwargs):
pass
def MFnPfxGeometry_swigregister(*args, **kwargs):
pass
def MFnNewtonField_className(*args, **kwargs):
pass
def MFnNewtonField_swigregister(*args, **kwargs):
pass
def weakref_proxy(*args, **kwargs):
"""
proxy(object[, callback]) -- create a proxy object that weakly
references 'object'. 'callback', if given, is called with a
reference to the proxy when 'object' is about to be finalized.
"""
pass
def MRenderLineArray_className(*args, **kwargs):
pass
def MFnNIdData_className(*args, **kwargs):
pass
def MHairSystem_swigregister(*args, **kwargs):
pass
def MFnAirField_swigregister(*args, **kwargs):
pass
def MRenderLine_swigregister(*args, **kwargs):
pass
def MRenderLineArray_swigregister(*args, **kwargs):
pass
def MHairSystem_registerCollisionSolverPreFrame(*args, **kwargs):
pass
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
pass
def MFnNObjectData_className(*args, **kwargs):
pass
def MFnField_className(*args, **kwargs):
pass
def MHairSystem_getCollisionObject(*args, **kwargs):
pass
def MHairSystem_registeringCallableScript(*args, **kwargs):
pass
def MFnTurbulenceField_swigregister(*args, **kwargs):
pass
def MFnFluid_className(*args, **kwargs):
pass
def MFnVortexField_swigregister(*args, **kwargs):
pass
def MnParticle_swigregister(*args, **kwargs):
pass
def MnCloth_swigregister(*args, **kwargs):
pass
def MFnVolumeAxisField_swigregister(*args, **kwargs):
pass
def MHairSystem_registerCollisionSolverCollide(*args, **kwargs):
pass
def MFnUniformField_swigregister(*args, **kwargs):
pass
def MFnParticleSystem_swigregister(*args, **kwargs):
pass
def MDynSweptLine_swigregister(*args, **kwargs):
pass
def MFnTurbulenceField_className(*args, **kwargs):
pass
def MFnRadialField_className(*args, **kwargs):
pass
def MFnPfxGeometry_className(*args, **kwargs):
pass
def MFnField_swigregister(*args, **kwargs):
pass
def MDynSweptLine_className(*args, **kwargs):
pass
def _swig_setattr_nondynamic_method(set):
pass
def MDynSweptTriangle_swigregister(*args, **kwargs):
pass
def MFnRadialField_swigregister(*args, **kwargs):
pass
def _swig_setattr(self, class_type, name, value):
pass
def MFnGravityField_swigregister(*args, **kwargs):
pass
def MFnFluid_swigregister(*args, **kwargs):
pass
def MHairSystem_getFollicle(*args, **kwargs):
pass
def MDynamicsUtil_hasValidDynamics2dTexture(*args, **kwargs):
pass
def MFnNObjectData_swigregister(*args, **kwargs):
pass
def _swig_getattr(self, class_type, name):
pass
_newclass = 1
| bsd-3-clause |
philchristensen/antioch | bin/update_zappa_settings.py | 1 | 1034 | #!/usr/bin/env python
import json
import boto3
cfn = boto3.client('cloudformation')
paginator = cfn.get_paginator('describe_stacks')
pages = paginator.paginate(StackName="antioch-prod")
details = {}
for page in pages:
for stack in page['Stacks']:
for output in stack['Outputs']:
details[output['OutputKey']] = output['OutputValue']
with open('zappa_settings.json') as f:
cfg = json.loads(f.read())
cfg['prod']['vpc_config']['SecurityGroupIds'] = [details['WebappSecurityGroup']]
cfg['prod']['vpc_config']['SubnetIds'] = [
details['PrivateSubnet1AID'],
details['PrivateSubnet2AID'],
details['PrivateSubnet3AID']
]
cfg['prod']['environment_variables']['DB_HOST'] = details['DatabaseHost']
cfg['prod']['environment_variables']['DB_PORT'] = details['DatabasePort']
cfg['prod']['environment_variables']['STATIC_BUCKET'] = details['StaticBucketName']
with open('zappa_settings.json', 'w') as f:
f.write(json.dumps(cfg, indent=4))
print("Updated zappa_settings.json with stack variables.") | mit |
datalogics/scons | test/Builder/non-multi.py | 2 | 1830 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that a builder without "multi" set can still be called multiple
times if the calls are the same.
"""
import TestSCons
test = TestSCons.TestSCons(match=TestSCons.match_re)
test.write('SConstruct', """
def build(env, target, source):
file = open(str(target[0]), 'wb')
for s in source:
file.write(open(str(s), 'rb').read())
B = Builder(action=build, multi=0)
env = Environment(BUILDERS = { 'B' : B })
env.B(target = 'file7.out', source = 'file7.in')
env.B(target = 'file7.out', source = 'file7.in')
""")
test.write('file7.in', 'file7.in\n')
test.run(arguments='file7.out')
test.must_match('file7.out', "file7.in\n")
test.pass_test()
| mit |
prodromou87/gem5 | ext/ply/test/yacc_badargs.py | 174 | 1528 | # -----------------------------------------------------------------------------
# yacc_badargs.py
#
# Rules with wrong # args
# -----------------------------------------------------------------------------
import sys
sys.tracebacklimit = 0
sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t,s):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr():
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
| bsd-3-clause |
doirisks/dori | models/10.1161:CIRCULATIONAHA.107.699579/model_b.py | 1 | 2564 | """
model_b.py
by Ted Morin
contains a function to predict 10-year Coronary Heart Disease risk using the model from
10.1161/CIRCULATIONAHA.107.699579
General Cardiovascular Risk Profile for Use in Primary Care
Framingham Heart Study
function expects parameters of
"ismale" "antihypertensive medication use" "age" "total colesterol" "HDL cholesterol" "SBD" "Smoking" "Diabetes"
years mg/dL mg/dL mm Hg
bool bool int/float int/float int/float int/float bool bool
"""
def model(ismale, antihyp, age, totchol, hdlchol, sbp, smoke, diabet):
# imports
import numpy as np
# beta values
female_betas = np.array([
2.32888, #"Log of age"
1.20904, #"Log of total cholesterol"
-0.70833, #"Log of HDL cholesterol"
2.76157, #"Log of SBP if not treated"
2.82263, #"Log of SBP if treated"
0.52873, #"Smoking"
0.69154 #"Diabetes"
])
female_s0 = 0.95012
female_xbar = 26.1931
male_betas = np.array([
3.06117, #"Log of age"
1.12370, #"Log of total cholesterol"
-0.93263, #"Log of HDL cholesterol"
        1.93303, #"Log of SBP if not treated"
        1.99881, #"Log of SBP if treated"
0.65451, #"Smoking"
0.57367 #"Diabetes"
])
male_s0 = 0.88936
male_xbar = 23.9802
#determine which beta values should be used
betas = None
# accounts for gender
if ismale :
betas = male_betas
s0 = male_s0
xbar = male_xbar
else :
betas = female_betas
s0 = female_s0
xbar = female_xbar
# transforms input values logarithmically
age = np.log(age)
totchol = np.log(totchol)
hdlchol = np.log(hdlchol)
sbp = np.log(sbp)
# accounts for antihypertensive medication
if antihyp:
sbp_antihyp = sbp
sbp_noantihyp = 0.
else :
sbp_antihyp = 0.
sbp_noantihyp = sbp
# builds a numpy array of the values
values = np.array([age,totchol,hdlchol,sbp_noantihyp,sbp_antihyp,smoke,diabet])
# dots the betas and the values
value = np.dot(betas,values)
    # Cox survival model: risk = 1 - S0 ** exp(beta . x - xbar)
    cvd = 1 - np.power(s0, np.exp(value - xbar))
male_calib_factor = 0.7174
female_calib_factor = 0.6086
if ismale:
return cvd * male_calib_factor
else :
return cvd * female_calib_factor
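
# Minimal usage sketch (added for illustration; not part of the original
# model file). The inputs below describe a hypothetical patient, not data
# taken from the paper.
if __name__ == '__main__':
    risk = model(ismale=True, antihyp=True, age=61, totchol=180,
                 hdlchol=47, sbp=124, smoke=True, diabet=False)
    print("estimated 10-year risk: %.1f%%" % (100 * risk))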
| gpl-3.0 |
maxamillion/ansible | test/units/plugins/strategy/test_linear.py | 58 | 6626 | # Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from ansible.plugins.strategy.linear import StrategyModule
from ansible.executor.task_queue_manager import TaskQueueManager
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestStrategyLinear(unittest.TestCase):
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_noop(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- block:
- block:
- name: task1
debug: msg='task1'
failed_when: inventory_hostname == 'host01'
- name: task2
debug: msg='task2'
rescue:
- name: rescue1
debug: msg='rescue1'
- name: rescue2
debug: msg='rescue2'
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
inventory = MagicMock()
inventory.hosts = {}
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory.hosts[host.name] = host
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
mock_var_manager._fact_cache['host00'] = dict()
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
strategy._hosts_cache = [h.name for h in hosts]
strategy._hosts_cache_all = [h.name for h in hosts]
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# debug: task2, meta: noop
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'meta')
self.assertEqual(host1_task.name, 'task2')
self.assertEqual(host2_task.name, '')
# meta: noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue1')
# meta: noop, debug: rescue2
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue2')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# end of iteration
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNone(host1_task)
self.assertIsNone(host2_task)
| gpl-3.0 |
luvit/gyp | test/win/gyptest-link-base-address.py | 137 | 1816 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the base address setting is extracted properly.
"""
import TestGyp
import re
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('base-address.gyp', chdir=CHDIR)
test.build('base-address.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
# Extract the image base address from the headers output.
  image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)
exe_headers = GetHeaders('test_base_specified_exe.exe')
exe_match = image_base_reg_ex.match(exe_headers)
if not exe_match or not exe_match.group(1):
test.fail_test()
if exe_match.group(1) != '420000':
test.fail_test()
dll_headers = GetHeaders('test_base_specified_dll.dll')
dll_match = image_base_reg_ex.match(dll_headers)
if not dll_match or not dll_match.group(1):
test.fail_test()
if dll_match.group(1) != '10420000':
test.fail_test()
default_exe_headers = GetHeaders('test_base_default_exe.exe')
default_exe_match = image_base_reg_ex.match(default_exe_headers)
if not default_exe_match or not default_exe_match.group(1):
test.fail_test()
if default_exe_match.group(1) != '400000':
test.fail_test()
default_dll_headers = GetHeaders('test_base_default_dll.dll')
default_dll_match = image_base_reg_ex.match(default_dll_headers)
if not default_dll_match or not default_dll_match.group(1):
test.fail_test()
if default_dll_match.group(1) != '10000000':
test.fail_test()
test.pass_test()
| bsd-3-clause |
mpdehaan/certmaster | setup.py | 1 | 2394 | #!/usr/bin/python
from distutils.core import setup
#from setuptools import setup,find_packages
NAME = "certmaster"
VERSION = "0.24"
SHORT_DESC = "%s remote configuration and management api" % NAME
LONG_DESC = """
A small pluggable xml-rpc daemon used by %s to implement various web services hooks
""" % NAME
if __name__ == "__main__":
manpath = "share/man/man1/"
etcpath = "/etc/%s" % NAME
initpath = "/etc/init.d/"
logpath = "/var/log/%s/" % NAME
certdir = "/var/lib/%s/" % NAME
trigpath = "/var/lib/%s/triggers/"% NAME
pkipath = "/etc/pki/%s" % NAME
rotpath = "/etc/logrotate.d"
aclpath = "%s/minion-acl.d" % etcpath
setup(
name="%s" % NAME,
version = VERSION,
author = "Lots",
author_email = "func-list@redhat.com",
url = "https://fedorahosted.org/certmaster/",
license = "GPL",
scripts = [
"scripts/certmaster", "scripts/certmaster-ca",
"scripts/certmaster-request",
],
# package_data = { '' : ['*.*'] },
package_dir = {"%s" % NAME: "%s" % NAME
},
packages = ["%s" % NAME,
],
data_files = [(initpath, ["init-scripts/certmaster"]),
(etcpath, ["etc/minion.conf"]),
(etcpath, ["etc/certmaster.conf"]),
(manpath, ["docs/certmaster.1.gz"]),
(manpath, ["docs/certmaster-request.1.gz"]),
(manpath, ["docs/certmaster-ca.1.gz"]),
(rotpath, ['etc/certmaster_rotate']),
(logpath, []),
(certdir, []),
(etcpath, []),
(pkipath, []),
(aclpath, []),
("%s/sign/pre/" % trigpath, []),
("%s/sign/post/" % trigpath, []),
("%s/remove/pre/" % trigpath, []),
("%s/remove/post/" % trigpath, []),
("%s/request/pre/" % trigpath, []),
("%s/request/post/" % trigpath, []),
],
description = SHORT_DESC,
long_description = LONG_DESC
)
| gpl-2.0 |
chirilo/mozillians | mozillians/mozspaces/admin.py | 8 | 1676 | from django import forms
from django.contrib import admin
from django.core.urlresolvers import reverse
import autocomplete_light
from import_export.admin import ExportMixin
from product_details import product_details
from sorl.thumbnail.admin import AdminImageMixin
from models import Keyword, MozSpace, Photo
class KeywordAdmin(admin.StackedInline):
""" Keyword Inline Admin."""
model = Keyword
class PhotoAdmin(AdminImageMixin, admin.StackedInline):
""" Photo Inline Admin."""
model = Photo
class MozSpaceAdminForm(forms.ModelForm):
"""MozSpace Admin Form."""
def __init__(self, *args, **kwargs):
super(MozSpaceAdminForm, self).__init__(*args, **kwargs)
queryset = Photo.objects.none()
if 'instance' in kwargs:
queryset = Photo.objects.filter(mozspace__id=kwargs['instance'].id)
self.fields['cover_photo'].queryset = queryset
    class Meta:
        model = MozSpace
class MozSpaceAdmin(ExportMixin, admin.ModelAdmin):
form = MozSpaceAdminForm
inlines = [PhotoAdmin, KeywordAdmin]
search_fields = ['name']
list_display = ['name', 'city', 'country', 'coordinator_link']
    # NOTE: this assignment replaces the MozSpaceAdminForm set above.
    form = autocomplete_light.modelform_factory(MozSpace)
def coordinator_link(self, obj):
url = reverse('admin:auth_user_change', args=[obj.id])
full_name = obj.coordinator.userprofile.full_name
return u'<a href="%s">%s</a>' % (url, full_name)
coordinator_link.allow_tags = True
coordinator_link.short_description = 'coordinator'
def country(self, obj):
return product_details.get_regions('en-US')[obj.country]
admin.site.register(MozSpace, MozSpaceAdmin)
| bsd-3-clause |
thomaslima/PySpice | PySpice/Physics/PhysicalConstants.py | 1 | 2887 | # -*- coding: utf-8 -*-
####################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
#
# Physical Constants from Particle Data Group 2013
# http://pdg.lbl.gov/2013/reviews/rpp2013-rev-phys-constants.pdf
#
####################################################################################################
####################################################################################################
pi = 3.141592653589793238 # π = 3.141 592 653 589 793 238
####################################################################################################
speed_of_light_in_vacuum = c = 299792458 # 299 792 458 m s−1
electron_charge_magnitude = e = q = 1.602176565e-19 # 1.602 176 565(35)×10−19 C = 4.803 204 50(11)×10−10 esu
permeability_of_free_space = mu0 = 4*pi*1e-7 # 4π × 10−7 N A−2 = 12.566 370 614 ... ×10−7 N A−2
permittivity_of_free_space = epsilon0 = 1./(mu0*c**2) # 8.854 187 817 ... ×10−12 F m −1
avogadro_constant = Na = 6.02214129e23 # 6.022 141 29(27)×1023 mol−1
boltzmann_constant = k = 1.3806488e-23 # 1.380 6488(13)×10−23 J K−1 = 8.617 3324(78)×10−5 eV K−1
# 1 eV = 1.602 176 565(35) × 10−19 J
# 1 eV/c2 = 1.782 661 845(39) × 10−36 kg
####################################################################################################
# 0 ◦C ≡ 273.15 K
def degree_to_kelvin(x):
return x + 273.15
def kelvin_to_degree(x):
return x - 273.15
def temperature(degree=None, kelvin=None):
if degree is not None:
return degree_to_kelvin(degree)
else:
return kelvin
# kT at 300 K = [38.681 731(35)]−1 eV
def kT(degree=None, kelvin=None):
return k*temperature(degree=degree, kelvin=kelvin)
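
# Quick self-check (illustrative addition, not part of the original API):
# the thermal voltage kT/q at 300 K is about 25.85 mV, i.e. 1/kT is about
# 38.68 eV^-1, matching the PDG value quoted in the comment above.
if __name__ == '__main__':
    print(kT(kelvin=300) / e)  # ~0.02585 eV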
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 |
gavinelliott/gyspadmin | node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
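
# Illustrative usage (not part of gyp; the tool file name and rule values
# below are invented for the example):
if __name__ == '__main__':
    writer = Writer('example.rules', 'Example Rules')
    writer.AddCustomBuildRule(name='CopyRule',
                              cmd='copy "$(InputPath)" "$(OutDir)"',
                              description='Copying $(InputFileName)...',
                              additional_dependencies=[],
                              outputs=['$(OutDir)\\$(InputName).out'],
                              extensions=['.in'])
    writer.WriteIfChanged()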
| mit |
LIKAIMO/MissionPlanner | Lib/lib2to3/fixes/fix_exitfunc.py | 57 | 2577 | """
Convert use of sys.exitfunc to use the atexit module.
"""
# Author: Benjamin Peterson
from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
class FixExitfunc(fixer_base.BaseFix):
keep_line_order = True
BM_compatible = True
PATTERN = """
(
sys_import=import_name<'import'
('sys'
|
dotted_as_names< (any ',')* 'sys' (',' any)* >
)
>
|
expr_stmt<
power< 'sys' trailer< '.' 'exitfunc' > >
'=' func=any >
)
"""
def __init__(self, *args):
super(FixExitfunc, self).__init__(*args)
def start_tree(self, tree, filename):
super(FixExitfunc, self).start_tree(tree, filename)
self.sys_import = None
def transform(self, node, results):
# First, find a the sys import. We'll just hope it's global scope.
if "sys_import" in results:
if self.sys_import is None:
self.sys_import = results["sys_import"]
return
func = results["func"].clone()
func.prefix = u""
register = pytree.Node(syms.power,
Attr(Name(u"atexit"), Name(u"register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
if self.sys_import is None:
# That's interesting.
self.warning(node, "Can't find sys import; Please add an atexit "
"import at the top of your file.")
return
# Now add an atexit import after the sys import.
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
names.append_child(Name(u"atexit", u" "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
[Name(u"import"), Name(u"atexit", u" ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
containing_stmt.insert_child(position + 2, new)
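
# Example of the rewrite this fixer performs (illustrative, not part of the
# original module):
#
#   before:  import sys
#            sys.exitfunc = cleanup
#
#   after:   import sys
#            import atexit
#            atexit.register(cleanup)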
| gpl-3.0 |
agconti/njode | env/lib/python2.7/site-packages/django/contrib/auth/tests/test_hashers.py | 38 | 14783 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.conf.global_settings import PASSWORD_HASHERS as default_hashers
from django.contrib.auth.hashers import (is_password_usable, BasePasswordHasher,
check_password, make_password, PBKDF2PasswordHasher, load_hashers, PBKDF2SHA1PasswordHasher,
get_hasher, identify_hasher, UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH)
from django.test import SimpleTestCase
from django.utils import six
try:
import crypt
except ImportError:
crypt = None
try:
import bcrypt
except ImportError:
bcrypt = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
iterations = 1
class TestUtilsHashPass(SimpleTestCase):
def setUp(self):
load_hashers(password_hashers=default_hashers)
def test_simple(self):
encoded = make_password('lètmein')
self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
# Blank passwords
blank_encoded = make_password('')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_pkbdf2(self):
encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
self.assertEqual(encoded,
'pbkdf2_sha256$12000$seasalt$Ybw8zsFxqja97tY/o6G+Fy1ksY4U/Hw3DRrGED6Up4s=')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_sha1(self):
encoded = make_password('lètmein', 'seasalt', 'sha1')
self.assertEqual(encoded,
'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_md5(self):
encoded = make_password('lètmein', 'seasalt', 'md5')
self.assertEqual(encoded,
'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "md5")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'md5')
self.assertTrue(blank_encoded.startswith('md5$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unsalted_md5(self):
encoded = make_password('lètmein', '', 'unsalted_md5')
self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
# Alternate unsalted syntax
alt_encoded = "md5$$%s" % encoded
self.assertTrue(is_password_usable(alt_encoded))
self.assertTrue(check_password('lètmein', alt_encoded))
self.assertFalse(check_password('lètmeinz', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_md5')
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unsalted_sha1(self):
encoded = make_password('lètmein', '', 'unsalted_sha1')
self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
# Raw SHA1 isn't acceptable
alt_encoded = encoded[6:]
self.assertFalse(check_password('lètmein', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(crypt, "no crypt module to generate password.")
def test_crypt(self):
encoded = make_password('lètmei', 'ab', 'crypt')
self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmei', encoded))
self.assertFalse(check_password('lètmeiz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
# Blank passwords
blank_encoded = make_password('', 'ab', 'crypt')
self.assertTrue(blank_encoded.startswith('crypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
encoded = make_password('lètmein', hasher='bcrypt_sha256')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt_sha256$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
# Verify that password truncation no longer works
password = ('VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN')
encoded = make_password(password, hasher='bcrypt_sha256')
self.assertTrue(check_password(password, encoded))
self.assertFalse(check_password(password[:72], encoded))
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt_sha256')
self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt(self):
encoded = make_password('lètmein', hasher='bcrypt')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt')
self.assertTrue(blank_encoded.startswith('bcrypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unusable(self):
encoded = make_password(None)
self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
self.assertFalse(is_password_usable(encoded))
self.assertFalse(check_password(None, encoded))
self.assertFalse(check_password(encoded, encoded))
self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
self.assertFalse(check_password('', encoded))
self.assertFalse(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertRaises(ValueError, identify_hasher, encoded)
# Assert that the unusable passwords actually contain a random part.
# This might fail one day due to a hash collision.
self.assertNotEqual(encoded, make_password(None), "Random password collision?")
def test_unspecified_password(self):
"""
Makes sure specifying no plain password with a valid encoded password
returns `False`.
"""
self.assertFalse(check_password(None, make_password('lètmein')))
def test_bad_algorithm(self):
with self.assertRaises(ValueError):
make_password('lètmein', hasher='lolcat')
self.assertRaises(ValueError, identify_hasher, "lolcat$salt$hash")
def test_bad_encoded(self):
self.assertFalse(is_password_usable('lètmein_badencoded'))
self.assertFalse(is_password_usable(''))
def test_low_level_pkbdf2(self):
hasher = PBKDF2PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded,
'pbkdf2_sha256$12000$seasalt2$hlDLKsxgkgb1aeOppkM5atCYw5rPzAjCNQZ4NYyUROw=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_low_level_pbkdf2_sha1(self):
hasher = PBKDF2SHA1PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded,
'pbkdf2_sha1$12000$seasalt2$JeMRVfjjgtWw3/HzlnlfqBnQ6CA=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_upgrade(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
self.assertTrue(check_password('lètmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_no_upgrade(self):
encoded = make_password('lètmein')
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
def test_no_upgrade_on_incorrect_pass(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
def test_pbkdf2_upgrade(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
hasher = get_hasher('default')
self.assertNotEqual(hasher.iterations, 1)
old_iterations = hasher.iterations
try:
# Generate a password with 1 iteration.
hasher.iterations = 1
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
            # Check that no upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and ...
hasher.iterations = old_iterations
# ... check if the password would get updated to the new iteration count.
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
finally:
hasher.iterations = old_iterations
def test_pbkdf2_upgrade_new_hasher(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
hasher = get_hasher('default')
self.assertNotEqual(hasher.iterations, 1)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
with self.settings(PASSWORD_HASHERS=[
'django.contrib.auth.tests.test_hashers.PBKDF2SingleIterationHasher']):
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
            # Check that no upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and check if the password would get
# updated to the new iteration count.
with self.settings(PASSWORD_HASHERS=[
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.tests.test_hashers.PBKDF2SingleIterationHasher']):
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_load_library_no_algorithm(self):
with self.assertRaises(ValueError) as e:
BasePasswordHasher()._load_library()
self.assertEqual("Hasher 'BasePasswordHasher' doesn't specify a "
"library attribute", str(e.exception))
def test_load_library_importerror(self):
PlainHasher = type(str('PlainHasher'), (BasePasswordHasher,),
{'algorithm': 'plain', 'library': 'plain'})
# Python 3.3 adds quotes around module name
with six.assertRaisesRegex(self, ValueError,
"Couldn't load 'PlainHasher' algorithm library: No module named '?plain'?"):
PlainHasher()._load_library()
| bsd-3-clause |
YuwenXiong/py-R-FCN | lib/pycocotools/coco.py | 16 | 14881 | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
                # this could be simplified with a defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
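
# Minimal usage sketch (added for illustration; not part of pycocotools).
# 'instances_val2014.json' is a placeholder -- point it at a real COCO
# annotation file before running.
if __name__ == '__main__':
    demo = COCO('instances_val2014.json')
    person_ids = demo.getCatIds(catNms=['person'])
    img_ids = demo.getImgIds(catIds=person_ids)
    ann_ids = demo.getAnnIds(imgIds=img_ids[:1], catIds=person_ids)
    print 'found %d person annotations in the first image' % len(demo.loadAnns(ann_ids))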
| mit |
Bismarrck/tensorflow | tensorflow/compiler/tests/pooling_ops_test.py | 22 | 20324 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
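# Quick illustration (added note, not from the original test): both helpers
# accept either a tensor or a plain 4-element shape list, e.g.
#
#   NHWCToNCHW([1, 5, 5, 3])  # -> [1, 3, 5, 5]
#   NCHWToNHWC([1, 3, 5, 5])  # -> [1, 5, 5, 3]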
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs
"""
test_configs = ["NHWC", "NCHW"]
return test_configs
class PoolingTest(xla_test.XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
"""
total_size = np.prod(input_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
x = x.reshape(input_sizes)
with self.cached_session() as sess:
with self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = inputs
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
t = pool_func(t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = sess.run(t, {inputs: x})
self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected)
def testMaxPoolValidPadding(self):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testMaxPoolSamePadding(self):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
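  # Worked check (added note): for input_sizes=[1, 2, 3, 3] the flattened
  # values 1..18 form a 2x3 grid of 3-channel pixels. With SAME padding and
  # stride 2 there are two windows: columns {0, 1} give per-channel maxima
  # (13, 14, 15) and the padded column {2} gives (16, 17, 18).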
def testMaxPoolSamePaddingNonSquareWindow(self):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0])
def testMaxPoolValidPaddingUnevenStride(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
def testMaxPoolSamePaddingFilter4(self):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingFilter8(self):
expected_output = [
145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0])
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
def testKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33])
def testKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11])
# Average pooling
def testAvgPoolValidPadding(self):
expected_output = [7, 8, 9]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testAvgPoolSamePadding(self):
expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
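  # Worked check (added note): with values 1..18 in a [1, 2, 3, 3] input, the
  # full 2x2 window over columns {0, 1} averages to (7, 8, 9) per channel,
  # while the partially padded window over column {2} averages only its two
  # valid pixels, giving (11.5, 12.5, 13.5).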
class PoolGradTest(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
pool_grad_func: Pooling gradient function for pool_grad_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
pool_grad_grad_func: Second-order gradient function, if available.
"""
total_size = np.prod(input_sizes)
# TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
# maximal at 16 bits. Switch to np.random.randn when resolved.
x = np.arange(1, total_size + 1, dtype=np.float32)
x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly
# Verify some specifically interesting values...
x[np.random.choice(total_size)] = np.inf
x[np.random.choice(total_size)] = -np.inf
# TODO(b/74222344): Fix nan handling for max pool grad.
# x[np.random.choice(total_size)] = np.nan
x = x.reshape(input_sizes)
with self.cached_session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_output_grad_gradients = output_grad_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
xla_inputs,
xla_outputs,
xla_output_grad_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
actual_input_gradients_vals = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals,
actual_input_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_input_gradients_vals, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def _VerifyValues(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=pool_grad_grad_func)
def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
def testMaxPool(self):
self._TestPooling(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
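# Sanity sketch (added note, pure numpy, not executed by the test suite): the
# expected values above can be reproduced by hand. For testMaxPoolValidPadding
# the input is 1..27 reshaped to [1, 3, 3, 3]; a 2x2 VALID max pool with
# stride 2 keeps a single window whose per-channel maxima are 13, 14, 15:
#
#   import numpy as np
#   x = np.arange(1, 28, dtype=np.float32).reshape(1, 3, 3, 3)
#   window = x[0, 0:2, 0:2, :]                 # the only VALID 2x2 window
#   print(window.reshape(-1, 3).max(axis=0))   # -> [13. 14. 15.]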
| apache-2.0 |
keedio/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/biffh.py | 64 | 16753 | # -*- coding: cp1252 -*-
##
# Support module for the xlrd package.
#
# <p>Portions copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2008-02-10 SJM BIFF2 BLANK record
# 2008-02-08 SJM Preparation for Excel 2.0 support
# 2008-02-02 SJM Added suffixes (_B2, _B2_ONLY, etc) on record names for biff_dump & biff_count
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-09-08 SJM Avoid crash when zero-length Unicode string missing options byte.
# 2007-04-22 SJM Remove experimental "trimming" facility.
DEBUG = 0
from struct import unpack
import sys
from timemachine import *
class XLRDError(Exception):
pass
##
# Parent of almost all other classes in the package. Defines a common "dump" method
# for debugging.
class BaseObject(object):
_repr_these = []
##
# @param f open file object, to which the dump is written
# @param header text to write before the dump
# @param footer text to write after the dump
# @param indent number of leading spaces (for recursive calls)
def dump(self, f=None, header=None, footer=None, indent=0):
if f is None:
f = sys.stderr
alist = self.__dict__.items()
alist.sort()
pad = " " * indent
if header is not None: print >> f, header
list_type = type([])
dict_type = type({})
for attr, value in alist:
if getattr(value, 'dump', None) and attr != 'book':
value.dump(f,
header="%s%s (%s object):" % (pad, attr, value.__class__.__name__),
indent=indent+4)
elif attr not in self._repr_these and (
isinstance(value, list_type) or isinstance(value, dict_type)
):
print >> f, "%s%s: %s, len = %d" % (pad, attr, type(value), len(value))
else:
print >> f, "%s%s: %r" % (pad, attr, value)
if footer is not None: print >> f, footer
FUN, FDT, FNU, FGE, FTX = range(5) # unknown, date, number, general, text
DATEFORMAT = FDT
NUMBERFORMAT = FNU
(
XL_CELL_EMPTY,
XL_CELL_TEXT,
XL_CELL_NUMBER,
XL_CELL_DATE,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_BLANK, # for use in debugging, gathering stats, etc
) = range(7)
biff_text_from_num = {
0: "(not BIFF)",
20: "2.0",
21: "2.1",
30: "3",
40: "4S",
45: "4W",
50: "5",
70: "7",
80: "8",
85: "8X",
}
##
# <p>This dictionary can be used to produce a text version of the internal codes
# that Excel uses for error cells. Here are its contents:
# <pre>
# 0x00: '#NULL!', # Intersection of two cell ranges is empty
# 0x07: '#DIV/0!', # Division by zero
# 0x0F: '#VALUE!', # Wrong type of operand
# 0x17: '#REF!', # Illegal or deleted cell reference
# 0x1D: '#NAME?', # Wrong function or range name
# 0x24: '#NUM!', # Value range overflow
# 0x2A: '#N/A!', # Argument or function not available
# </pre></p>
error_text_from_code = {
0x00: '#NULL!', # Intersection of two cell ranges is empty
0x07: '#DIV/0!', # Division by zero
0x0F: '#VALUE!', # Wrong type of operand
0x17: '#REF!', # Illegal or deleted cell reference
0x1D: '#NAME?', # Wrong function or range name
0x24: '#NUM!', # Value range overflow
0x2A: '#N/A!', # Argument or function not available
}
BIFF_FIRST_UNICODE = 80
XL_WORKBOOK_GLOBALS = WBKBLOBAL = 0x5
XL_WORKBOOK_GLOBALS_4W = 0x100
XL_WORKSHEET = WRKSHEET = 0x10
XL_BOUNDSHEET_WORKSHEET = 0x00
XL_BOUNDSHEET_CHART = 0x02
XL_BOUNDSHEET_VB_MODULE = 0x06
# XL_RK2 = 0x7e
XL_ARRAY = 0x0221
XL_ARRAY2 = 0x0021
XL_BLANK = 0x0201
XL_BLANK_B2 = 0x01
XL_BOF = 0x809
XL_BOOLERR = 0x205
XL_BOOLERR_B2 = 0x5
XL_BOUNDSHEET = 0x85
XL_BUILTINFMTCOUNT = 0x56
XL_CF = 0x01B1
XL_CODEPAGE = 0x42
XL_COLINFO = 0x7D
XL_COLUMNDEFAULT = 0x20 # BIFF2 only
XL_COLWIDTH = 0x24 # BIFF2 only
XL_CONDFMT = 0x01B0
XL_CONTINUE = 0x3c
XL_COUNTRY = 0x8C
XL_DATEMODE = 0x22
XL_DEFAULTROWHEIGHT = 0x0225
XL_DEFCOLWIDTH = 0x55
XL_DIMENSION = 0x200
XL_DIMENSION2 = 0x0
XL_EFONT = 0x45
XL_EOF = 0x0a
XL_EXTERNNAME = 0x23
XL_EXTERNSHEET = 0x17
XL_EXTSST = 0xff
XL_FEAT11 = 0x872
XL_FILEPASS = 0x2f
XL_FONT = 0x31
XL_FONT_B3B4 = 0x231
XL_FORMAT = 0x41e
XL_FORMAT2 = 0x1E # BIFF2, BIFF3
XL_FORMULA = 0x6
XL_FORMULA3 = 0x206
XL_FORMULA4 = 0x406
XL_GCW = 0xab
XL_INDEX = 0x20b
XL_INTEGER = 0x2 # BIFF2 only
XL_IXFE = 0x44 # BIFF2 only
XL_LABEL = 0x204
XL_LABEL_B2 = 0x04
XL_LABELRANGES = 0x15f
XL_LABELSST = 0xfd
XL_MERGEDCELLS = 0xE5
XL_MSO_DRAWING = 0x00EC
XL_MSO_DRAWING_GROUP = 0x00EB
XL_MSO_DRAWING_SELECTION = 0x00ED
XL_MULRK = 0xbd
XL_MULBLANK = 0xbe
XL_NAME = 0x18
XL_NOTE = 0x1c
XL_NUMBER = 0x203
XL_NUMBER_B2 = 0x3
XL_OBJ = 0x5D
XL_PALETTE = 0x92
XL_RK = 0x27e
XL_ROW = 0x208
XL_ROW_B2 = 0x08
XL_RSTRING = 0xd6
XL_SHEETHDR = 0x8F # BIFF4W only
XL_SHEETSOFFSET = 0x8E # BIFF4W only
XL_SHRFMLA = 0x04bc
XL_SST = 0xfc
XL_STANDARDWIDTH = 0x99
XL_STRING = 0x207
XL_STRING_B2 = 0x7
XL_STYLE = 0x293
XL_SUPBOOK = 0x1AE
XL_TABLEOP = 0x236
XL_TABLEOP2 = 0x37
XL_TABLEOP_B2 = 0x36
XL_TXO = 0x1b6
XL_UNCALCED = 0x5e
XL_UNKNOWN = 0xffff
XL_WINDOW2 = 0x023E
XL_WRITEACCESS = 0x5C
XL_XF = 0xe0
XL_XF2 = 0x0043 # BIFF2 version of XF record
XL_XF3 = 0x0243 # BIFF3 version of XF record
XL_XF4 = 0x0443 # BIFF4 version of XF record
boflen = {0x0809: 8, 0x0409: 6, 0x0209: 6, 0x0009: 4}
bofcodes = (0x0809, 0x0409, 0x0209, 0x0009)
XL_FORMULA_OPCODES = (0x0006, 0x0406, 0x0206)
_cell_opcode_list = [
XL_BOOLERR,
XL_FORMULA,
XL_FORMULA3,
XL_FORMULA4,
XL_LABEL,
XL_LABELSST,
XL_MULRK,
XL_NUMBER,
XL_RK,
XL_RSTRING,
]
_cell_opcode_dict = {}
for _cell_opcode in _cell_opcode_list:
_cell_opcode_dict[_cell_opcode] = 1
is_cell_opcode = _cell_opcode_dict.has_key
# def fprintf(f, fmt, *vargs): f.write(fmt % vargs)
def fprintf(f, fmt, *vargs):
if fmt.endswith('\n'):
print >> f, fmt[:-1] % vargs
else:
print >> f, fmt % vargs,
def upkbits(tgt_obj, src, manifest, local_setattr=setattr):
for n, mask, attr in manifest:
local_setattr(tgt_obj, attr, (src & mask) >> n)
def upkbitsL(tgt_obj, src, manifest, local_setattr=setattr, local_int=int):
for n, mask, attr in manifest:
local_setattr(tgt_obj, attr, local_int((src & mask) >> n))
def unpack_string(data, pos, encoding, lenlen=1):
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
return unicode(data[pos:pos+nchars], encoding)
def unpack_string_update_pos(data, pos, encoding, lenlen=1, known_len=None):
if known_len is not None:
# On a NAME record, the length byte is detached from the front of the string.
nchars = known_len
else:
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
newpos = pos + nchars
return (unicode(data[pos:newpos], encoding), newpos)
def unpack_unicode(data, pos, lenlen=2):
"Return unicode_strg"
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
if not nchars:
# Ambiguous whether 0-length string should have an "options" byte.
# Avoid crash if missing.
return u""
pos += lenlen
options = ord(data[pos])
pos += 1
# phonetic = options & 0x04
# richtext = options & 0x08
if options & 0x08:
# rt = unpack('<H', data[pos:pos+2])[0] # unused
pos += 2
if options & 0x04:
# sz = unpack('<i', data[pos:pos+4])[0] # unused
pos += 4
if options & 0x01:
# Uncompressed UTF-16-LE
rawstrg = data[pos:pos+2*nchars]
# if DEBUG: print "nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
strg = unicode(rawstrg, 'utf_16_le')
# pos += 2*nchars
else:
# Note: this is COMPRESSED (not ASCII!) encoding!!!
# Merely returning the raw bytes would work OK 99.99% of the time
# if the local codepage was cp1252 -- however this would rapidly go pear-shaped
# for other codepages so we grit our Anglocentric teeth and return Unicode :-)
strg = unicode(data[pos:pos+nchars], "latin_1")
# pos += nchars
# if richtext:
# pos += 4 * rt
# if phonetic:
# pos += sz
# return (strg, pos)
return strg
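# Minimal illustration (added note): for the BIFF8 string fragment
# '\x03\x00\x00abc' -- a 2-byte character count of 3 followed by an options
# byte of 0 (compressed, no rich-text or phonetic blocks) -- this returns
# u'abc':
#
#   assert unpack_unicode('\x03\x00\x00abc', 0) == u'abc'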
def unpack_unicode_update_pos(data, pos, lenlen=2, known_len=None):
"Return (unicode_strg, updated value of pos)"
if known_len is not None:
# On a NAME record, the length byte is detached from the front of the string.
nchars = known_len
else:
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
if not nchars and not data[pos:]:
# Zero-length string with no options byte
return (u"", pos)
options = ord(data[pos])
pos += 1
phonetic = options & 0x04
richtext = options & 0x08
if richtext:
rt = unpack('<H', data[pos:pos+2])[0]
pos += 2
if phonetic:
sz = unpack('<i', data[pos:pos+4])[0]
pos += 4
if options & 0x01:
# Uncompressed UTF-16-LE
strg = unicode(data[pos:pos+2*nchars], 'utf_16_le')
pos += 2*nchars
else:
# Note: this is COMPRESSED (not ASCII!) encoding!!!
strg = unicode(data[pos:pos+nchars], "latin_1")
pos += nchars
if richtext:
pos += 4 * rt
if phonetic:
pos += sz
return (strg, pos)
def unpack_cell_range_address_list_update_pos(
output_list, data, pos, biff_version, addr_size=6):
# output_list is updated in situ
if biff_version < 80:
assert addr_size == 6
else:
assert addr_size in (6, 8)
n, = unpack("<H", data[pos:pos+2])
pos += 2
if n:
if addr_size == 6:
fmt = "<HHBB"
else:
fmt = "<HHHH"
for _unused in xrange(n):
ra, rb, ca, cb = unpack(fmt, data[pos:pos+addr_size])
output_list.append((ra, rb+1, ca, cb+1))
pos += addr_size
return pos
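# Example (added note): one 6-byte BIFF5-style cell range address covering
# rows 0-1 and columns 0-1 decodes to a single half-open tuple:
#
#   from struct import pack
#   out = []
#   data = pack('<H', 1) + pack('<HHBB', 0, 1, 0, 1)
#   unpack_cell_range_address_list_update_pos(out, data, 0, 50)
#   # out == [(0, 2, 0, 2)]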
_brecstrg = """\
0000 DIMENSIONS_B2
0001 BLANK_B2
0002 INTEGER_B2_ONLY
0003 NUMBER_B2
0004 LABEL_B2
0005 BOOLERR_B2
0006 FORMULA
0007 STRING_B2
0008 ROW_B2
0009 BOF_B2
000A EOF
000B INDEX_B2_ONLY
000C CALCCOUNT
000D CALCMODE
000E PRECISION
000F REFMODE
0010 DELTA
0011 ITERATION
0012 PROTECT
0013 PASSWORD
0014 HEADER
0015 FOOTER
0016 EXTERNCOUNT
0017 EXTERNSHEET
0018 NAME_B2,5+
0019 WINDOWPROTECT
001A VERTICALPAGEBREAKS
001B HORIZONTALPAGEBREAKS
001C NOTE
001D SELECTION
001E FORMAT_B2-3
001F BUILTINFMTCOUNT_B2
0020 COLUMNDEFAULT_B2_ONLY
0021 ARRAY_B2_ONLY
0022 DATEMODE
0023 EXTERNNAME
0024 COLWIDTH_B2_ONLY
0025 DEFAULTROWHEIGHT_B2_ONLY
0026 LEFTMARGIN
0027 RIGHTMARGIN
0028 TOPMARGIN
0029 BOTTOMMARGIN
002A PRINTHEADERS
002B PRINTGRIDLINES
002F FILEPASS
0031 FONT
0032 FONT2_B2_ONLY
0036 TABLEOP_B2
0037 TABLEOP2_B2
003C CONTINUE
003D WINDOW1
003E WINDOW2_B2
0040 BACKUP
0041 PANE
0042 CODEPAGE
0043 XF_B2
0044 IXFE_B2_ONLY
0045 EFONT_B2_ONLY
004D PLS
0051 DCONREF
0055 DEFCOLWIDTH
0056 BUILTINFMTCOUNT_B3-4
0059 XCT
005A CRN
005B FILESHARING
005C WRITEACCESS
005D OBJECT
005E UNCALCED
005F SAVERECALC
0063 OBJECTPROTECT
007D COLINFO
007E RK2_mythical_?
0080 GUTS
0081 WSBOOL
0082 GRIDSET
0083 HCENTER
0084 VCENTER
0085 BOUNDSHEET
0086 WRITEPROT
008C COUNTRY
008D HIDEOBJ
008E SHEETSOFFSET
008F SHEETHDR
0090 SORT
0092 PALETTE
0099 STANDARDWIDTH
009B FILTERMODE
009C FNGROUPCOUNT
009D AUTOFILTERINFO
009E AUTOFILTER
00A0 SCL
00A1 SETUP
00AB GCW
00BD MULRK
00BE MULBLANK
00C1 MMS
00D6 RSTRING
00D7 DBCELL
00DA BOOKBOOL
00DD SCENPROTECT
00E0 XF
00E1 INTERFACEHDR
00E2 INTERFACEEND
00E5 MERGEDCELLS
00E9 BITMAP
00EB MSO_DRAWING_GROUP
00EC MSO_DRAWING
00ED MSO_DRAWING_SELECTION
00EF PHONETIC
00FC SST
00FD LABELSST
00FF EXTSST
013D TABID
015F LABELRANGES
0160 USESELFS
0161 DSF
01AE SUPBOOK
01AF PROTECTIONREV4
01B0 CONDFMT
01B1 CF
01B2 DVAL
01B6 TXO
01B7 REFRESHALL
01B8 HLINK
01BC PASSWORDREV4
01BE DV
01C0 XL9FILE
01C1 RECALCID
0200 DIMENSIONS
0201 BLANK
0203 NUMBER
0204 LABEL
0205 BOOLERR
0206 FORMULA_B3
0207 STRING
0208 ROW
0209 BOF
020B INDEX_B3+
0218 NAME
0221 ARRAY
0223 EXTERNNAME_B3-4
0225 DEFAULTROWHEIGHT
0231 FONT_B3B4
0236 TABLEOP
023E WINDOW2
0243 XF_B3
027E RK
0293 STYLE
0406 FORMULA_B4
0409 BOF
041E FORMAT
0443 XF_B4
04BC SHRFMLA
0800 QUICKTIP
0809 BOF
0862 SHEETLAYOUT
0867 SHEETPROTECTION
0868 RANGEPROTECTION
"""
biff_rec_name_dict = {}
for _buff in _brecstrg.splitlines():
_numh, _name = _buff.split()
biff_rec_name_dict[int(_numh, 16)] = _name
del _buff, _name, _brecstrg
def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False):
endpos = min(ofs + dlen, len(strg))
pos = ofs
numbered = not unnumbered
num_prefix = ''
while pos < endpos:
endsub = min(pos + 16, endpos)
substrg = strg[pos:endsub]
lensub = endsub - pos
if lensub <= 0 or lensub != len(substrg):
fprintf(
sys.stdout,
'??? hex_char_dump: ofs=%d dlen=%d base=%d -> endpos=%d pos=%d endsub=%d substrg=%r\n',
ofs, dlen, base, endpos, pos, endsub, substrg)
break
hexd = ''.join(["%02x " % ord(c) for c in substrg])
chard = ''
for c in substrg:
if c == '\0':
c = '~'
elif not (' ' <= c <= '~'):
c = '?'
chard += c
if numbered:
num_prefix = "%5d: " % (base+pos-ofs)
fprintf(fout, "%s %-48s %s\n", num_prefix, hexd, chard)
pos = endsub
def biff_dump(mem, stream_offset, stream_len, base=0, fout=sys.stdout, unnumbered=False):
pos = stream_offset
stream_end = stream_offset + stream_len
adj = base - stream_offset
dummies = 0
numbered = not unnumbered
num_prefix = ''
while stream_end - pos >= 4:
rc, length = unpack('<HH', mem[pos:pos+4])
if rc == 0 and length == 0:
if mem[pos:] == '\0' * (stream_end - pos):
dummies = stream_end - pos
savpos = pos
pos = stream_end
break
if dummies:
dummies += 4
else:
savpos = pos
dummies = 4
pos += 4
else:
if dummies:
if numbered:
num_prefix = "%5d: " % (adj + savpos)
fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
dummies = 0
recname = biff_rec_name_dict.get(rc, '<UNKNOWN>')
if numbered:
num_prefix = "%5d: " % (adj + pos)
fprintf(fout, "%s%04x %s len = %04x (%d)\n", num_prefix, rc, recname, length, length)
pos += 4
hex_char_dump(mem, pos, length, adj+pos, fout, unnumbered)
pos += length
if dummies:
if numbered:
num_prefix = "%5d: " % (adj + savpos)
fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
if pos < stream_end:
if numbered:
num_prefix = "%5d: " % (adj + pos)
fprintf(fout, "%s---- Misc bytes at end ----\n", num_prefix)
hex_char_dump(mem, pos, stream_end-pos, adj + pos, fout, unnumbered)
elif pos > stream_end:
fprintf(fout, "Last dumped record has length (%d) that is too large\n", length)
def biff_count_records(mem, stream_offset, stream_len, fout=sys.stdout):
pos = stream_offset
stream_end = stream_offset + stream_len
tally = {}
while stream_end - pos >= 4:
rc, length = unpack('<HH', mem[pos:pos+4])
if rc == 0 and length == 0:
if mem[pos:] == '\0' * (stream_end - pos):
break
recname = "<Dummy (zero)>"
else:
recname = biff_rec_name_dict.get(rc, None)
if recname is None:
recname = "Unknown_0x%04X" % rc
if tally.has_key(recname):
tally[recname] += 1
else:
tally[recname] = 1
pos += length + 4
slist = tally.items()
slist.sort()
for recname, count in slist:
print >> fout, "%8d %s" % (count, recname)
encoding_from_codepage = {
1200 : 'utf_16_le',
10000: 'mac_roman',
10006: 'mac_greek', # guess
10007: 'mac_cyrillic', # guess
10029: 'mac_latin2', # guess
10079: 'mac_iceland', # guess
10081: 'mac_turkish', # guess
32768: 'mac_roman',
32769: 'cp1252',
}
# some more guessing, for Indic scripts
# codepage 57000 range:
# 2 Devanagari [0]
# 3 Bengali [1]
# 4 Tamil [5]
# 5 Telegu [6]
# 6 Assamese [1] c.f. Bengali
# 7 Oriya [4]
# 8 Kannada [7]
# 9 Malayalam [8]
# 10 Gujarati [3]
# 11 Gurmukhi [2]
| apache-2.0 |
stevenewey/yotta | yotta/build.py | 2 | 5200 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import logging
# validate, , validate things, internal
from .lib import validate
# CMakeGen, , generate build files, internal
from .lib import cmakegen
# Target, , represents an installed target, internal
from .lib import target
# install, , install subcommand, internal
from . import install
def addOptions(parser, add_build_targets=True):
parser.add_argument('-g', '--generate-only', dest='generate_only',
action='store_true', default=False,
help='Only generate CMakeLists, don\'t run CMake or build'
)
parser.add_argument('-r', '--release-build', dest='release_build', action='store_true', default=True)
parser.add_argument('-d', '--debug-build', dest='release_build', action='store_false', default=True)
# the target class adds its own build-system specific options. In the
# future we probably want to load these from a target instance, rather than
# from the class
target.DerivedTarget.addBuildOptions(parser)
if add_build_targets:
parser.add_argument(
"build_targets", metavar='MODULE_TO_BUILD', nargs='*', type=str, default=[],
help='List modules or programs to build (omit to build the default '+
'set, or use "all_tests" to build all tests, including those '+
'of dependencies).'
)
def execCommand(args, following_args):
status = installAndBuild(args, following_args)
return status['status']
def installAndBuild(args, following_args):
''' Perform the build command, but provide detailed error information.
Returns {status:0, build_status:0, generate_status:0, install_status:0} on success.
    If 'status' is nonzero there was some sort of error. Other properties
are optional, and may not be set if that step was not attempted.
'''
build_status = generate_status = install_status = 0
if not hasattr(args, 'build_targets'):
vars(args)['build_targets'] = []
if 'test' in args.build_targets:
logging.error('Cannot build "test". Use "yotta test" to run tests.')
return {'status':1}
cwd = os.getcwd()
c = validate.currentDirectoryModule()
if not c:
return {'status':1}
target, errors = c.satisfyTarget(args.target)
if errors:
for error in errors:
logging.error(error)
return {'status':1}
# run the install command before building, we need to add some options the
# install command expects to be present to do this:
vars(args)['component'] = None
vars(args)['act_globally'] = False
if not hasattr(args, 'install_test_deps'):
if 'all_tests' in args.build_targets:
vars(args)['install_test_deps'] = 'all'
elif not len(args.build_targets):
vars(args)['install_test_deps'] = 'own'
else:
# If the named build targets include tests from other modules, we
# need to install the deps for those modules. To do this we need to
# be able to tell which module a library belongs to, which is not
# straightforward (especially if there is custom cmake involved).
# That's why this is 'all', and not 'none'.
vars(args)['install_test_deps'] = 'all'
# install may exit non-zero for non-fatal errors (such as incompatible
# version specs), which it will display
install_status = install.execCommand(args, [])
builddir = os.path.join(cwd, 'build', target.getName())
all_components = c.getDependenciesRecursive(
target = target,
available_components = [(c.getName(), c)],
test = True
)
# if a dependency is missing the build will almost certainly fail, so don't try
missing = 0
for d in all_components.values():
if not d and not (d.isTestDependency() and args.install_test_deps != 'all'):
logging.error('%s not available' % os.path.split(d.path)[1])
missing += 1
if missing:
logging.error('Missing dependencies prevent build. Use `yotta ls` to list them.')
return {'status': 1, 'install_status':install_status, 'missing_status':missing}
generator = cmakegen.CMakeGen(builddir, target)
app = c if len(c.getBinaries()) else None
for error in generator.generateRecursive(c, all_components, builddir, application=app):
logging.error(error)
generate_status = 1
if (not hasattr(args, 'generate_only')) or (not args.generate_only):
error = target.build(
builddir, c, args, release_build=args.release_build,
build_args=following_args, targets=args.build_targets
)
if error:
logging.error(error)
build_status = 1
return {
'status': build_status or generate_status or install_status,
'missing_status': missing,
'build_status': build_status,
'generate_status': generate_status,
'install_status': install_status
}
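# Illustrative standalone wiring (added note; an assumption for demonstration,
# since yotta itself registers these options through its own top-level CLI):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--target', default='x86-linux-native,*')
#   addOptions(parser)
#   args, following = parser.parse_known_args()
#   status = execCommand(args, following)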
| apache-2.0 |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/django/templatetags/future.py | 130 | 1640 | from django.template import Library
from django.template import defaulttags
register = Library()
@register.tag
def ssi(parser, token):
# Used for deprecation path during 1.3/1.4, will be removed in 2.0
return defaulttags.ssi(parser, token)
@register.tag
def url(parser, token):
# Used for deprecation path during 1.3/1.4, will be removed in 2.0
return defaulttags.url(parser, token)
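# Illustrative template opt-in (added note): templates select these future
# versions with a ``{% load ... from future %}`` directive, e.g.
# ``{% load cycle from future %}`` before using ``{% cycle ... %}``.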
@register.tag
def cycle(parser, token):
"""
This is the future version of `cycle` with auto-escaping.
By default all strings are escaped.
If you want to disable auto-escaping of variables you can use::
{% autoescape off %}
{% cycle var1 var2 var3 as somecycle %}
        {% endautoescape %}
Or if only some variables should be escaped, you can use::
{% cycle var1 var2|safe var3|safe as somecycle %}
"""
return defaulttags.cycle(parser, token, escape=True)
@register.tag
def firstof(parser, token):
"""
This is the future version of `firstof` with auto-escaping.
This is equivalent to::
{% if var1 %}
{{ var1 }}
{% elif var2 %}
{{ var2 }}
{% elif var3 %}
{{ var3 }}
{% endif %}
If you want to disable auto-escaping of variables you can use::
{% autoescape off %}
{% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
{% autoescape %}
Or if only some variables should be escaped, you can use::
{% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %}
"""
return defaulttags.firstof(parser, token, escape=True)
| apache-2.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/flac/package.py | 5 | 1699 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Flac(AutotoolsPackage):
"""Encoder/decoder for the Free Lossless Audio Codec"""
homepage = "https://xiph.org/flac/index.html"
url = "http://downloads.xiph.org/releases/flac/flac-1.3.2.tar.xz"
version('1.3.2', '454f1bfa3f93cc708098d7890d0499bd')
version('1.3.1', 'b9922c9a0378c88d3e901b234f852698')
version('1.3.0', '13b5c214cee8373464d3d65dee362cdd')
depends_on('libvorbis')
depends_on('id3lib')
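# Illustrative CLI use (added note, not part of the package definition): with
# this file in a Spack repo, the package installs with e.g.
#
#   spack install flac@1.3.2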
| lgpl-2.1 |
zihua/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
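# Minimal usage sketch (added note): the estimators re-exported here follow
# the standard scikit-learn fit/predict API, e.g.:
#
#   import numpy as np
#   from sklearn.cluster import KMeans
#   X = np.array([[1, 2], [1, 4], [10, 0], [10, 2]])
#   labels = KMeans(n_clusters=2).fit_predict(X)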
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
joopert/home-assistant | homeassistant/components/rpi_pfio/__init__.py | 3 | 1398 | """Support for controlling the PiFace Digital I/O module on a RPi."""
import logging
import pifacedigitalio as PFIO
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
DOMAIN = "rpi_pfio"
DATA_PFIO_LISTENER = "pfio_listener"
def setup(hass, config):
"""Set up the Raspberry PI PFIO component."""
pifacedigital = PFIO.PiFaceDigital()
hass.data[DATA_PFIO_LISTENER] = PFIO.InputEventListener(chip=pifacedigital)
def cleanup_pfio(event):
"""Stuff to do before stopping."""
PFIO.deinit()
def prepare_pfio(event):
"""Stuff to do when home assistant starts."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_pfio)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_pfio)
PFIO.init()
return True
def write_output(port, value):
"""Write a value to a PFIO."""
PFIO.digital_write(port, value)
def read_input(port):
"""Read a value from a PFIO."""
return PFIO.digital_read(port)
def edge_detect(hass, port, event_callback, settle):
"""Add detection for RISING and FALLING events."""
hass.data[DATA_PFIO_LISTENER].register(
port, PFIO.IODIR_BOTH, event_callback, settle_time=settle
)
def activate_listener(hass):
"""Activate the registered listener events."""
hass.data[DATA_PFIO_LISTENER].activate()
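# Illustrative use from a platform (added note; assumes PiFace Digital
# hardware is attached):
#
#   from homeassistant.components import rpi_pfio
#   rpi_pfio.write_output(0, 1)       # drive output port 0 high
#   state = rpi_pfio.read_input(3)    # read input port 3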
| apache-2.0 |
WalkingMachine/sara_behaviors | sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_place_2_sm.py | 1 | 12085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.TF_transform import TF_transformation
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.set_gripper_state import SetGripperState
from sara_flexbe_states.moveit_moveCartesian import MoveitMoveCartesian
from flexbe_states.log_state import LogState
from sara_flexbe_states.torque_reader import ReadTorque
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.gen_gripper_pose import GenGripperPose
from sara_flexbe_states.moveit_move import MoveitMove
from sara_flexbe_states.pose_gen_euler import GenPoseEuler
from sara_flexbe_states.sara_move_base import SaraMoveBase
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.run_trajectory import RunTrajectory
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat May 12 2018
@author: Raphael Duchaine
'''
class Action_place_2SM(Behavior):
'''
    Place an object at a position
'''
def __init__(self):
super(Action_place_2SM, self).__init__()
self.name = 'Action_place_2'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
# O 52 47
# TF Transform |nFrame1 Frame2|n
# O 36 230
# Gen Grip pose|n|nA
# O 33 308
# MoveIt move|nmove = false|n|nPos
# O 6 135
# PreGrip Pose #pre grip
# O 27 264
# #approach_pos|nGen Grip pose|ndistance = 0.25
# O 0 491
# MoveIt move|nmove =True|n|nA
# O 279 628
# open grip
# O 36 446
# MoveIt move|nmove =True|n|nB
# O 472 612
# MoveIt move|n|nB
# O 460 492
# #preGrip|nMoveIt move
def create(self):
# x:682 y:306, x:452 y:252
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['pos'])
_state_machine.userdata.pos = {"x":0.8, "y":-0.2, "z":1}
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:30 y:458, x:130 y:458, x:230 y:458, x:330 y:458, x:430 y:458, x:530 y:458, x:630 y:458, x:59 y:533, x:830 y:458
_sm_group_0 = ConcurrencyContainer(outcomes=['threshold', 'watchdog', 'fail'], conditions=[
('threshold', [('read', 'threshold')]),
('watchdog', [('read', 'watchdog')]),
('fail', [('read', 'fail')]),
('threshold', [('read yaw', 'threshold')]),
('fail', [('read yaw', 'fail')]),
('watchdog', [('read yaw', 'watchdog')])
])
with _sm_group_0:
# x:86 y:125
OperatableStateMachine.add('read',
ReadTorque(watchdog=1, Joint="right_elbow_pitch_joint", Threshold=0.7, min_time=0.4),
transitions={'threshold': 'threshold', 'watchdog': 'watchdog', 'fail': 'fail'},
autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'torque': 'torque'})
# x:252 y:135
OperatableStateMachine.add('read yaw',
ReadTorque(watchdog=1, Joint="right_elbow_pitch_joint", Threshold=0.5, min_time=0.4),
transitions={'threshold': 'threshold', 'watchdog': 'watchdog', 'fail': 'fail'},
autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'torque': 'torque'})
# x:30 y:458
_sm_read_torque_1 = OperatableStateMachine(outcomes=['done'])
with _sm_read_torque_1:
# x:142 y:61
OperatableStateMachine.add('log',
LogState(text="going down", severity=Logger.REPORT_HINT),
transitions={'done': 'Group'},
autonomy={'done': Autonomy.Off})
# x:131 y:164
OperatableStateMachine.add('Group',
_sm_group_0,
transitions={'threshold': 'done', 'watchdog': 'log', 'fail': 'done'},
autonomy={'threshold': Autonomy.Inherit, 'watchdog': Autonomy.Inherit, 'fail': Autonomy.Inherit})
# x:30 y:458
_sm_go_down_2 = OperatableStateMachine(outcomes=['done'], input_keys=['GripPose'])
with _sm_go_down_2:
# x:92 y:127
OperatableStateMachine.add('place down',
MoveitMoveCartesian(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'done', 'failed': 'done'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'targetPose': 'GripPose'})
# x:30 y:324, x:130 y:324
_sm_releasing_3 = OperatableStateMachine(outcomes=['object', 'no_object'])
with _sm_releasing_3:
# x:30 y:40
OperatableStateMachine.add('say touchdown',
SaraSay(sentence="Touchdown!", input_keys=[], emotion=1, block=False),
transitions={'done': 'open gripper'},
autonomy={'done': Autonomy.Off})
# x:139 y:176
OperatableStateMachine.add('open gripper',
SetGripperState(width=0.14, effort=1),
transitions={'object': 'object', 'no_object': 'no_object'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
# x:30 y:324, x:130 y:324
_sm_moveback_4 = OperatableStateMachine(outcomes=['arrived', 'failed'])
with _sm_moveback_4:
# x:30 y:40
OperatableStateMachine.add('genpose',
GenPoseEuler(x=-0.3, y=-0.3, z=0, roll=0, pitch=0, yaw=0),
transitions={'done': 'move back'},
autonomy={'done': Autonomy.Off},
remapping={'pose': 'backPose'})
# x:40 y:163
OperatableStateMachine.add('move back',
SaraMoveBase(reference="base_link"),
transitions={'arrived': 'arrived', 'failed': 'failed'},
autonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'pose': 'backPose'})
# x:536 y:72, x:231 y:292
_sm_prepare_grip_5 = OperatableStateMachine(outcomes=['failed', 'done'], input_keys=['pos'], output_keys=['approach_pose', 'grip_pose'])
with _sm_prepare_grip_5:
# x:50 y:40
OperatableStateMachine.add('Gen place_pos',
GenGripperPose(l=0, z=-0.05, planar=True),
transitions={'done': 'Gen approach_pos', 'fail': 'failed'},
autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'pose_in': 'pos', 'pose_out': 'grip_pose'})
# x:30 y:176
OperatableStateMachine.add('MoveIt_isReachable',
MoveitMove(move=False, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'log app', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'grip_pose'})
# x:37 y:108
OperatableStateMachine.add('Gen approach_pos',
GenGripperPose(l=0.0, z=0.20, planar=True),
transitions={'done': 'log place pos', 'fail': 'failed'},
autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'pose_in': 'pos', 'pose_out': 'approach_pose'})
# x:41 y:269
OperatableStateMachine.add('log app',
LogKeyState(text="{}", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'approach_pose'})
# x:360 y:180
OperatableStateMachine.add('log place pos',
LogKeyState(text="place pose is {}", severity=Logger.REPORT_HINT),
transitions={'done': 'MoveIt_isReachable'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'grip_pose'})
# x:30 y:458, x:130 y:458, x:230 y:458
_sm_get_down_6 = ConcurrencyContainer(outcomes=['done'], input_keys=['GripPose'], conditions=[
('done', [('Go down', 'done')]),
('done', [('read torque', 'done')])
])
with _sm_get_down_6:
# x:178 y:127
OperatableStateMachine.add('Go down',
_sm_go_down_2,
transitions={'done': 'done'},
autonomy={'done': Autonomy.Inherit},
remapping={'GripPose': 'GripPose'})
# x:405 y:150
OperatableStateMachine.add('read torque',
_sm_read_torque_1,
transitions={'done': 'done'},
autonomy={'done': Autonomy.Inherit})
# x:30 y:324, x:130 y:324
_sm_pretraitement_7 = OperatableStateMachine(outcomes=['fail', 'done'], input_keys=['pos'], output_keys=['pos'])
with _sm_pretraitement_7:
# x:30 y:40
OperatableStateMachine.add('TF_transformation',
TF_transformation(in_ref="map", out_ref="base_link"),
transitions={'done': 'LOG POSE', 'fail': 'fail'},
autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'in_pos': 'pos', 'out_pos': 'pos'})
# x:33 y:107
OperatableStateMachine.add('LOG POSE',
LogKeyState(text="{}", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'pos'})
with _state_machine:
# x:148 y:34
OperatableStateMachine.add('Pretraitement',
_sm_pretraitement_7,
transitions={'fail': 'failed', 'done': 'Pregrip'},
autonomy={'fail': Autonomy.Inherit, 'done': Autonomy.Inherit},
remapping={'pos': 'pos'})
# x:634 y:410
OperatableStateMachine.add('close gripper',
SetGripperState(width=0, effort=1),
transitions={'object': 'finished', 'no_object': 'finished'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
# x:141 y:522
OperatableStateMachine.add('Get_down',
_sm_get_down_6,
transitions={'done': 'releasing'},
autonomy={'done': Autonomy.Inherit},
remapping={'GripPose': 'grip_pose'})
# x:159 y:352
OperatableStateMachine.add('look down',
SaraSetHeadAngle(pitch=0.6, yaw=-0.3),
transitions={'done': 'Move_approach'},
autonomy={'done': Autonomy.Off})
# x:156 y:238
OperatableStateMachine.add('Prepare grip',
_sm_prepare_grip_5,
transitions={'failed': 'failed', 'done': 'look down'},
autonomy={'failed': Autonomy.Inherit, 'done': Autonomy.Inherit},
remapping={'pos': 'pos', 'approach_pose': 'approach_pose', 'grip_pose': 'grip_pose'})
# x:139 y:444
OperatableStateMachine.add('Move_approach',
MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'Get_down', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'approach_pose'})
# x:623 y:525
OperatableStateMachine.add('Moveback',
_sm_moveback_4,
transitions={'arrived': 'close gripper', 'failed': 'failed'},
autonomy={'arrived': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:298 y:520
OperatableStateMachine.add('releasing',
_sm_releasing_3,
transitions={'object': 'Pregrip_2', 'no_object': 'Pregrip_2'},
autonomy={'object': Autonomy.Inherit, 'no_object': Autonomy.Inherit})
# x:159 y:139
OperatableStateMachine.add('Pregrip',
RunTrajectory(file="pre_grip_pose", duration=6),
transitions={'done': 'Prepare grip'},
autonomy={'done': Autonomy.Off})
# x:440 y:537
OperatableStateMachine.add('Pregrip_2',
RunTrajectory(file="pre_grip_pose", duration=0),
transitions={'done': 'Moveback'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| bsd-3-clause |
171121130/SWI | venv/Lib/site-packages/flask/ctx.py | 170 | 14739 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import appcontext_pushed, appcontext_popped
from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
# a singleton sentinel value for parameter defaults
_sentinel = object()
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def pop(self, name, default=_sentinel):
if default is _sentinel:
return self.__dict__.pop(name)
else:
return self.__dict__.pop(name, default)
def setdefault(self, name, default=None):
return self.__dict__.setdefault(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
(such as :class:`request` or :class:`g` for truthness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=_sentinel):
"""Pops the app context."""
try:
self._refcnt -= 1
if self._refcnt <= 0:
if exc is _sentinel:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
finally:
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
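# --- Editor's example (not in the original module) -------------------------
# AppContext is normally created via Flask.app_context(); the ``with`` form
# maps onto the __enter__/__exit__ methods above, so push/pop and the
# teardown callbacks run automatically. Sketch only -- ``app`` is an assumed
# Flask application:
#     with app.app_context():
#         ...  # current_app and g are bound here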
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of ``DEBUG`` mode. By setting
``'flask._preserve_context'`` to ``True`` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Only when the last level is popped do we
        # get rid of them. Additionally, if an application context is missing,
        # one is created implicitly, so for each level we add this information.
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run the risk that something leaks
        # memory. This is usually only a problem in test suites, since this
        # functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
        # request context (e.g. code that accesses database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=_sentinel):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
try:
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is _sentinel:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
finally:
rv = _request_ctx_stack.pop()
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
assert rv is self, 'Popped wrong request context. ' \
'(%r instead of %r)' % (rv, self)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
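# --- Editor's example (not in the original module) -------------------------
# RequestContext is normally created via Flask.test_request_context() or
# Flask.request_context(); the ``with`` form maps onto __enter__/__exit__
# above, so teardown functions run on exit. Sketch only -- ``app`` is an
# assumed Flask application:
#     with app.test_request_context('/hello', method='POST'):
#         assert request.path == '/hello'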
| mit |
nuigroup/pymt-widgets | examples/apps/svg/svg2.py | 2 | 3693 | # -*- coding: utf-8 -*-
from pymt import *
# PYMT Plugin integration
IS_PYMT_PLUGIN = True
PLUGIN_TITLE = 'SVG Viewer'
PLUGIN_AUTHOR = 'Nathanaël Lécaudé'
PLUGIN_DESCRIPTION = 'This is an example of Scalable Vector Graphics using the Squirtle library for pyglet.'
svgdata = """<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 13.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 14948) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="128px" height="128px" viewBox="0 0 128 128" enable-background="new 0 0 128 128" xml:space="preserve">
<polygon fill="#F1E323" points="26.318,46.295 64.821,19.125 110.014,42.609 112.881,96.541 67.961,111.15 26.045,92.309 "/>
<polygon fill="#F8F191" points="64.957,22.128 108.238,43.974 110.969,95.312 99.637,50.392 "/>
<polyline points="82.844,51.822 77.109,44.931 73.285,53.273 78.809,57.082 "/>
<polyline points="59.072,54.47 52.683,47.113 48.422,56.019 54.576,60.084 "/>
<polygon points="45.979,75.24 44.887,70.871 39.016,79.201 44.477,78.518 59.223,89.441 77.246,90.807 92.674,77.973 96.77,77.152
90.898,70.734 90.217,75.787 76.836,87.529 59.769,86.164 "/>
<polygon fill="#404040" points="52.942,48.89 57.584,54.215 53.352,51.757 "/>
<polygon fill="#404040" points="77.109,46.159 81.479,51.757 77.518,49.846 "/>
<polygon fill="#404040" points="44.478,72.783 45.569,75.924 58.95,86.301 44.341,76.469 "/>
<polygon fill="#404040" points="91.172,71.691 95.814,76.742 92.4,74.967 "/>
<polyline fill="#CFC31E" points="27.547,47.115 27.273,91.354 68.098,109.785 30.687,89.168 "/>
<polygon fill="#F1E323" points="116.173,58.063 145.789,54.59 147.251,77.99 119.646,77.259 "/>
<polygon fill="#F1E323" points="103.376,31.92 80.341,19.854 96.063,-3.363 114.893,5.778 "/>
<polygon fill="#F1E323" points="53.833,21.134 33.723,-6.288 10.688,7.24 27.507,33.383 "/>
<polygon fill="#F1E323" points="21.292,58.611 -11.25,57.697 -11.25,80.549 19.281,79.817 "/>
<polygon fill="#F1E323" points="28.604,98.647 13.065,124.059 34.637,133.383 52.736,110.348 "/>
<polygon fill="#F1E323" points="84.18,110.348 104.473,103.035 113.979,130.092 92.955,133.748 "/>
<polygon fill="#F8F191" points="96.978,-1.352 112.882,6.875 102.827,29.727 108.86,7.789 "/>
<polygon fill="#F8F191" points="118.55,59.342 144.326,56.6 145.789,76.161 141.401,60.622 "/>
<polygon fill="#F8F191" points="33.906,-3.729 51.09,20.585 38.111,11.262 "/>
<polygon fill="#F8F191" points="-9.056,59.891 19.098,60.622 18.001,77.989 15.807,63.364 "/>
<polygon fill="#F8F191" points="103.741,105.778 112.15,128.63 104.29,117.844 "/>
<polygon fill="#F8F191" points="29.518,101.757 48.714,111.812 35.368,130.458 44.326,113.64 "/>
<polygon fill="#CFC31E" points="95.5,1 83.75,18.5 101.5,28.75 87.75,18.25 "/>
<polygon fill="#CFC31E" points="118.75,62.25 121,76 143.5,76.25 123.25,74 "/>
<polygon fill="#CFC31E" points="85.75,111.25 93.5,132.75 91.5,120 "/>
<polygon fill="#CFC31E" points="27.75,103 14.75,123.5 32.5,131.5 18.25,122.75 "/>
<polygon fill="#CFC31E" points="16.25,78.5 -10.5,79.25 -10,60.5 -8.75,77 "/>
<polygon fill="#CFC31E" points="28.25,31.25 13,8 23.75,17.5 "/>
</svg>""";
def pymt_plugin_activate(w, ctx):
sun = MTScatterSvg(filename = 'sun', pos = (200,200), rawdata=svgdata)
ctx.c = MTKinetic()
ctx.c.add_widget(sun)
w.add_widget(ctx.c)
def pymt_plugin_deactivate(w, ctx):
w.remove_widget(ctx.c)
if __name__ == '__main__':
w = MTWindow()
ctx = MTContext()
pymt_plugin_activate(w, ctx)
runTouchApp()
pymt_plugin_deactivate(w, ctx)
| lgpl-3.0 |
nailor/tyy-vaali | aanikone/votechecker/models.py | 1 | 6819 | from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from aanikone.utils import now
PAPERVOTE = 1
ELECTRONIC = 2
PRE_ELECTRONIC = 3
class Election(models.Model):
"""Election model, automatically generated from WebVoter database"""
name = models.TextField(primary_key=True)
password = models.TextField()
authurl = models.TextField()
isopen = models.BooleanField()
production = models.BooleanField()
ispublic = models.BooleanField()
firstpassword = models.BooleanField()
secondpassword = models.BooleanField()
stv = models.BooleanField()
government = models.BooleanField()
toelect = models.IntegerField()
class Meta:
db_table = u'election'
class Person(models.Model):
"""Person model.
This is automatically generated from WebVoter database. WebVoter
uses Person's hasvoted field to check if a person has voted already.
    WebVoter handles adding persons; the only thing the checker should
    do is ensure that the person has not voted with either of the
accounts (if applicable, see below) and mark the hasvoted field
for the account(s).
Special notes for election of 2009
----------------------------------
Election has students from two universities, University of Turku
and Turku School of Economics. That's why the hetu (social
security number) field is required in the model. This field can be
removed later. It is used for finding students that are in both
universities.
"""
id = models.IntegerField(primary_key=True)
electionname = models.ForeignKey(Election, db_column='electionname')
personnumber = models.TextField()
organization = models.TextField()
lastname = models.TextField(null=True)
firstname = models.TextField(null=True)
emailaddress = models.TextField(null=True)
address = models.TextField(null=True)
city = models.TextField(null=True)
zipcode = models.TextField(null=True)
votedate = models.DateTimeField(null=True)
hasvoted = models.BooleanField()
votestyle = models.IntegerField()
password = models.TextField(null=True)
hetu = models.TextField()
def __unicode__(self):
try:
firstnames = self.firstname.split()
fn = ' '.join([x.capitalize() for x in firstnames])
return u'%s, %s' % (self.lastname.capitalize(), fn)
except AttributeError:
# Name missing?!
return unicode(self.personnumber)
def check_vote(self):
"""Check if the person has voted.
        Returns True if any instance of the person has voted. False
otherwise.
"""
objs = Person.objects.filter(
hetu__exact=self.hetu,
hasvoted__exact=True)
if self.hasvoted or objs.count():
return True
return False
def vote(self):
"""Mark person (and all duplicates) voted.
Note: this does not check if the person has already voted. Use
Person.check_vote for that.
"""
objs = Person.objects.filter(hetu=self.hetu)
try:
for p in objs:
# Votes set this way are paper votes
p.votestyle = PAPERVOTE
p.hasvoted = True
p.votedate = now()
p.save()
finally:
# Just a security measure: Mark this user as voted in the
# end (even if something fails), the check_vote should then
# return False later on.
self.votestyle = PAPERVOTE
self.hasvoted = True
self.votedate = now()
self.save()
def get_ticket(self):
objs = Person.objects.filter(hetu=self.hetu)
for p in objs:
if p.ticket_set.all().count() > 0:
return p.ticket_set.all()
return None
def give_slip(self, place, user):
"""Give person a voting slip and mark person as voted.
This prevents the electronical voting from working. Votestyle
is not changed, that is used to determine (in addition to the
Ticket entry) that the person has a slip, but has not yet
returned it.
"""
objs = Person.objects.filter(hetu=self.hetu)
try:
for p in objs:
# Votes set this way are paper votes
p.hasvoted = True
p.save()
finally:
# Just a security measure: Mark this user as voted in the
# end (even if something fails), the check_vote should then
# return False later on.
self.hasvoted = True
self.save()
t = Ticket(
voter=self,
release_place=place,
releaser=user,
)
t.save()
class Meta:
db_table = u'person'
ordering = ['lastname', 'firstname']
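# --- Editor's example (not part of the original module) ---------------------
# A minimal sketch of the intended polling-place flow, combining the Person
# methods above. ``person``, ``place`` and ``user`` are assumed to be
# supplied by the calling view; the helper name is invented.
def issue_slip_if_eligible(person, place, user):
    """Hand out a voting slip unless any duplicate account has voted."""
    if person.check_vote():
        return False  # already voted, possibly under a duplicate account
    person.give_slip(place, user)
    return True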
class Place(models.Model):
name = models.CharField(_(u'name'), max_length=500)
description = models.TextField(_(u'description'), null=True, blank=True)
def __unicode__(self):
return self.name
def find_open_tickets(self):
return self.released_tickets.filter(
submit_place=None,
submit_time=None
)
class Meta:
verbose_name = _(u'voting place')
verbose_name_plural = _(u'voting places')
ordering = ['name',]
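# --- Editor's example (not part of the original module) ---------------------
# ``find_open_tickets`` returns slips released at this place that have not
# been returned at any place yet; a monitoring view might use it as (sketch):
#     outstanding = place.find_open_tickets().count()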
class Ticket(models.Model):
voter = models.ForeignKey(Person,
verbose_name=_(u'voter'),
unique=True,
to_field='id')
release_place = models.ForeignKey(Place,
verbose_name=_(u'release place'),
related_name='released_tickets')
release_time = models.DateTimeField(_(u'release time'),
default=now)
releaser = models.ForeignKey(User, related_name='releaser')
submit_time = models.DateTimeField(_(u'submit time'), null=True)
submit_place = models.ForeignKey(Place,
verbose_name=_(u'submit place'),
related_name='submitted_tickets',
null=True)
submitter = models.ForeignKey(User, related_name='submitter', null=True,
blank=True)
def __unicode__(self):
        return u'%s, %s (%s)' % (unicode(self.voter),
self.release_place,
self.release_time)
class Meta:
verbose_name = _(u'ticket')
verbose_name_plural = _(u'tickets')
ordering = ['release_time',]
| gpl-3.0 |
jbaayen/sympy | sympy/thirdparty/__init__.py | 10 | 1047 | """Thirdparty Packages for internal use.
"""
import sys
import os
def import_thirdparty(lib):
"""
Imports a thirdparty package "lib" by setting all paths correctly.
    At the moment, there is only the "pyglet" library, so we temporarily add
    pyglet to sys.path, import "lib", and then restore the path.
    With more packages, we would add them to sys.path as well.
"""
seen = set()
def new_import(name, globals={}, locals={}, fromlist=[]):
if name in seen:
return old_import(name, globals, locals, fromlist)
seen.add(name)
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname( \
__file__)), "pyglet"))
try:
m = old_import(name, globals, locals, fromlist)
finally:
del sys.path[0]
return m
import __builtin__
old_import = __builtin__.__import__
__builtin__.__import__ = new_import
try:
m = __import__(lib)
finally:
__builtin__.__import__ = old_import
return m
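# --- Editor's example (not part of the original module) ---------------------
# Intended usage per the docstring above; commented out here because running
# it would actually import the bundled pyglet:
#     pyglet = import_thirdparty("pyglet")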
| bsd-3-clause |
elainekmao/hiphoptextanalysis | lyricwiki-scraper/lyricwiki/spiders/mastakilla_spider.py | 1 | 1143 | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "mastakilla" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Masta_Killa", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Masta_Killa.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
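    # --- Editor's note (not part of the original spider) --------------------
    # With this file in a standard Scrapy project, the spider would be run
    # from the project root as (assumed invocation):
    #     scrapy crawl mastakilla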
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
        return item
| gpl-2.0 |
wanglongqi/sympy | doc/ext/sympylive.py | 104 | 1289 | """
sympylive
~~~~~~~~~
Allow `SymPy Live <http://live.sympy.org/>`_ to be used for interactive
evaluation of SymPy's code examples.
:copyright: Copyright 2014 by the SymPy Development Team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx.errors import ExtensionError
def builder_inited(app):
if not app.config.sympylive_url:
raise ExtensionError('sympylive_url config value must be set'
' for the sympylive extension to work')
app.add_javascript(app.config.sympylive_url + '/static/utilities.js')
app.add_javascript(app.config.sympylive_url + '/static/external/classy.js')
app.add_stylesheet(app.config.sympylive_url + '/static/live-core.css')
app.add_stylesheet(app.config.sympylive_url +
'/static/live-autocomplete.css')
app.add_stylesheet(app.config.sympylive_url + '/static/live-sphinx.css')
app.add_javascript(app.config.sympylive_url + '/static/live-core.js')
app.add_javascript(app.config.sympylive_url +
'/static/live-autocomplete.js')
app.add_javascript(app.config.sympylive_url + '/static/live-sphinx.js')
def setup(app):
app.add_config_value('sympylive_url', 'http://live.sympy.org', False)
app.connect('builder-inited', builder_inited)
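# --- Editor's example (not part of the original module) ---------------------
# Enabling the extension from a Sphinx ``conf.py`` (sketch; assumes the
# module is importable as ``sympylive``):
#     extensions = ['sympylive']
#     sympylive_url = 'http://live.sympy.org'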
| bsd-3-clause |