text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
Created on Tue Apr 28 23:15:29 2015
@author: ddboline
"""
import os
import matplotlib
matplotlib.use('Agg')
import pylab as pl
from pandas.tools.plotting import scatter_matrix
def create_html_page_of_plots(list_of_plots, prefix='html'):
    """
    create html page with png files

    Moves every ``*.png`` in the current directory into ``prefix`` and
    writes paginated ``index_<n>.html`` pages (200 images per page), each
    containing one ``<img>`` tag per plot filename.

    :param list_of_plots: iterable of png filenames (relative to ``prefix``)
    :param prefix: output directory, created if missing
    """
    if not os.path.exists(prefix):
        os.makedirs(prefix)
    # Best-effort: sweep any generated pngs into the output directory.
    os.system('mv *.png %s' % prefix)
    htmlfile = open('%s/index_0.html' % prefix, 'w')
    htmlfile.write('<!DOCTYPE html><html><body><div>\n')
    for idx, plot in enumerate(list_of_plots):
        if idx > 0 and idx % 200 == 0:
            # Close the current page and start the next one.
            # (The original wrote '</div></html></html>', which never
            # closed <body> and duplicated </html>.)
            htmlfile.write('</div></body></html>\n')
            htmlfile.close()
            htmlfile = open('%s/index_%d.html' % (prefix, idx // 200), 'w')
            htmlfile.write('<!DOCTYPE html><html><body><div>\n')
        htmlfile.write('<p><img src="%s"></p>\n' % plot)
    htmlfile.write('</div></body></html>\n')
    htmlfile.close()
def plot_data(indf, prefix='html'):
    """
    create scatter matrix plot, histograms

    Groups the DataFrame's columns in threes, draws a scatter matrix for
    every ordered pair of distinct groups, then a per-column histogram,
    and finally indexes all generated pngs via create_html_page_of_plots().

    :param indf: pandas DataFrame whose columns are plotted
    :param prefix: output directory passed to create_html_page_of_plots()
    """
    list_of_plots = []
    column_groups = []
    for idx in range(0, len(indf.columns), 3):
        print(len(indf.columns), idx, idx + 3)
        # Convert to a plain list so '+' below is list concatenation on
        # every pandas version (Index.__add__ changed semantics over time).
        column_groups.append(list(indf.columns[idx:(idx + 3)]))
    for idx in range(len(column_groups)):
        # range(idx) already excludes idy == idx, so the original
        # "if idx == idy: continue" guard was dead code.
        for idy in range(idx):
            print(column_groups[idx] + column_groups[idy])
            pl.clf()
            scatter_matrix(indf[column_groups[idx] + column_groups[idy]])
            pl.savefig('scatter_matrix_%d_%d.png' % (idx, idy))
            list_of_plots.append('scatter_matrix_%d_%d.png' % (idx, idy))
            pl.close()
    for col in indf:
        pl.clf()
        print(col)
        # NOTE(review): 'normed' was renamed 'density' in matplotlib >= 2.1
        # and removed in 3.1 -- confirm the pinned matplotlib version
        # before switching.
        indf[col].hist(histtype='step', normed=True)
        pl.title(col)
        pl.savefig('%s_hist.png' % col)
        list_of_plots.append('%s_hist.png' % col)
    create_html_page_of_plots(list_of_plots, prefix)
    return
|
{
"content_hash": "ed205d5c72434c215d415744b2619cc8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 75,
"avg_line_length": 30.753846153846155,
"alnum_prop": 0.5652826413206603,
"repo_name": "abhishek-ch/kaggle_facebook_recruiting_human_or_bot",
"id": "86f7b5c4f36b9ae477913e8785d3c2e7890cb5df",
"size": "2041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plot_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10178"
}
],
"symlink_target": ""
}
|
import re
import uuid
from xml.dom import minidom
from eventlet import greenthread
from lxml import etree
import mock
from mox3 import mox
from oslo_concurrency.fixture import lockutils as lock_fixture
from nova.compute import utils as compute_utils
from nova import exception
from nova.network import linux_net
from nova import objects
from nova import test
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import firewall
from nova.virt.libvirt import host
from nova.virt import netutils
# Short aliases for the fake-network test helpers used throughout this module.
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
class NWFilterFakes(object):
    """In-memory stand-in for libvirt's nwfilter define/lookup API."""

    def __init__(self):
        self.filters = {}

    def nwfilterLookupByName(self, name):
        """Return the fake filter registered under ``name`` or raise."""
        try:
            return self.filters[name]
        except KeyError:
            raise fakelibvirt.libvirtError('Filter Not Found')

    def filterDefineXMLMock(self, xml):
        """Register (or update) a fake nwfilter from its XML definition."""

        class FakeNWFilterInternal(object):
            def __init__(self, parent, name, u, xml):
                self.name = name
                self.uuid = u
                self.parent = parent
                self.xml = xml

            def XMLDesc(self, flags):
                return self.xml

            def undefine(self):
                del self.parent.filters[self.name]

        root = etree.fromstring(xml)
        name = root.get('name')
        uuid_elem = root.find('uuid')
        # Use the uuid embedded in the XML when present, else invent one.
        filter_uuid = uuid.uuid4().hex if uuid_elem is None else uuid_elem.text
        existing = self.filters.get(name)
        if existing is None:
            self.filters[name] = FakeNWFilterInternal(
                self, name, filter_uuid, xml)
        else:
            if existing.uuid != filter_uuid:
                raise fakelibvirt.libvirtError(
                    "Mismatching name '%s' with uuid '%s' vs '%s'"
                    % (name, existing.uuid, filter_uuid))
            existing.xml = xml
        return True
class IptablesFirewallTestCase(test.NoDBTestCase):
    """Tests for the iptables-based firewall driver.

    ``iptables-save``/``-restore`` execution is stubbed per test; the
    canned ``in_rules``/``in6_filter_rules`` below simulate rules already
    present on the host, and the restored rule sets are captured in
    ``self.out_rules``/``self.out6_rules``.
    """

    def setUp(self):
        super(IptablesFirewallTestCase, self).setUp()
        self.useFixture(lock_fixture.ExternalLockFixture())
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.fw = firewall.IptablesFirewallDriver(
            host=host.Host("qemu:///system"))

    # Canned IPv4 iptables-save output (nat, mangle and filter tables).
    in_rules = [
        '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
        '*nat',
        ':PREROUTING ACCEPT [1170:189210]',
        ':INPUT ACCEPT [844:71028]',
        ':OUTPUT ACCEPT [5149:405186]',
        ':POSTROUTING ACCEPT [5063:386098]',
        '# Completed on Tue Dec 18 15:50:25 2012',
        '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
        '*mangle',
        ':PREROUTING ACCEPT [241:39722]',
        ':INPUT ACCEPT [230:39282]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [266:26558]',
        ':POSTROUTING ACCEPT [267:26590]',
        '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
        '--checksum-fill',
        'COMMIT',
        '# Completed on Tue Dec 18 15:50:25 2012',
        '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
        '*filter',
        ':INPUT ACCEPT [969615:281627771]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [915599:63811649]',
        ':nova-block-ipv4 - [0:0]',
        '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
        '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
        ',ESTABLISHED -j ACCEPT ',
        '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -o virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        '[0:0] -A FORWARD -i virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        'COMMIT',
        '# Completed on Mon Dec 6 11:54:13 2010',
    ]

    # Canned IPv6 ip6tables-save output (filter table only).
    in6_filter_rules = [
        '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
        '*filter',
        ':INPUT ACCEPT [349155:75810423]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [349256:75777230]',
        'COMMIT',
        '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def _create_instance_ref(self,
                             uuid="74526555-9166-4893-a203-126bdcab0d67"):
        """Build a minimal Instance object with an info cache attached."""
        inst = objects.Instance(
            id=7,
            uuid=uuid,
            user_id="fake",
            project_id="fake",
            image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            instance_type_id=1)
        inst.info_cache = objects.InstanceInfoCache()
        inst.info_cache.deleted = False
        return inst

    @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
    def test_static_filters(self, mock_secrule, mock_instlist):
        """End-to-end check that security group rules become iptables rules."""
        UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
        SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
        instance_ref = self._create_instance_ref(UUID)
        src_instance_ref = self._create_instance_ref(SRC_UUID)
        secgroup = objects.SecurityGroup(id=1,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testgroup',
                                         description='test group')
        src_secgroup = objects.SecurityGroup(id=2,
                                             user_id='fake',
                                             project_id='fake',
                                             name='testsourcegroup',
                                             description='src group')
        r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=-1,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)
        r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=8,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)
        r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr='192.168.10.0/24',
                                       grantee_group=None)
        r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])
        r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol=None,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])
        secgroup_list = objects.SecurityGroupList()
        secgroup_list.objects.append(secgroup)
        src_secgroup_list = objects.SecurityGroupList()
        src_secgroup_list.objects.append(src_secgroup)
        instance_ref.security_groups = secgroup_list
        src_instance_ref.security_groups = src_secgroup_list
        mock_secrule.return_value = objects.SecurityGroupRuleList(
            objects=[r1, r2, r3, r4, r5])

        def _fake_instlist(ctxt, id):
            if id == src_secgroup['id']:
                insts = objects.InstanceList()
                insts.objects.append(src_instance_ref)
                return insts
            else:
                insts = objects.InstanceList()
                insts.objects.append(instance_ref)
                return insts

        mock_instlist.side_effect = _fake_instlist

        def fake_iptables_execute(*cmd, **kwargs):
            # Feed the canned save output in; capture what gets restored.
            process_input = kwargs.get('process_input', None)
            if cmd == ('ip6tables-save', '-c'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('iptables-save', '-c'):
                return '\n'.join(self.in_rules), None
            if cmd == ('iptables-restore', '-c'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('ip6tables-restore', '-c',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''

        network_model = _fake_network_info(self, 1)
        linux_net.iptables_manager.execute = fake_iptables_execute
        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)
        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        # Python 3 fix: materialize a list instead of keeping a filter
        # object (the original passed a lambda to filter()).
        in_rules = [rule for rule in self.in_rules
                    if not rule.startswith('#')]
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertIn(rule, self.out_rules,
                              'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change (dots are now escaped so '.' cannot
            # match arbitrary characters)
            if re.search(r'-d 192\.168\.[0-9]{1,3}\.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        # Raw strings silence invalid-escape warnings; any() replaces the
        # Python-2-only len(filter(...)) idiom, which raises TypeError on
        # Python 3 because filter() returns an iterator there.
        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p icmp '
                           r'-s 192.168.11.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self.out_rules),
                        "ICMP acceptance rule wasn't added")

        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
                           r'--icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self.out_rules),
                        "ICMP Echo Request acceptance rule wasn't added")

        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p tcp '
                               r'-m multiport '
                               r'--dports 80:81 -s %s' % ip['address'])
            self.assertTrue(any(regex.match(rule) for rule in self.out_rules),
                            "TCP port 80/81 acceptance rule wasn't added")
            regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -s '
                               r'%s' % ip['address'])
            self.assertTrue(any(regex.match(rule) for rule in self.out_rules),
                            "Protocol/port-less acceptance rule wasn't added")

        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p tcp '
                           r'-m multiport --dports 80:81 -s 192.168.10.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self.out_rules),
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_filters_for_instance_with_ip_v6(self):
        """IPv6 enabled: both IPv4 and IPv6 filters are generated."""
        self.flags(use_ipv6=True)
        network_info = _fake_network_info(self, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 1)

    def test_filters_for_instance_without_ip_v6(self):
        """IPv6 disabled: no IPv6 filters are generated."""
        self.flags(use_ipv6=False)
        network_info = _fake_network_info(self, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 0)

    @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
    def test_multinic_iptables(self, mock_secrule):
        """Network rule counts scale with networks and addresses."""
        mock_secrule.return_value = objects.SecurityGroupRuleList()
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        instance_ref.security_groups = objects.SecurityGroupList()
        network_info = _fake_network_info(self, networks_count,
                                          ipv4_addr_per_network)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                         ipv6_rules_per_addr * ipv6_addr_per_network *
                         networks_count)

    def test_do_refresh_security_group_rules(self):
        """Refreshing security group rules re-applies instance filters."""
        instance_ref = self._create_instance_ref()
        self.mox.StubOutWithMock(self.fw,
                                 'instance_rules')
        self.mox.StubOutWithMock(self.fw,
                                 'add_filters_for_instance',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
                                 'has_chain')
        # First pass: prepare_instance_filter; second pass: the refresh.
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
                                                  ).AndReturn(True)
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
        self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
        self.fw.do_refresh_security_group_rules("fake")

    def test_do_refresh_security_group_rules_instance_gone(self):
        instance1 = objects.Instance(None, id=1, uuid='fake-uuid1')
        instance2 = objects.Instance(None, id=2, uuid='fake-uuid2')
        self.fw.instance_info = {1: (instance1, 'netinfo1'),
                                 2: (instance2, 'netinfo2')}
        mock_filter = mock.MagicMock()
        with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
            mock_filter.has_chain.return_value = False
            with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
                mock_ir.return_value = (None, None)
                self.fw.do_refresh_security_group_rules('secgroup')
                self.assertEqual(2, mock_ir.call_count)
        # NOTE(danms): Make sure that it is checking has_chain each time,
        # continuing to process all the instances, and never adding the
        # new chains back if has_chain() is False
        mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
                                                mock.call('inst-2')],
                                               any_order=True)
        self.assertEqual(0, mock_filter.add_chain.call_count)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
    @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
    def test_unfilter_instance_undefines_nwfilter(self,
                                                  mock_secrule,
                                                  mock_instlist,
                                                  mock_define,
                                                  mock_lookup):
        """unfilter_instance() undefines exactly one nwfilter."""
        fakefilter = NWFilterFakes()
        mock_lookup.side_effect = fakefilter.nwfilterLookupByName
        mock_define.side_effect = fakefilter.filterDefineXMLMock
        instance_ref = self._create_instance_ref()
        instance_ref.security_groups = objects.SecurityGroupList()
        mock_secrule.return_value = objects.SecurityGroupRuleList()
        network_info = _fake_network_info(self, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)
        # should undefine just the instance filter
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
@mock.patch.object(firewall, 'libvirt', fakelibvirt)
class NWFilterTestCase(test.NoDBTestCase):
    # Tests for the libvirt nwfilter-based firewall driver; libvirt itself
    # is replaced by fakelibvirt for the whole class.

    def setUp(self):
        super(NWFilterTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.fw = firewall.NWFilterFirewall(host=host.Host("qemu:///system"))

    def _create_security_group(self, instance_ref):
        """Attach one test security group to instance_ref and return it."""
        secgroup = objects.SecurityGroup(id=1,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testgroup',
                                         description='test group description')
        secgroup_list = objects.SecurityGroupList()
        secgroup_list.objects.append(secgroup)
        instance_ref.security_groups = secgroup_list
        return secgroup

    def _create_instance(self):
        """Build a minimal Instance object with an info cache attached."""
        inst = objects.Instance(
            id=7,
            uuid="74526555-9166-4893-a203-126bdcab0d67",
            user_id="fake",
            project_id="fake",
            image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            instance_type_id=1)
        inst.info_cache = objects.InstanceInfoCache()
        inst.info_cache.deleted = False
        return inst

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
    def test_creates_base_rule_first(self, mock_define):
        """Filters must only reference filters that were defined earlier."""
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']
        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def fake_define(xml):
            # Record each defined filter and assert every <filterref> it
            # names has already been defined (i.e. bases come first).
            dom = minidom.parseString(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertIn(ref, self.defined_filters,
                              ('%s referenced filter that does ' +
                               'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies
            self.defined_filters.append(name)
            return True

        mock_define.side_effect = fake_define
        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        def _ensure_all_called(mac, allow_dhcp):
            # The instance filter name embeds the mac with ':' stripped.
            instance_filter = 'nova-instance-%s-%s' % (
                instance_ref['name'],
                mac.translate({ord(':'): None}))
            requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
                            'no-mac-spoofing']
            required_not_list = []
            if allow_dhcp:
                requiredlist.append('allow-dhcp-server')
            else:
                required_not_list.append('allow-dhcp-server')
            for required in requiredlist:
                self.assertIn(required,
                              self.recursive_depends[instance_filter],
                              "Instance's filter does not include %s" %
                              required)
            for required_not in required_not_list:
                self.assertNotIn(required_not,
                                 self.recursive_depends[instance_filter],
                                 "Instance filter includes %s" % required_not)

        network_info = _fake_network_info(self, 1)
        # since there is one (network_info) there is one vif
        # pass this vif's mac to _ensure_all_called()
        # to set the instance_filter properly
        mac = network_info[0]['address']
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = True
        _ensure_all_called(mac, allow_dhcp)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = False
        _ensure_all_called(mac, allow_dhcp)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
    def test_unfilter_instance_undefines_nwfilters(self,
                                                   mock_define,
                                                   mock_lookup):
        """unfilter_instance() removes exactly one filter definition."""
        fakefilter = NWFilterFakes()
        mock_lookup.side_effect = fakefilter.nwfilterLookupByName
        mock_define.side_effect = fakefilter.filterDefineXMLMock
        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)
        network_info = _fake_network_info(self, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(greenthread, 'sleep')
    def test_unfilter_instance_retry_and_error(self, mock_sleep, mock_lookup):
        # Tests that we try to undefine the network filter when it's in use
        # until we hit a timeout. We try two times and sleep once in between.
        self.flags(live_migration_retry_count=2)
        in_use = fakelibvirt.libvirtError('nwfilter is in use')
        in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
        mock_undefine = mock.Mock(side_effect=in_use)
        fakefilter = mock.MagicMock(undefine=mock_undefine)
        mock_lookup.return_value = fakefilter
        instance_ref = self._create_instance()
        network_info = _fake_network_info(self, 1)
        self.assertRaises(fakelibvirt.libvirtError, self.fw.unfilter_instance,
                          instance_ref, network_info)
        self.assertEqual(2, mock_lookup.call_count)
        self.assertEqual(2, mock_undefine.call_count)
        mock_sleep.assert_called_once_with(1)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(greenthread, 'sleep')
    def test_unfilter_instance_retry_not_found(self, mock_sleep, mock_lookup):
        # Tests that we exit if the nw filter is not found.
        in_use = fakelibvirt.libvirtError('nwfilter is in use')
        in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
        not_found = fakelibvirt.libvirtError('no nwfilter with matching name')
        not_found.err = (fakelibvirt.VIR_ERR_NO_NWFILTER,)
        mock_undefine = mock.Mock(side_effect=(in_use, not_found))
        fakefilter = mock.MagicMock(undefine=mock_undefine)
        mock_lookup.return_value = fakefilter
        instance_ref = self._create_instance()
        network_info = _fake_network_info(self, 1)
        self.fw.unfilter_instance(instance_ref, network_info)
        self.assertEqual(2, mock_lookup.call_count)
        self.assertEqual(2, mock_undefine.call_count)
        mock_sleep.assert_called_once_with(1)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(greenthread, 'sleep')
    def test_unfilter_instance_retry_and_pass(self, mock_sleep, mock_lookup):
        # Tests that we retry on in-use error but pass if undefine() works
        # while looping.
        in_use = fakelibvirt.libvirtError('nwfilter is in use')
        in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
        mock_undefine = mock.Mock(side_effect=(in_use, None))
        fakefilter = mock.MagicMock(undefine=mock_undefine)
        mock_lookup.return_value = fakefilter
        instance_ref = self._create_instance()
        network_info = _fake_network_info(self, 1)
        self.fw.unfilter_instance(instance_ref, network_info)
        self.assertEqual(2, mock_lookup.call_count)
        self.assertEqual(2, mock_undefine.call_count)
        mock_sleep.assert_called_once_with(1)

    def test_redefining_nwfilters(self):
        """Setting up filtering twice for the same instance must not fail."""
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)
        network_info = _fake_network_info(self, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.setup_basic_filtering(instance_ref, network_info)

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
    def test_nwfilter_parameters(self,
                                 mock_define,
                                 mock_lookup):
        """Every <parameter> in the instance filter holds the expected value."""
        fakefilter = NWFilterFakes()
        mock_lookup.side_effect = fakefilter.nwfilterLookupByName
        mock_define.side_effect = fakefilter.filterDefineXMLMock
        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)
        network_info = _fake_network_info(self, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        vif = network_info[0]
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self.fw._instance_filter_name(instance_ref,
                                                             nic_id)
        f = fakefilter.nwfilterLookupByName(instance_filter_name)
        tree = etree.fromstring(f.xml)
        for fref in tree.findall('filterref'):
            parameters = fref.findall('./parameter')
            for parameter in parameters:
                subnet_v4, subnet_v6 = vif['network']['subnets']
                if parameter.get('name') == 'IP':
                    self.assertTrue(_ipv4_like(parameter.get('value'),
                                               '192.168'))
                elif parameter.get('name') == 'DHCPSERVER':
                    dhcp_server = subnet_v4.get('dhcp_server')
                    self.assertEqual(parameter.get('value'), dhcp_server)
                elif parameter.get('name') == 'RASERVER':
                    ra_server = subnet_v6['gateway']['address'] + "/128"
                    self.assertEqual(parameter.get('value'), ra_server)
                elif parameter.get('name') == 'PROJNET':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), mask)
                elif parameter.get('name') == 'PROJNET6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), prefix)
                else:
                    # Any parameter not covered above is unexpected.
                    raise exception.InvalidParameterValue('unknown parameter '
                                                          'in filter')

    @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
    @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
    def test_multinic_base_filter_selection(self,
                                            mock_define,
                                            mock_lookup):
        """Each vif references the right base filter (dhcp vs nodhcp)."""
        fakefilter = NWFilterFakes()
        mock_lookup.side_effect = fakefilter.nwfilterLookupByName
        mock_define.side_effect = fakefilter.filterDefineXMLMock
        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)
        network_info = _fake_network_info(self, 2)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        self.fw.setup_basic_filtering(instance_ref, network_info)

        def assert_filterref(instance, vif, expected=None):
            # Compare the set of <filterref> names on the vif's filter.
            expected = expected or []
            nic_id = vif['address'].replace(':', '')
            filter_name = self.fw._instance_filter_name(instance, nic_id)
            f = fakefilter.nwfilterLookupByName(filter_name)
            tree = etree.fromstring(f.xml)
            frefs = [fr.get('filter') for fr in tree.findall('filterref')]
            self.assertEqual(set(expected), set(frefs))

        assert_filterref(instance_ref, network_info[0],
                         expected=['nova-base'])
        assert_filterref(instance_ref, network_info[1],
                         expected=['nova-nodhcp'])

    @mock.patch.object(firewall.LOG, 'debug')
    def test_get_filter_uuid_unicode_exception_logging(self, debug):
        """Non-ascii libvirt errors must not break the debug logging."""
        with mock.patch.object(self.fw._conn, 'nwfilterLookupByName') as look:
            look.side_effect = fakelibvirt.libvirtError(u"\U0001F4A9")
            self.fw._get_filter_uuid('test')
        self.assertEqual(2, debug.call_count)
        self.assertEqual(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
                         debug.call_args_list[0][0][0])
|
{
"content_hash": "e7600c89d90ed08bbbe69f66218d8201",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 79,
"avg_line_length": 45.464949928469245,
"alnum_prop": 0.5571428571428572,
"repo_name": "HybridF5/nova",
"id": "6a203a19147cd41cb9ce715868e5311578c4c5db",
"size": "32436",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/test_firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django import forms
from models import Package
from models import Project, ProjectCategory
class ProjectFormEdit(forms.Form):
    """Form for editing a Project (package, repo location and categories).

    The original built the package ``choices`` list with a database query
    executed at import time, which both hit the database during module
    load and cached a stale package list for the life of the process.
    A callable is passed instead: Django evaluates it each time the field
    is used, so newly added packages appear without a restart.
    """
    package = forms.ChoiceField(
        choices=lambda: [(p.id, str(p)) for p in Package.objects.all()])
    username = forms.CharField(max_length=100)
    repository = forms.CharField(max_length=100)
    branch = forms.CharField(max_length=100)
    cargo_support = forms.BooleanField(required=False)
    widget = forms.SelectMultiple(attrs={
        'class': 'form-control chosen-select',
        'data-placeholder': 'select one or more project categories'})
    categories = forms.ModelMultipleChoiceField(
        required=False,
        queryset=ProjectCategory.objects.all(),
        widget=widget)

    def clean_package(self):
        """Resolve the submitted package id to a Package instance."""
        # Renamed from ``id`` to avoid shadowing the builtin.
        package_id = self.cleaned_data['package']
        return Package.objects.get(pk=package_id)

    def clean_username(self):
        """Strip surrounding whitespace from the username."""
        return self.cleaned_data['username'].strip()

    def clean_repository(self):
        """Strip surrounding whitespace from the repository name."""
        return self.cleaned_data['repository'].strip()

    def clean_branch(self):
        """Strip surrounding whitespace from the branch name."""
        return self.cleaned_data['branch'].strip()
class ProjectForm(ProjectFormEdit):
    """Creation form: additionally rejects duplicate projects."""

    # Check if project already exists
    def clean(self):
        cleaned_data = super(ProjectForm, self).clean()
        username = cleaned_data.get('username')
        repository = cleaned_data.get('repository')
        branch = cleaned_data.get('branch')
        if username and repository and branch:
            # exists() copes with 0, 1 or many matching rows; the original
            # used get(), which raised an unhandled MultipleObjectsReturned
            # (a 500, not a validation error) once duplicate rows existed.
            if Project.objects.filter(username__exact=username,
                                      repository__exact=repository,
                                      branch__exact=branch,
                                      deleted=False).exists():
                raise forms.ValidationError('Project already exists')
        return cleaned_data
|
{
"content_hash": "112d7e4caaa6f9130770ef26674bbf0b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 31.737704918032787,
"alnum_prop": 0.6291322314049587,
"repo_name": "youprofit/rust-ci-1",
"id": "776a6f285930ec80d4e1e7eb107a55287fd0caaa",
"size": "1936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tpt/ppatrigger/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1663"
},
{
"name": "HTML",
"bytes": "28545"
},
{
"name": "Perl",
"bytes": "2422"
},
{
"name": "Python",
"bytes": "65772"
},
{
"name": "Shell",
"bytes": "1797"
}
],
"symlink_target": ""
}
|
import sys
from ev3dev2 import Device
# Fail fast on interpreters older than 3.4, which this library does not
# support.
if sys.version_info < (3, 4):
    raise SystemError('Must be using Python 3.4 or higher')
class PowerSupply(Device):
    """
    A generic interface to read data from the system's power_supply class.
    Uses the built-in legoev3-battery if none is specified.
    """

    SYSTEM_CLASS_NAME = 'power_supply'
    SYSTEM_DEVICE_NAME_CONVENTION = '*'

    # Per-attribute cache slots handed to Device.get_attr_* (presumably
    # cached sysfs attribute handles -- see Device); all start as None.
    __slots__ = [
        '_measured_current',
        '_measured_voltage',
        '_max_voltage',
        '_min_voltage',
        '_technology',
        '_type',
    ]

    def __init__(self, address=None, name_pattern=SYSTEM_DEVICE_NAME_CONVENTION, name_exact=False, **kwargs):
        """Locate a power_supply device matching ``address``/``name_pattern``."""
        if address is not None:
            kwargs['address'] = address
        super(PowerSupply, self).__init__(self.SYSTEM_CLASS_NAME, name_pattern, name_exact, **kwargs)
        self._measured_current = None
        self._measured_voltage = None
        self._max_voltage = None
        self._min_voltage = None
        self._technology = None
        self._type = None

    @property
    def measured_current(self):
        """
        The measured current that the battery is supplying (in microamps)
        """
        self._measured_current, value = self.get_attr_int(self._measured_current, 'current_now')
        return value

    @property
    def measured_voltage(self):
        """
        The measured voltage that the battery is supplying (in microvolts)
        """
        self._measured_voltage, value = self.get_attr_int(self._measured_voltage, 'voltage_now')
        return value

    @property
    def max_voltage(self):
        """
        The design maximum voltage (in microvolts), read from
        ``voltage_max_design``.
        """
        self._max_voltage, value = self.get_attr_int(self._max_voltage, 'voltage_max_design')
        return value

    @property
    def min_voltage(self):
        """
        The design minimum voltage (in microvolts), read from
        ``voltage_min_design``.
        """
        self._min_voltage, value = self.get_attr_int(self._min_voltage, 'voltage_min_design')
        return value

    @property
    def technology(self):
        """
        The battery technology string reported by the ``technology``
        attribute.
        """
        self._technology, value = self.get_attr_string(self._technology, 'technology')
        return value

    @property
    def type(self):
        """
        The power supply type string reported by the ``type`` attribute.
        """
        self._type, value = self.get_attr_string(self._type, 'type')
        return value

    @property
    def measured_amps(self):
        """
        The measured current that the battery is supplying (in amps)
        """
        return self.measured_current / 1e6

    @property
    def measured_volts(self):
        """
        The measured voltage that the battery is supplying (in volts)
        """
        return self.measured_voltage / 1e6
|
{
"content_hash": "7fa3ddda972dd53b86fd6fbc0959f2ac",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 109,
"avg_line_length": 29.337209302325583,
"alnum_prop": 0.6091954022988506,
"repo_name": "rhempel/ev3dev-lang-python",
"id": "d33c38fcd2fd190a771864ee7dde5bc472e56bd5",
"size": "3977",
"binary": false,
"copies": "2",
"ref": "refs/heads/ev3dev-stretch",
"path": "ev3dev2/power.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296032"
}
],
"symlink_target": ""
}
|
import os
import glob
from subprocess import check_output
# Management address of the target BIG-IP.
bigipaddr = "10.1.1.4"


def main():
    """Redeploy every iApp template (*.json.deploy) in the current directory."""
    # sorted() makes the deployment order deterministic.
    for deploy_file in sorted(glob.glob('./*.json.deploy')):
        print(deploy_file)
        # Argument-list form avoids shell interpretation of the filename
        # (the original concatenated a shell command string); check_output
        # (already imported, previously unused) also raises
        # CalledProcessError instead of silently ignoring a non-zero exit
        # status as os.system() did.
        check_output(['./scripts/deploy_iapp_bigip.py', '-r', bigipaddr,
                      deploy_file])


if __name__ == "__main__":
    main()
|
{
"content_hash": "cf399690331b8391436d7b9eeb8df8f8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 23,
"alnum_prop": 0.6570048309178744,
"repo_name": "plcharbonneau/jenkins_demo",
"id": "6e7353b5bb14da47b698b30612ea8ee355b2fc7f",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redeployAllServices.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37698"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import unittest
from copy import copy
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.db import connection
from django.test import TestCase, override_settings
from django.utils._os import upath
if HAS_GEOS:
from django.contrib.gis.utils.layermapping import (
LayerMapping, LayerMapError, InvalidDecimal, InvalidString,
MissingForeignKey,
)
from django.contrib.gis.gdal import DataSource
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping,
)
# Absolute paths to the shapefile fixtures shipped in the sibling ../data
# directory; each one is used by the tests below.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')

# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1]  # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
    """Tests for LayerMapping's shapefile-to-model import behavior."""

    def test_init(self):
        "Testing LayerMapping initialization."
        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'

        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'

        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'

        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            with self.assertRaises(LayerMapError):
                LayerMapping(City, city_shp, bad_map)

        # A LookupError should be thrown for bogus encodings.
        with self.assertRaises(LookupError):
            LayerMapping(City, city_shp, city_mapping, encoding='foobar')

    def test_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()

        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())

        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)

            # Comparing the geometries.
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 5)

    def test_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        with self.assertRaises(InvalidDecimal):
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        Interstate.objects.all().delete()

        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)

        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())

        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)

        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)

            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)

            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)

    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name)  # Checking ForeignKey mapping.

            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())

    def test_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.

        # Telling LayerMapping that we want no transformations performed on the data.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False)

        # Specifying the source spatial reference system via the `source_srs` keyword.
        lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
        lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')

        # Unique may take tuple or string parameters.
        for arg in ('name', ('name', 'mpoly')):
            lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)

        # Now test for failures

        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
            with self.assertRaises(e):
                LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)

        # No source reference system defined in the shapefile, should raise an error.
        if connection.features.supports_transform:
            with self.assertRaises(LayerMapError):
                LayerMapping(County, co_shp, co_mapping)

        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping)
        bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping)
        bad_fk_map2['state'] = {'nombre': 'State'}
        with self.assertRaises(TypeError):
            LayerMapping(County, co_shp, bad_fk_map1, transform=False)
        with self.assertRaises(LayerMapError):
            LayerMapping(County, co_shp, bad_fk_map2, transform=False)

        # There exist no State models for the ForeignKey mapping to work -- should raise
        # a MissingForeignKey exception (this error would be ignored if the `strict`
        # keyword is not set).
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        with self.assertRaises(MissingForeignKey):
            lm.save(silent=True, strict=True)

        # Now creating the state models so the ForeignKey mapping may work.
        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])

        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them. For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        # `transform=False`: Specifies that no transform is to be done; this
        # has the effect of ignoring the spatial reference check (because the
        # county shapefile does not have implicit spatial reference info).
        #
        # `unique='name'`: Creates models on the condition that they have
        # unique county names; geometries from each feature however will be
        # appended to the geometry collection of the unique model. Thus,
        # all of the various islands in Honolulu county will be in in one
        # database record with a MULTIPOLYGON type.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        lm.save(silent=True, strict=True)

        # A reference that doesn't use the unique keyword; a new database record will
        # created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)

        # The county helper is called to ensure integrity of County models.
        self.county_helper()

    def test_test_fid_range_step(self):
        "Tests the `fid_range` keyword and the `step` keyword of .save()."
        # Function for clearing out all the counties before testing.
        def clear_counties():
            County.objects.all().delete()

        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])

        # Initializing the LayerMapping object to use in these tests.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')

        # Bad feature id ranges should raise a type error.
        bad_ranges = (5.0, 'foo', co_shp)
        for bad in bad_ranges:
            with self.assertRaises(TypeError):
                lm.save(fid_range=bad)

        # Step keyword should not be allowed w/`fid_range`.
        fr = (3, 5)  # layer[3:5]
        with self.assertRaises(LayerMapError):
            lm.save(fid_range=fr, step=10)
        lm.save(fid_range=fr)

        # Features IDs 3 & 4 are for Galveston County, Texas -- only
        # one model is returned because the `unique` keyword was set.
        qs = County.objects.all()
        self.assertEqual(1, qs.count())
        self.assertEqual('Galveston', qs[0].name)

        # Features IDs 5 and beyond for Honolulu County, Hawaii, and
        # FID 0 is for Pueblo County, Colorado.
        clear_counties()
        lm.save(fid_range=slice(5, None), silent=True, strict=True)  # layer[5:]
        lm.save(fid_range=slice(None, 1), silent=True, strict=True)  # layer[:1]

        # Only Pueblo & Honolulu counties should be present because of
        # the `unique` keyword. Have to set `order_by` on this QuerySet
        # or else MySQL will return a different ordering than the other dbs.
        qs = County.objects.order_by('name')
        self.assertEqual(2, qs.count())
        hi, co = tuple(qs)
        hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
        self.assertEqual('Pueblo', co.name)
        self.assertEqual(NUMS[co_idx], len(co.mpoly))
        self.assertEqual('Honolulu', hi.name)
        self.assertEqual(NUMS[hi_idx], len(hi.mpoly))

        # Testing the `step` keyword -- should get the same counties
        # regardless of we use a step that divides equally, that is odd,
        # or that is larger than the dataset.
        for st in (4, 7, 1000):
            clear_counties()
            lm.save(step=st, strict=True)
            self.county_helper(county_feat=False)

    def test_model_inheritance(self):
        "Tests LayerMapping on inherited models. See #12093."
        icity_mapping = {'name': 'Name',
                         'population': 'Population',
                         'density': 'Density',
                         'point': 'POINT',
                         'dt': 'Created',
                         }
        # Parent model has geometry field.
        lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
        lm1.save()

        # Grandparent has geometry field.
        lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
        lm2.save()

        self.assertEqual(6, ICity1.objects.count())
        self.assertEqual(3, ICity2.objects.count())

    def test_invalid_layer(self):
        "Tests LayerMapping on invalid geometries. See #15378."
        invalid_mapping = {'point': 'POINT'}
        lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
                          source_srs=4326)
        lm.save(silent=True)

    def test_charfield_too_short(self):
        "A value too long for City.name_short should raise InvalidString in strict mode."
        mapping = copy(city_mapping)
        mapping['name_short'] = 'Name'
        lm = LayerMapping(City, city_shp, mapping)
        with self.assertRaises(InvalidString):
            lm.save(silent=True, strict=True)

    def test_textfield(self):
        "String content fits also in a TextField"
        mapping = copy(city_mapping)
        mapping['name_txt'] = 'Name'
        lm = LayerMapping(City, city_shp, mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 3)
        self.assertEqual(City.objects.get(name='Houston').name_txt, "Houston")

    def test_encoded_name(self):
        """ Test a layer containing utf-8-encoded name """
        city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 1)
        self.assertEqual(City.objects.all()[0].name, "Zürich")
class OtherRouter(object):
    """Database router that directs every read and write to the 'other' alias."""

    def db_for_read(self, model, **hints):
        return 'other'

    def db_for_write(self, model, **hints):
        # Writes follow the same routing as reads.
        return self.db_for_read(model, **hints)

    def allow_relation(self, obj1, obj2, **hints):
        # No opinion: defer to other routers / Django's default behavior.
        return None

    def allow_migrate(self, db, app_label, **hints):
        return True
# Install a router that sends all ORM traffic to the 'other' database alias.
@override_settings(DATABASE_ROUTERS=[OtherRouter()])
class LayerMapRouterTest(TestCase):

    @unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
    def test_layermapping_default_db(self):
        # With OtherRouter active, LayerMapping should pick 'other'
        # as its database alias by default.
        lm = LayerMapping(City, city_shp, city_mapping)
        self.assertEqual(lm.using, 'other')
|
{
"content_hash": "010b5aa3d36bd7a11c17fdd00c2ba958",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 105,
"avg_line_length": 42.023460410557185,
"alnum_prop": 0.6280530355896721,
"repo_name": "cloudera/hue",
"id": "2f5fe58d48bdefc0673b2ad222b5d8f384b4bd4e",
"size": "14355",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Django-1.11.29/tests/gis_tests/layermap/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
import argparse
import glob
import os
import shutil
import sys
from typing import List
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
import pygments
from scripts.lib import clean_unused_caches
from scripts.lib.zulip_tools import (
ENDC,
OKBLUE,
get_dev_uuid_var_path,
get_tzdata_zi,
is_digest_obsolete,
run,
run_as_root,
write_new_digest,
)
from tools.setup.generate_zulip_bots_static_files import generate_zulip_bots_static_files
from version import PROVISION_VERSION
VENV_PATH = "/srv/zulip-py3-venv"
UUID_VAR_PATH = get_dev_uuid_var_path()

# Extract the tzdata database version string from the "# version XXXX"
# header line of tzdata's zone file; used below to detect stale generated
# timezone data.
with get_tzdata_zi() as f:
    line = f.readline()
    assert line.startswith("# version ")
    timezones_version = line[len("# version ") :]
def create_var_directories() -> None:
    """Ensure the var/<subdir> working directories (coverage, log, ...) exist."""
    base_dir = os.path.join(ZULIP_PATH, "var")
    for name in (
        "coverage",
        "log",
        "node-coverage",
        "test_uploads",
        "uploads",
        "xunit-test-results",
    ):
        # exist_ok makes this idempotent across provisioning runs.
        os.makedirs(os.path.join(base_dir, name), exist_ok=True)
def build_pygments_data_paths() -> List[str]:
    """Input files that determine whether pygments data must be rebuilt."""
    return [
        "tools/setup/build_pygments_data",
        "tools/setup/lang.json",
    ]
def build_timezones_data_paths() -> List[str]:
    """Input files that determine whether timezone data must be rebuilt."""
    return ["tools/setup/build_timezone_values"]
def compilemessages_paths() -> List[str]:
    """Source files feeding `manage.py compilemessages`: the command itself
    plus every translation catalog under locale/."""
    return [
        "zerver/management/commands/compilemessages.py",
        *glob.glob("locale/*/LC_MESSAGES/*.po"),
        *glob.glob("locale/*/translations.json"),
    ]
def inline_email_css_paths() -> List[str]:
    """Inputs of the email-CSS inlining step: script, stylesheet, and
    every *.source.html email template."""
    return [
        "scripts/setup/inline_email_css.py",
        "templates/zerver/emails/email.css",
        *glob.glob("templates/zerver/emails/*.source.html"),
    ]
def configure_rabbitmq_paths() -> List[str]:
    """Input files for the configure-rabbitmq provisioning step."""
    return ["scripts/setup/configure-rabbitmq"]
def setup_shell_profile(shell_profile: str) -> None:
    """Add environment-activation commands to the given shell profile file.

    Each command is appended only if not already present, so repeated
    provisioning runs do not duplicate lines.
    """
    shell_profile_path = os.path.expanduser(shell_profile)

    def write_command(command: str) -> None:
        if os.path.exists(shell_profile_path):
            with open(shell_profile_path) as shell_profile_file:
                lines = [line.strip() for line in shell_profile_file.readlines()]
            if command not in lines:
                with open(shell_profile_path, "a+") as shell_profile_file:
                    # Use write(), not writelines(): we are appending a single
                    # string, and writelines() expects an iterable of lines
                    # (on a str it iterates character by character).
                    shell_profile_file.write(command + "\n")
        else:
            with open(shell_profile_path, "w") as shell_profile_file:
                shell_profile_file.write(command + "\n")

    source_activate_command = "source " + os.path.join(VENV_PATH, "bin", "activate")
    write_command(source_activate_command)
    if os.path.exists("/srv/zulip"):
        write_command("cd /srv/zulip")
def setup_bash_profile() -> None:
    """Select a bash profile file to add setup code to."""

    BASH_PROFILES = [
        os.path.expanduser(p) for p in ("~/.bash_profile", "~/.bash_login", "~/.profile")
    ]

    def clear_old_profile() -> None:
        # An earlier version of this script would output a fresh .bash_profile
        # even though a .profile existed in the image used. As a convenience to
        # existing developers (and, perhaps, future developers git-bisecting the
        # provisioning scripts), check for this situation, and blow away the
        # created .bash_profile if one is found.
        BASH_PROFILE = BASH_PROFILES[0]
        DOT_PROFILE = BASH_PROFILES[2]
        OLD_PROFILE_TEXT = "source /srv/zulip-py3-venv/bin/activate\ncd /srv/zulip\n"

        if os.path.exists(DOT_PROFILE):
            try:
                with open(BASH_PROFILE) as f:
                    profile_contents = f.read()
                if profile_contents == OLD_PROFILE_TEXT:
                    os.unlink(BASH_PROFILE)
            except FileNotFoundError:
                pass

    clear_old_profile()

    # Use the first profile file that already exists; only if none exists
    # do we create a fresh .bash_profile.
    for candidate_profile in BASH_PROFILES:
        if os.path.exists(candidate_profile):
            setup_shell_profile(candidate_profile)
            break
    else:
        # no existing bash profile found; claim .bash_profile
        setup_shell_profile(BASH_PROFILES[0])
def need_to_run_build_pygments_data() -> bool:
    """Whether the generated pygments data is missing or out of date."""
    if os.path.exists("static/generated/pygments_data.json"):
        return is_digest_obsolete(
            "build_pygments_data_hash",
            build_pygments_data_paths(),
            [pygments.__version__],
        )
    # Never generated (e.g. fresh checkout) -- must build.
    return True
def need_to_run_build_timezone_data() -> bool:
    """Whether the generated timezone data is missing or out of date."""
    if os.path.exists("static/generated/timezones.json"):
        return is_digest_obsolete(
            "build_timezones_data_hash",
            build_timezones_data_paths(),
            [timezones_version],
        )
    # Never generated (e.g. fresh checkout) -- must build.
    return True
def need_to_run_compilemessages() -> bool:
    """Whether `manage.py compilemessages` must be (re)run."""
    language_map = "locale/language_name_map.json"
    if os.path.exists(language_map):
        return is_digest_obsolete(
            "last_compilemessages_hash",
            compilemessages_paths(),
        )
    # User may have cleaned their Git checkout.
    print("Need to run compilemessages due to missing language_name_map.json")
    return True
def need_to_run_inline_email_css() -> bool:
    """Whether the compiled email templates are missing or out of date."""
    if os.path.exists("templates/zerver/emails/compiled/"):
        return is_digest_obsolete(
            "last_email_source_files_hash",
            inline_email_css_paths(),
        )
    # Compiled templates have never been generated -- must run.
    return True
def need_to_run_configure_rabbitmq(settings_list: List[str]) -> bool:
    """Whether RabbitMQ configuration inputs changed or the broker is
    currently unreachable."""
    if is_digest_obsolete(
        "last_configure_rabbitmq_hash",
        configure_rabbitmq_paths(),
        settings_list,
    ):
        return True

    try:
        from zerver.lib.queue import SimpleQueueClient

        # Constructing the client verifies we can actually connect.
        SimpleQueueClient()
    except Exception:
        return True
    return False
def main(options: argparse.Namespace) -> int:
    """Run the inner development-provisioning steps and return an exit code.

    Steps whose inputs are unchanged (tracked via content digests) are
    skipped unless `options.is_force` is set.
    """
    setup_bash_profile()
    setup_shell_profile("~/.zprofile")

    # This needs to happen before anything that imports zproject.settings.
    run(["scripts/setup/generate_secrets.py", "--development"])

    create_var_directories()

    # The `build_emoji` script requires `emoji-datasource` package
    # which we install via npm; thus this step is after installing npm
    # packages.
    run(["tools/setup/emoji/build_emoji"])

    # copy over static files from the zulip_bots package
    generate_zulip_bots_static_files()

    if options.is_force or need_to_run_build_pygments_data():
        run(["tools/setup/build_pygments_data"])
        write_new_digest(
            "build_pygments_data_hash",
            build_pygments_data_paths(),
            [pygments.__version__],
        )
    else:
        print("No need to run `tools/setup/build_pygments_data`.")

    if options.is_force or need_to_run_build_timezone_data():
        run(["tools/setup/build_timezone_values"])
        write_new_digest(
            "build_timezones_data_hash",
            build_timezones_data_paths(),
            [timezones_version],
        )
    else:
        print("No need to run `tools/setup/build_timezone_values`.")

    if options.is_force or need_to_run_inline_email_css():
        run(["scripts/setup/inline_email_css.py"])
        write_new_digest(
            "last_email_source_files_hash",
            inline_email_css_paths(),
        )
    else:
        print("No need to run `scripts/setup/inline_email_css.py`.")

    if not options.is_build_release_tarball_only:
        # The following block is skipped when we just need the development
        # environment to build a release tarball.

        # Need to set up Django before using template_status
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django

        django.setup()

        from django.conf import settings
        from zerver.lib.test_fixtures import (
            DEV_DATABASE,
            TEST_DATABASE,
            destroy_leaked_test_databases,
        )

        assert settings.RABBITMQ_PASSWORD is not None
        if options.is_force or need_to_run_configure_rabbitmq([settings.RABBITMQ_PASSWORD]):
            run_as_root(["scripts/setup/configure-rabbitmq"])
            write_new_digest(
                "last_configure_rabbitmq_hash",
                configure_rabbitmq_paths(),
                [settings.RABBITMQ_PASSWORD],
            )
        else:
            # Fixed message: previously had an unbalanced backtick
            # ("`scripts/setup/configure-rabbitmq.").
            print("No need to run `scripts/setup/configure-rabbitmq`.")

        dev_template_db_status = DEV_DATABASE.template_status()
        if options.is_force or dev_template_db_status == "needs_rebuild":
            run(["tools/setup/postgresql-init-dev-db"])
            if options.skip_dev_db_build:
                # We don't need to build the manual development
                # database on continuous integration for running tests, so we can
                # just leave it as a template db and save a minute.
                #
                # Important: We don't write a digest as that would
                # incorrectly claim that we ran migrations.
                pass
            else:
                run(["tools/rebuild-dev-database"])
                DEV_DATABASE.write_new_db_digest()
        elif dev_template_db_status == "run_migrations":
            DEV_DATABASE.run_db_migrations()
        elif dev_template_db_status == "current":
            print("No need to regenerate the dev DB.")

        test_template_db_status = TEST_DATABASE.template_status()
        if options.is_force or test_template_db_status == "needs_rebuild":
            run(["tools/setup/postgresql-init-test-db"])
            run(["tools/rebuild-test-database"])
            TEST_DATABASE.write_new_db_digest()
        elif test_template_db_status == "run_migrations":
            TEST_DATABASE.run_db_migrations()
        elif test_template_db_status == "current":
            print("No need to regenerate the test DB.")

        if options.is_force or need_to_run_compilemessages():
            run(["./manage.py", "compilemessages"])
            write_new_digest(
                "last_compilemessages_hash",
                compilemessages_paths(),
            )
        else:
            print("No need to run `manage.py compilemessages`.")

        destroyed = destroy_leaked_test_databases()
        if destroyed:
            print(f"Dropped {destroyed} stale test databases!")

        clean_unused_caches.main(
            argparse.Namespace(
                threshold_days=6,
                # The defaults here should match parse_cache_script_args in zulip_tools.py
                dry_run=False,
                verbose=False,
                no_headings=True,
            )
        )

    # Keeping this cache file around can cause eslint to throw
    # random TypeErrors when new/updated dependencies are added
    if os.path.isfile(".eslintcache"):
        # Remove this block when
        # https://github.com/eslint/eslint/issues/11639 is fixed
        # upstream.
        os.remove(".eslintcache")

    # Clean up the root of the `var/` directory for various
    # testing-related files that we have migrated to
    # `var/<uuid>/test-backend`.
    print("Cleaning var/ directory files...")
    var_paths = glob.glob("var/test*")
    var_paths.append("var/bot_avatar")
    for path in var_paths:
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except FileNotFoundError:
            pass

    version_file = os.path.join(UUID_VAR_PATH, "provision_version")
    print(f"writing to {version_file}\n")
    with open(version_file, "w") as f:
        f.write(".".join(map(str, PROVISION_VERSION)) + "\n")

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
if __name__ == "__main__":
    # Command-line entry point: parse provisioning flags and run main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--force",
        action="store_true",
        dest="is_force",
        help="Ignore all provisioning optimizations.",
    )
    parser.add_argument(
        "--build-release-tarball-only",
        action="store_true",
        dest="is_build_release_tarball_only",
        help="Provision for test suite with production settings.",
    )
    parser.add_argument(
        "--skip-dev-db-build", action="store_true", help="Don't run migrations on dev database."
    )

    options = parser.parse_args()
    sys.exit(main(options))
|
{
"content_hash": "28827416364979097d3c3afd63bea42b",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 96,
"avg_line_length": 31.798982188295167,
"alnum_prop": 0.6103064735536529,
"repo_name": "andersk/zulip",
"id": "acc1856ff0e41434243c71b39eb802f82b15ddfd",
"size": "12497",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tools/lib/provision_inner.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
}
|
import logging
from twisted.internet import reactor, ssl
from txjsonrpc.web.jsonrpc import Proxy
from OpenSSL import SSL
from twisted.python import log
def printValue(value):
    # Success callback: print the JSON-RPC call result.
    print "Result: %s" % str(value)
def printError(error):
    # Errback: print the failure passed down the Deferred chain.
    print 'error', error
def shutDown(data):
    # Runs after either success or failure: stop the Twisted reactor.
    print "Shutting down reactor..."
    reactor.stop()
def verifyCallback(connection, x509, errnum, errdepth, ok):
    """OpenSSL verify hook: log the peer certificate and accept it iff
    OpenSSL's own verification (`ok`) passed."""
    log.msg(connection.__str__())
    if ok:
        log.msg('good server cert: %s' % x509.get_subject(), logLevel=logging.INFO)
        return True
    log.msg('invalid server cert: %s' % x509.get_subject(), logLevel=logging.ERROR)
    return False
class AltCtxFactory(ssl.ClientContextFactory):
    """Client SSL context factory that verifies the server certificate
    against the local cacert.pem via verifyCallback."""
    def getContext(self):
        #self.method = SSL.SSLv23_METHOD
        ctx = ssl.ClientContextFactory.getContext(self)
        # Require peer verification; verifyCallback decides accept/reject.
        ctx.set_verify(SSL.VERIFY_PEER, verifyCallback)
        ctx.load_verify_locations("cacert.pem")
        #ctx.use_certificate_file('keys/client.crt')
        #ctx.use_privatekey_file('keys/client.key')
        return ctx
import sys
log.startLogging(sys.stdout)

# Connect to the JSON-RPC server over HTTPS (certificate checked by
# AltCtxFactory), call add(3, 5), print the result or error, then stop.
#proxy = Proxy('https://127.0.0.1:7443/', ssl_ctx_factory=AltCtxFactory)
proxy = Proxy('https://127.0.0.2:7443/', ssl_ctx_factory=AltCtxFactory)
d = proxy.callRemote('add', 3, 5)
d.addCallback(printValue).addErrback(printError).addBoth(shutDown)
reactor.run()
|
{
"content_hash": "cf38c32a7eb9dfe520b10b2b3dc2e857",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 29.27659574468085,
"alnum_prop": 0.6911337209302325,
"repo_name": "medialab/txjsonrpc",
"id": "bec17d266f70a7c281147ef79d78f9fa5dd58dc7",
"size": "1376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/ssl/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126343"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
}
|
from .curl_connector import CurlConnector
|
{
"content_hash": "289e07d9a579a652eee698c709d6714c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 41,
"avg_line_length": 42,
"alnum_prop": 0.8571428571428571,
"repo_name": "wing3s/flask-chatterbot",
"id": "4777971d89c17c49e6e916454855764c4c1672ea",
"size": "42",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskchatterbot/connectors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2350"
}
],
"symlink_target": ""
}
|
import unittest
import cli
import swarm_worker
class CliTest(unittest.TestCase):
    """Tests for argument parsing in the cli and swarm_worker modules."""

    def test_parse_arguments(self):
        """Both --template and --target are parsed when given."""
        args = cli.parse_arguments(['--template', 'test.template', '--target', '/etc/target.file'])
        self.assertEqual(args.template, 'test.template')
        self.assertEqual(args.target, '/etc/target.file')

    def test_parse_arguments_defaults(self):
        """--target is optional and defaults to None."""
        args = cli.parse_arguments(['--template', 'test.template'])
        self.assertEqual(args.template, 'test.template')
        self.assertIsNone(args.target, None)

    def test_parse_empty_arguments(self):
        """Calling with no arguments (or an empty list) raises SystemExit."""
        self.assertRaises(SystemExit, cli.parse_arguments)
        self.assertRaises(SystemExit, cli.parse_arguments, list())

    def test_prints_help(self):
        """--help prints the program description and exits with code 0."""
        import sys
        sys_stdout = sys.stdout
        try:
            class _CapturingOutput(object):
                # Minimal file-like object to capture help text on stdout.
                def __init__(self):
                    self.data = ''

                def write(self, data):
                    self.data += data

            sys.stdout = _CapturingOutput()

            try:
                cli.parse_arguments(['--help'])
            except SystemExit as ex:
                self.assertEqual(0, ex.code)

            self.assertIn('Template generator based on Docker runtime information', sys.stdout.data)
        finally:
            sys.stdout = sys_stdout

    def test_restart_argument(self):
        """--restart may be repeated and accumulates container names."""
        args = cli.parse_arguments(['--template', 'test.template', '--restart', 'target-container'])
        self.assertEqual(args.restart, ['target-container'])

        args = cli.parse_arguments(['--template', 'test.template',
                                    '--restart', 'target-container',
                                    '--restart', 'second-container'])
        self.assertEqual(args.restart, ['target-container', 'second-container'])

    def test_signal_argument(self):
        """--signal takes (container, signal) pairs and may be repeated."""
        args = cli.parse_arguments(['--template', 'test.template', '--signal', 'target-container', 'HUP'])
        self.assertEqual(args.signal, [['target-container', 'HUP']])

        args = cli.parse_arguments(['--template', 'test.template',
                                    '--signal', 'target-container', 'HUP',
                                    '--signal', 'second-container', 'INT'])
        self.assertEqual(args.signal, [['target-container', 'HUP'], ['second-container', 'INT']])

    def test_swarm_worker_arguments(self):
        """swarm_worker requires --manager; other options have defaults."""
        self.assertRaises(SystemExit, swarm_worker.parse_arguments)
        self.assertRaises(SystemExit, swarm_worker.parse_arguments, ['--debug'])

        args = swarm_worker.parse_arguments(['--manager', 'manager-host',
                                             '--retries', '12',
                                             '--debug'])
        self.assertEqual(args.manager, ['manager-host'])
        self.assertEqual(args.retries, 12)
        self.assertEqual(args.events, ['start', 'stop', 'die', 'health_status'])
        self.assertTrue(args.debug)

        args = swarm_worker.parse_arguments(['--manager', 'manager1', 'manager2',
                                             '--events', 'start', 'stop'])
        self.assertEqual(args.manager, ['manager1', 'manager2'])
        self.assertEqual(args.events, ['start', 'stop'])
        self.assertFalse(args.debug)
|
{
"content_hash": "dc59bdad4849bd45a3d87a301b9149a3",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 106,
"avg_line_length": 36.78888888888889,
"alnum_prop": 0.5623678646934461,
"repo_name": "rycus86/docker-pygen",
"id": "91fd7d5610ebcfa346f730c18a3f889b6004484d",
"size": "3311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "168797"
},
{
"name": "Shell",
"bytes": "1423"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import glob
import os.path
import subprocess
import sys
# Tests skipped when --swiftshader is given (see main()); entries use the
# 'subdir/file.amber' form relative to the test directory.
SWIFTSHADER_SUPPRESSIONS = [
    'images/write_image2d_r32f.amber',
    'images/write_image2d_rg32f.amber',
    'images/write_image2d_rgba32f.amber',
    'images/write_image2d_r32ui.amber',
    'images/write_image2d_rg32ui.amber',
    'images/write_image2d_rgba32ui.amber',
    'images/write_image2d_r32i.amber',
    'images/write_image2d_rg32i.amber',
    'images/write_image2d_rgba32i.amber',
    'integer/add_sat_short.amber',
    'integer/ctz_long.amber',
    'integer/ctz_short.amber',
    'integer/clz_long.amber',
    'integer/clz_short.amber',
    'integer/sub_sat_short.amber',
    'integer/sub_sat_ushort.amber']
def main():
    """Parse options, collect the .amber tests, and run them through amber.

    Raises RuntimeError with amber's output when any test fails; otherwise
    prints the output and exits with status 0.
    """
    parser = argparse.ArgumentParser('Run Amber tests (without validation layers)')
    parser.add_argument('--dir', dest='test_dir', default='.',
                        help='Specify the base directory of tests')
    parser.add_argument('--amber', dest='amber',
                        help='Specify the path to the amber executable')
    parser.add_argument('--swiftshader', dest='swiftshader', action='store_true',
                        help='Only run tests compatible with Swiftshader')
    parser.add_argument('--vk-icd', dest='vk_icd',
                        help='Specify the path to the Vulkan ICD json')
    parser.add_argument('--one-dir', dest='one_dir', action='store_true',
                        help='Avoid large globs and assume tests are directly in --dir')
    args = parser.parse_args()

    if args.vk_icd:
        os.environ['VK_ICD_FILENAMES'] = args.vk_icd

    # With --one-dir the tests sit directly in the test directory; otherwise
    # they are grouped one directory level down.
    pattern = '*.amber' if args.one_dir else '**/*.amber'
    tests = glob.glob(os.path.join(args.test_dir, pattern))

    if args.swiftshader:
        for suppressed in SWIFTSHADER_SUPPRESSIONS:
            # Suppressions are listed as 'subdir/file'; drop the subdir when
            # the tests were globbed from a single flat directory.
            name = suppressed.split('/')[1] if args.one_dir else suppressed
            candidate = os.path.join(args.test_dir, name)
            if candidate in tests:
                tests.remove(candidate)

    process = subprocess.Popen([args.amber, '-d', '-V'] + tests,
                               stdout=subprocess.PIPE)
    (output, _) = process.communicate()
    if process.returncode != 0:
        raise RuntimeError('Failed tests \'{}\''.format(output.decode('utf-8')))
    print(output.decode('utf-8'))
    sys.exit(0)


if __name__ == '__main__':
    main()
|
{
"content_hash": "2333871270f036dff9b10286441d94ff",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 86,
"avg_line_length": 33.44285714285714,
"alnum_prop": 0.6373344724476719,
"repo_name": "google/clspv",
"id": "73d56965c5d4d54903c7586b546192cf1e2d715f",
"size": "2968",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "amber/run_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3924"
},
{
"name": "C",
"bytes": "2150005"
},
{
"name": "C++",
"bytes": "1400016"
},
{
"name": "CMake",
"bytes": "31638"
},
{
"name": "Clojure",
"bytes": "118"
},
{
"name": "Cool",
"bytes": "4522"
},
{
"name": "LLVM",
"bytes": "2736336"
},
{
"name": "Perl",
"bytes": "2008"
},
{
"name": "Python",
"bytes": "73512"
},
{
"name": "Shell",
"bytes": "22226"
}
],
"symlink_target": ""
}
|
import json
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import scaleio
from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks
class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver):
    """Test cases for ``ScaleIODriver.create_volume_from_snapshot()``"""
    def setUp(self):
        """Setup a test case environment.
        Creates fake volume and snapshot objects and sets up the required
        API responses.
        """
        super(TestCreateVolumeFromSnapShot, self).setUp()
        ctx = context.RequestContext('fake', 'fake', auth_token=True)
        self.snapshot = fake_snapshot.fake_snapshot_obj(ctx)
        # Ids appear in the mocked REST paths base64-encoded and URL-quoted
        # twice, so precompute that doubly-quoted form here.
        self.snapshot_name_2x_enc = urllib.parse.quote(
            urllib.parse.quote(self.driver._id_to_base64(self.snapshot.id))
        )
        self.volume = fake_volume.fake_volume_obj(ctx)
        self.volume_name_2x_enc = urllib.parse.quote(
            urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
        )
        # Canned successful reply for the snapshotVolumes action.
        self.snapshot_reply = json.dumps(
            {
                'volumeIdList': [self.volume.id],
                'snapshotGroupId': 'snap_group'
            }
        )
        # Mocked HTTPS responses keyed by request path, one mapping per
        # response mode exercised by the tests below.
        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.Valid: {
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: self.snapshot.id,
                'instances/System/action/snapshotVolumes':
                    self.snapshot_reply,
            },
            self.RESPONSE_MODE.BadStatus: {
                'instances/System/action/snapshotVolumes':
                    self.BAD_STATUS_RESPONSE,
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
            },
            self.RESPONSE_MODE.Invalid: {
                'instances/System/action/snapshotVolumes':
                    mocks.MockHTTPSResponse(
                        {
                            'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
                            'message': 'BadStatus Volume Test',
                        }, 400
                    ),
                'types/Volume/instances/getByName::' +
                self.snapshot_name_2x_enc: None,
            },
        }
    def test_bad_login(self):
        """A bad REST status is surfaced as VolumeBackendAPIException."""
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume_from_snapshot,
            self.volume,
            self.snapshot
        )
    def test_invalid_snapshot(self):
        """A volume-not-found error code is surfaced as VolumeBackendAPIException."""
        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume_from_snapshot,
            self.volume,
            self.snapshot
        )
    def test_create_volume_from_snapshot(self):
        """The happy path completes without raising."""
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
|
{
"content_hash": "3f87c3991a353e11ed02e53009aae20a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 37.093023255813954,
"alnum_prop": 0.590282131661442,
"repo_name": "Datera/cinder",
"id": "9623b2ca03dd52fce597b9aea5e2fb69d35fb85e",
"size": "3832",
"binary": false,
"copies": "5",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume_from_snapshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class SubscriptionCreateOrUpdateParameters(Model):
    """Parameters supplied to the Create Or Update Subscription operation.
    :param location: Subscription data center location.
    :type location: str
    :param type: Resource manager type of the resource.
    :type type: str
    :param accessed_at: Last time there was a receive request to this
    subscription.
    :type accessed_at: datetime
    :param auto_delete_on_idle: TimeSpan idle interval after which the topic
    is automatically deleted. The minimum duration is 5 minutes.
    :type auto_delete_on_idle: str
    :param count_details:
    :type count_details: :class:`MessageCountDetails
    <azure.mgmt.servicebus.models.MessageCountDetails>`
    :param created_at: Exact time the message was created.
    :type created_at: datetime
    :param default_message_time_to_live: Default message time to live value.
    This is the duration after which the message expires, starting from when
    the message is sent to Service Bus. This is the default value used when
    TimeToLive is not set on a message itself.
    :type default_message_time_to_live: str
    :param dead_lettering_on_filter_evaluation_exceptions: Value that
    indicates whether a subscription has dead letter support on filter
    evaluation exceptions.
    :type dead_lettering_on_filter_evaluation_exceptions: bool
    :param dead_lettering_on_message_expiration: Value that indicates whether
    a subscription has dead letter support when a message expires.
    :type dead_lettering_on_message_expiration: bool
    :param enable_batched_operations: Value that indicates whether
    server-side batched operations are enabled.
    :type enable_batched_operations: bool
    :param entity_availability_status: Entity availability status for the
    topic. Possible values include: 'Available', 'Limited', 'Renaming',
    'Restoring', 'Unknown'
    :type entity_availability_status: str or :class:`EntityAvailabilityStatus
    <azure.mgmt.servicebus.models.EntityAvailabilityStatus>`
    :param is_read_only: Value that indicates whether the entity description
    is read-only.
    :type is_read_only: bool
    :param lock_duration: The lock duration time span for the subscription.
    :type lock_duration: str
    :param max_delivery_count: Number of maximum deliveries.
    :type max_delivery_count: int
    :param message_count: Number of messages.
    :type message_count: long
    :param requires_session: Value indicating if a subscription supports the
    concept of sessions.
    :type requires_session: bool
    :param status: Enumerates the possible values for the status of a
    messaging entity. Possible values include: 'Active', 'Creating',
    'Deleting', 'Disabled', 'ReceiveDisabled', 'Renaming', 'Restoring',
    'SendDisabled', 'Unknown'
    :type status: str or :class:`EntityStatus
    <azure.mgmt.servicebus.models.EntityStatus>`
    :param updated_at: The exact time the message was updated.
    :type updated_at: datetime
    """
    # Validation rules consumed by the msrest base Model; only 'location'
    # is mandatory here.
    _validation = {
        'location': {'required': True},
    }
    # Maps Python attribute names to their wire (JSON) keys and msrest
    # (de)serialization types; 'properties.x' keys denote nested fields.
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'accessed_at': {'key': 'properties.accessedAt', 'type': 'iso-8601'},
        'auto_delete_on_idle': {'key': 'properties.autoDeleteOnIdle', 'type': 'str'},
        'count_details': {'key': 'properties.countDetails', 'type': 'MessageCountDetails'},
        'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
        'default_message_time_to_live': {'key': 'properties.defaultMessageTimeToLive', 'type': 'str'},
        'dead_lettering_on_filter_evaluation_exceptions': {'key': 'properties.deadLetteringOnFilterEvaluationExceptions', 'type': 'bool'},
        'dead_lettering_on_message_expiration': {'key': 'properties.deadLetteringOnMessageExpiration', 'type': 'bool'},
        'enable_batched_operations': {'key': 'properties.enableBatchedOperations', 'type': 'bool'},
        'entity_availability_status': {'key': 'properties.entityAvailabilityStatus', 'type': 'EntityAvailabilityStatus'},
        'is_read_only': {'key': 'properties.isReadOnly', 'type': 'bool'},
        'lock_duration': {'key': 'properties.lockDuration', 'type': 'str'},
        'max_delivery_count': {'key': 'properties.maxDeliveryCount', 'type': 'int'},
        'message_count': {'key': 'properties.messageCount', 'type': 'long'},
        'requires_session': {'key': 'properties.requiresSession', 'type': 'bool'},
        'status': {'key': 'properties.status', 'type': 'EntityStatus'},
        'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
    }
    def __init__(self, location, type=None, accessed_at=None, auto_delete_on_idle=None, count_details=None, created_at=None, default_message_time_to_live=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, enable_batched_operations=None, entity_availability_status=None, is_read_only=None, lock_duration=None, max_delivery_count=None, message_count=None, requires_session=None, status=None, updated_at=None):
        """Store all fields verbatim; see the class docstring for meanings."""
        self.location = location
        self.type = type
        self.accessed_at = accessed_at
        self.auto_delete_on_idle = auto_delete_on_idle
        self.count_details = count_details
        self.created_at = created_at
        self.default_message_time_to_live = default_message_time_to_live
        self.dead_lettering_on_filter_evaluation_exceptions = dead_lettering_on_filter_evaluation_exceptions
        self.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
        self.enable_batched_operations = enable_batched_operations
        self.entity_availability_status = entity_availability_status
        self.is_read_only = is_read_only
        self.lock_duration = lock_duration
        self.max_delivery_count = max_delivery_count
        self.message_count = message_count
        self.requires_session = requires_session
        self.status = status
        self.updated_at = updated_at
|
{
"content_hash": "637985ea6223e95d4138bea71187cdfb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 458,
"avg_line_length": 57.09345794392523,
"alnum_prop": 0.6988050417416926,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "8fc8179f69936afa362391d12ce5bec7916cecde",
"size": "6583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/models/subscription_create_or_update_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
import pkg_resources
import numpy as np
import skyfield
from skyfield.api import load
from skyfield.functions import load_bundled_npy
def main():
    """Print version numbers and the coverage of the bundled data tables."""
    print('Skyfield version: {0}'.format(skyfield.__version__))
    for dependency in ('jplephem', 'sgp4'):
        print('{0} version: {1}'.format(dependency, version_of(dependency)))
    timescale = load.timescale()
    date_format = '%Y-%m-%d'
    seconds_per_day = 24 * 60 * 60
    # The last entry of the TAI leap table is the instant just after the
    # most recent leap second; step back one second to land on it.
    last_leap_jd = (timescale._leap_tai[-1] - 1) / seconds_per_day
    print('Built-in leap seconds table ends with leap second at: {0}'
          .format(timescale.tai_jd(last_leap_jd).utc_strftime()))
    # The bundled array stores TT julian dates with the day index removed;
    # add it back to recover the actual dates.
    bundled = load_bundled_npy('iers.npz')
    tt_days = bundled['tt_jd_minus_arange']
    tt_days += np.arange(len(tt_days))
    first_day = timescale.tt_jd(tt_days[0])
    last_day = timescale.tt_jd(tt_days[-1])
    print('Built-in ∆T table from finals2000A.all covers: {0} to {1}'
          .format(first_day.utc_strftime(date_format),
                  last_day.utc_strftime(date_format)))
def version_of(distribution):
    """Return the installed version of *distribution*, or 'Unknown'."""
    try:
        return pkg_resources.get_distribution(distribution).version
    except pkg_resources.DistributionNotFound:
        return 'Unknown'


main()
|
{
"content_hash": "aea4fa99c8bbead872e908f519a64ed6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 69,
"avg_line_length": 31.771428571428572,
"alnum_prop": 0.6429856115107914,
"repo_name": "skyfielders/python-skyfield",
"id": "75114d4ff7fdcc0bf7bf33f5e30774df5befcc90",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skyfield/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "781"
},
{
"name": "HCL",
"bytes": "21437"
},
{
"name": "Makefile",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "985459"
},
{
"name": "Shell",
"bytes": "2393"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from helper import get_name
from numpy.testing import assert_almost_equal
from onnx import helper
from onnx.defs import onnx_opset_version
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
import onnxruntime as onnxrt
from onnxruntime.capi._pybind_state import OrtDevice as C_OrtDevice # pylint: disable=E0611
from onnxruntime.capi._pybind_state import OrtValue as C_OrtValue
from onnxruntime.capi._pybind_state import OrtValueVector, SessionIOBinding
class TestIOBinding(unittest.TestCase):
    """Tests for onnxruntime I/O binding (``io_binding()`` / ``SessionIOBinding``).

    All tests run the bundled ``mul_1.onnx`` model (one input ``X``, one
    output ``Y``) with inputs/outputs bound to CPU or CUDA memory, and check
    that the computed output round-trips back to the host intact.

    Fixes over the previous revision: removed an unused local that shadowed
    the builtin ``input`` in ``test_bind_input_to_cpu_arr``, and renamed the
    remaining ``input`` locals to avoid shadowing the builtin.
    """

    def create_ortvalue_input_on_gpu(self):
        """Return the canonical 3x2 float32 input as an OrtValue on CUDA device 0."""
        return onnxrt.OrtValue.ortvalue_from_numpy(
            np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32), "cuda", 0
        )

    def create_ortvalue_alternate_input_on_gpu(self):
        """Return a second (doubled) 3x2 float32 input as an OrtValue on CUDA."""
        return onnxrt.OrtValue.ortvalue_from_numpy(
            np.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], dtype=np.float32),
            "cuda",
            0,
        )

    def create_uninitialized_ortvalue_input_on_gpu(self):
        """Return an uninitialized 3x2 float32 OrtValue on CUDA (output buffer)."""
        return onnxrt.OrtValue.ortvalue_from_shape_and_type([3, 2], np.float32, "cuda", 0)

    def create_numpy_input(self):
        """Return the canonical 3x2 float32 input as a host numpy array."""
        return np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)

    def create_expected_output(self):
        """Expected model output for the canonical input."""
        return np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)

    def create_expected_output_alternate(self):
        """Expected model output for the alternate (doubled) input."""
        return np.array([[2.0, 8.0], [18.0, 32.0], [50.0, 72.0]], dtype=np.float32)

    def test_bind_input_to_cpu_arr(self):
        """Bind a host numpy array as input and read the output back on CPU."""
        session = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
        io_binding = session.io_binding()
        # Bind Numpy object (input) that's on CPU to wherever the model needs it
        io_binding.bind_cpu_input("X", self.create_numpy_input())
        # Bind output to CPU
        io_binding.bind_output("Y")
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)
        ort_output = io_binding.copy_outputs_to_cpu()[0]
        # Validate results
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_output))

    def test_bind_input_types(self):
        """Round-trip an Identity model through IO binding for every dtype.

        Runs on CPU always, and on CUDA too when that provider is present;
        binds the input first as an OrtValue and then via a raw data pointer.
        """
        opset = onnx_opset_version()
        devices = [
            (
                C_OrtDevice(C_OrtDevice.cpu(), C_OrtDevice.default_memory(), 0),
                ["CPUExecutionProvider"],
            )
        ]
        if "CUDAExecutionProvider" in onnxrt.get_all_providers():
            devices.append(
                (
                    C_OrtDevice(C_OrtDevice.cuda(), C_OrtDevice.default_memory(), 0),
                    ["CUDAExecutionProvider"],
                )
            )
        for device, provider in devices:
            for dtype in [
                np.float32,
                np.float64,
                np.int32,
                np.uint32,
                np.int64,
                np.uint64,
                np.int16,
                np.uint16,
                np.int8,
                np.uint8,
                np.float16,
                np.bool_,
            ]:
                with self.subTest(dtype=dtype, device=str(device)):
                    x = np.arange(8).reshape((-1, 2)).astype(dtype)
                    proto_dtype = NP_TYPE_TO_TENSOR_TYPE[x.dtype]
                    X = helper.make_tensor_value_info("X", proto_dtype, [None, x.shape[1]])
                    Y = helper.make_tensor_value_info("Y", proto_dtype, [None, x.shape[1]])
                    # Build a one-node Identity model so output == input.
                    node_add = helper.make_node("Identity", ["X"], ["Y"])
                    graph_def = helper.make_graph([node_add], "lr", [X], [Y], [])
                    model_def = helper.make_model(
                        graph_def,
                        producer_name="dummy",
                        ir_version=7,
                        producer_version="0",
                        opset_imports=[helper.make_operatorsetid("", opset)],
                    )
                    sess = onnxrt.InferenceSession(model_def.SerializeToString(), providers=provider)
                    # First pass: bind the input as an OrtValue.
                    bind = SessionIOBinding(sess._sess)
                    ort_value = C_OrtValue.ortvalue_from_numpy(x, device)
                    bind.bind_ortvalue_input("X", ort_value)
                    bind.bind_output("Y", device)
                    sess._sess.run_with_iobinding(bind, None)
                    ortvaluevector = bind.get_outputs()
                    self.assertIsInstance(ortvaluevector, OrtValueVector)
                    ortvalue = bind.get_outputs()[0]
                    y = ortvalue.numpy()
                    assert_almost_equal(x, y)
                    # Second pass: bind the same input via its raw data pointer.
                    bind = SessionIOBinding(sess._sess)
                    bind.bind_input("X", device, dtype, x.shape, ort_value.data_ptr())
                    bind.bind_output("Y", device)
                    sess._sess.run_with_iobinding(bind, None)
                    ortvalue = bind.get_outputs()[0]
                    y = ortvalue.numpy()
                    assert_almost_equal(x, y)

    def test_bind_input_only(self):
        """Bind a CUDA-resident input by pointer; let ORT allocate the CPU output."""
        gpu_input = self.create_ortvalue_input_on_gpu()
        session = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
        io_binding = session.io_binding()
        # Bind input to CUDA
        io_binding.bind_input("X", "cuda", 0, np.float32, [3, 2], gpu_input.data_ptr())
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Bind output to CPU
        io_binding.bind_output("Y")
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)
        ort_output = io_binding.copy_outputs_to_cpu()[0]
        # Validate results
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_output))

    def test_bind_input_and_preallocated_output(self):
        """Bind both input and a preallocated output buffer on CUDA."""
        gpu_input = self.create_ortvalue_input_on_gpu()
        session = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
        io_binding = session.io_binding()
        # Bind input to CUDA
        io_binding.bind_input("X", "cuda", 0, np.float32, [3, 2], gpu_input.data_ptr())
        # Bind output to CUDA
        output = self.create_uninitialized_ortvalue_input_on_gpu()
        io_binding.bind_output("Y", "cuda", 0, np.float32, [3, 2], output.data_ptr())
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # Get outputs over to CPU (the outputs which were bound to CUDA will get copied over to the host here)
        ort_output_vals = io_binding.copy_outputs_to_cpu()[0]
        # Validate results
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_output_vals))
        # Validate if ORT actually wrote to pre-allocated buffer by copying the Torch allocated buffer
        # to the host and validating its contents
        ort_output_vals_in_cpu = output.numpy()
        # Validate results
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_output_vals_in_cpu))

    def test_bind_input_and_non_preallocated_output(self):
        """Let ORT allocate the CUDA output; rebind a new input and rerun."""
        session = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
        io_binding = session.io_binding()
        # Bind input to CUDA
        io_binding.bind_input(
            "X",
            "cuda",
            0,
            np.float32,
            [3, 2],
            self.create_ortvalue_input_on_gpu().data_ptr(),
        )
        # Bind output to CUDA
        io_binding.bind_output("Y", "cuda")
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # This call returns an OrtValue which has data allocated by ORT on CUDA
        ort_outputs = io_binding.get_outputs()
        self.assertEqual(len(ort_outputs), 1)
        self.assertEqual(ort_outputs[0].device_name(), "cuda")
        # Validate results (by copying results to CPU by creating a Numpy object)
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_outputs[0].numpy()))
        # We should be able to repeat the above process as many times as we want - try once more
        ort_outputs = io_binding.get_outputs()
        self.assertEqual(len(ort_outputs), 1)
        self.assertEqual(ort_outputs[0].device_name(), "cuda")
        # Validate results (by copying results to CPU by creating a Numpy object)
        self.assertTrue(np.array_equal(self.create_expected_output(), ort_outputs[0].numpy()))
        # Change the bound input and validate the results in the same bound OrtValue
        # Bind alternate input to CUDA
        io_binding.bind_input(
            "X",
            "cuda",
            0,
            np.float32,
            [3, 2],
            self.create_ortvalue_alternate_input_on_gpu().data_ptr(),
        )
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # This call returns an OrtValue which has data allocated by ORT on CUDA
        ort_outputs = io_binding.get_outputs()
        self.assertEqual(len(ort_outputs), 1)
        self.assertEqual(ort_outputs[0].device_name(), "cuda")
        # Validate results (by copying results to CPU by creating a Numpy object)
        self.assertTrue(np.array_equal(self.create_expected_output_alternate(), ort_outputs[0].numpy()))

    def test_bind_input_and_bind_output_with_ortvalues(self):
        """Bind input and output as OrtValues; rebind a new input and rerun."""
        session = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
        io_binding = session.io_binding()
        # Bind ortvalue as input
        input_ortvalue = self.create_ortvalue_input_on_gpu()
        io_binding.bind_ortvalue_input("X", input_ortvalue)
        # Bind ortvalue as output
        output_ortvalue = self.create_uninitialized_ortvalue_input_on_gpu()
        io_binding.bind_ortvalue_output("Y", output_ortvalue)
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # Inspect contents of output_ortvalue and make sure that it has the right contents
        self.assertTrue(np.array_equal(self.create_expected_output(), output_ortvalue.numpy()))
        # Bind another ortvalue as input
        input_ortvalue_2 = self.create_ortvalue_alternate_input_on_gpu()
        io_binding.bind_ortvalue_input("X", input_ortvalue_2)
        # Sync if different CUDA streams
        io_binding.synchronize_inputs()
        # Invoke Run
        session.run_with_iobinding(io_binding)
        # Sync if different CUDA streams
        io_binding.synchronize_outputs()
        # Inspect contents of output_ortvalue and make sure that it has the right contents
        self.assertTrue(np.array_equal(self.create_expected_output_alternate(), output_ortvalue.numpy()))


if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "472b4c47f64a4e385ccd81928f39b0a2",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 110,
"avg_line_length": 38.44805194805195,
"alnum_prop": 0.5936497213308563,
"repo_name": "microsoft/onnxruntime",
"id": "ff1c0d17fd3ec62ca724097c2aa1f45c505b9c2b",
"size": "11979",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnxruntime/test/python/onnxruntime_test_python_iobinding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
'''Tools to parse directories of DFT calculations'''
from .vasp import VaspParser
from .pwscf import PwscfParser
|
{
"content_hash": "47f2561b06906ccc7da4c3a3d0dd6771",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 52,
"avg_line_length": 28.5,
"alnum_prop": 0.7894736842105263,
"repo_name": "CitrineInformatics/pif-dft",
"id": "b4cb5eabf4a795aef1c799808c059a4ce486a6ce",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "dfttopif/parsers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "239935"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import signals
@override_settings(USE_TZ=False)
class SignalTestCase(TestCase):
    """Verify that the auth user_logged_in/user_logged_out signals fire."""

    urls = 'django.contrib.auth.tests.urls'
    fixtures = ['authtestdata.json']

    def _record_login(self, user, **kwargs):
        # Signal receiver: remember every user reported as logged in.
        self.logged_in.append(user)

    def _record_logout(self, user, **kwargs):
        # Signal receiver: remember every user reported as logged out.
        self.logged_out.append(user)

    def setUp(self):
        """Set up the listeners and reset the logged in/logged out counters"""
        self.logged_in = []
        self.logged_out = []
        signals.user_logged_in.connect(self._record_login)
        signals.user_logged_out.connect(self._record_logout)

    def tearDown(self):
        """Disconnect the listeners"""
        signals.user_logged_in.disconnect(self._record_login)
        signals.user_logged_out.disconnect(self._record_logout)

    def test_login(self):
        # A failed login must not fire the signal...
        self.client.login(username='testclient', password='bad')
        self.assertEqual(len(self.logged_in), 0)
        # ...while a successful one must.
        self.client.login(username='testclient', password='password')
        self.assertEqual(len(self.logged_in), 1)
        self.assertEqual(self.logged_in[0].username, 'testclient')

    def test_logout_anonymous(self):
        # Logging out without being logged in still fires the signal,
        # with a None user.
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0], None)

    def test_logout(self):
        self.client.login(username='testclient', password='password')
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0].username, 'testclient')
|
{
"content_hash": "920adbfb10f9b814c192ab4701f39ad3",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.6679245283018868,
"repo_name": "chrishas35/django-travis-ci",
"id": "7730ab0952f1dd08d3dd931ab6c0ea9af210b5a2",
"size": "1855",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/contrib/auth/tests/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "89027"
},
{
"name": "Python",
"bytes": "8037393"
},
{
"name": "Shell",
"bytes": "4241"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``premise*_if_clauses`` fields to the shorter ``premise*_if``."""
    dependencies = [
        ('arguments', '0003_auto_20161114_0750'),
    ]
    # Pure field renames on the 'argument' model; no data transformation.
    operations = [
        migrations.RenameField(
            model_name='argument',
            old_name='premise1_if_clauses',
            new_name='premise1_if',
        ),
        migrations.RenameField(
            model_name='argument',
            old_name='premise2_if_clauses',
            new_name='premise2_if',
        ),
    ]
|
{
"content_hash": "291ba9bb574b046adc93e13397eb78ba",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 49,
"avg_line_length": 23.26086956521739,
"alnum_prop": 0.5626168224299065,
"repo_name": "amstart/demo",
"id": "f2053d28747caf9e5817847f88e83faa5d21a9db",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/test_one_vote_model",
"path": "demoslogic/arguments/migrations/0004_auto_20161120_1242.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3826"
},
{
"name": "HTML",
"bytes": "41694"
},
{
"name": "JavaScript",
"bytes": "3922"
},
{
"name": "Jupyter Notebook",
"bytes": "22630"
},
{
"name": "Python",
"bytes": "174579"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
}
|
from incuna_test_utils.testcases.urls import URLTestCase
from .. import views
class TestURLs(URLTestCase):
    """Ensure the urls work."""

    def _assert_route(self, view, expected_url, url_name, **extra):
        # Thin wrapper so each test reduces to one declarative call.
        self.assert_url_matches_view(
            view=view,
            expected_url=expected_url,
            url_name=url_name,
            **extra)

    def test_auth_token_url(self):
        self._assert_route(
            views.GetAuthToken, '/auth', 'user_management_api_core:auth')

    def test_password_reset_confirm_url(self):
        self._assert_route(
            views.PasswordReset,
            '/auth/password_reset/confirm/a/x-y',
            'user_management_api_core:password_reset_confirm',
            url_kwargs={'uidb64': 'a', 'token': 'x-y'})

    def test_password_reset_url(self):
        self._assert_route(
            views.PasswordResetEmail,
            '/auth/password_reset',
            'user_management_api_core:password_reset')

    def test_profile_detail_url(self):
        self._assert_route(
            views.ProfileDetail, '/profile', 'user_management_api_core:profile_detail')

    def test_password_change_url(self):
        self._assert_route(
            views.PasswordChange,
            '/profile/password',
            'user_management_api_core:password_change')

    def test_register_url(self):
        self._assert_route(
            views.UserRegister, '/register', 'user_management_api_core:register')

    def test_user_detail_url(self):
        self._assert_route(
            views.UserDetail,
            '/users/1',
            'user_management_api_users:user_detail',
            url_kwargs={'pk': 1})

    def test_user_list_url(self):
        self._assert_route(
            views.UserList, '/users', 'user_management_api_users:user_list')

    def test_verify_email(self):
        """Assert `verify_user` is defined."""
        token = 'a-token'
        self._assert_route(
            views.VerifyAccountView,
            '/verify_email/{}'.format(token),
            'user_management_api_verify:verify_user',
            url_kwargs={'token': token})

    def test_resend_confirmation_email(self):
        """Assert `resend_confirmation_email` is defined."""
        self._assert_route(
            views.ResendConfirmationEmail,
            '/resend-confirmation-email',
            'user_management_api_core:resend_confirmation_email')
|
{
"content_hash": "4b68908381d6162c93c40d69aa0fc78a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 74,
"avg_line_length": 35.72,
"alnum_prop": 0.6035834266517357,
"repo_name": "incuna/django-user-management",
"id": "cd911233c2076940a4aa72f78b3cf4f8498d0802",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_management/api/tests/test_urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1163"
},
{
"name": "Makefile",
"bytes": "443"
},
{
"name": "Python",
"bytes": "179550"
}
],
"symlink_target": ""
}
|
import re
class Pattern(object):
    """A named lexer rule: a compiled regex plus optional stack actions."""

    def __init__(self, name, regex, actions=None):
        # Accept either a pattern string or a pre-compiled regex object.
        if isinstance(regex, basestring):
            regex = re.compile(regex)
        self.name = name
        self.regex = regex
        self.actions = actions

    def __repr__(self):
        return "Pattern(%r, %r, %r)" % (
            self.name, self.regex.pattern, self.actions)
class Token(object):
    """A terminal token: rule name, matched text and optional source span."""

    def __init__(self, name, value, start=None, end=None):
        self.name, self.value = name, value
        self.start, self.end = start, end

    def __repr__(self):
        return u'Token(%r, %r)' % (self.name, self.value)
class Node(object):
    """A node in the parse tree built by PushdownParser."""

    def __init__(self, name, match_object=None, parent=None, children=None):
        self.name = name
        self.match_object = match_object
        self.parent = parent
        # `children or []` gives each node its own fresh list (avoids a
        # shared mutable default).
        self.children = children or []

    def _print(self):
        # Iterative pre-order dump of the subtree, indented by depth.
        # Children are pushed reversed so they pop in document order.
        # (Python 2 print statement.)
        node_stack = [(0, self)]
        while node_stack:
            depth, node = node_stack.pop()
            print "  " * depth + u"Node({0.name}, {1!r})".format(node, node.match_object.group() if node.match_object else "None")
            node_stack += [(depth + 1, i) for i in reversed(node.children)]

    def __repr__(self):
        return u'Node(%r, %r)' % (self.name, self.children)
# class LiteralTextNode(object):
class PushdownParser(object):
    """A pushdown automaton that turns source text into a Node tree.

    ``spec`` maps state names to ordered lists of Pattern objects; an
    InheritMarker value is resolved to the named state's pattern list
    at construction time.
    """

    def __init__(self, spec):
        self.spec = spec
        # Resolve InheritMarker placeholders to the actual rule lists.
        # NOTE(review): dict iteration order is arbitrary here, so a marker
        # pointing at another marker may or may not resolve — presumably
        # the spec only inherits from concrete states.
        for k in self.spec:
            if isinstance(self.spec[k], InheritMarker):
                self.spec[k] = self.spec[self.spec[k].name]

    def parse(self, source):
        """Parse ``source`` and return the root ``document`` Node."""
        # The root node of the document
        document = Node("document", None)

        # Our iterator through our to-be-built AST. We will add new nodes we
        # encounter to this node as children.
        current_node = document

        # Our iterator through the source text.
        position = 0

        # Each stack entry is (state_name, node_created_flag). The flag
        # tells a later PopAction whether to also step current_node back
        # up to its parent. The root entry stores the document Node, which
        # is truthy, so popping the root would also move up.
        state_stack = [("document", document)]
        while position < len(source):
            # Go through each regex associated with current state in order
            for pattern in self.spec[state_stack[-1][0]]:
                match = pattern.regex.match(source, position)
                if match:
                    position += len(match.group())
                    if pattern.name is not None and not pattern.name.startswith("Start:"):
                        new_node = Node(pattern.name, match_object=match,
                                        parent=current_node)
                        current_node.children.append(new_node)
                    for i in pattern.actions or []:
                        if isinstance(i, PopAction):
                            if state_stack.pop()[1]:
                                current_node = current_node.parent
                        elif isinstance(i, PushAction):
                            if i.create_node:
                                new_node = Node(i.state, parent=current_node)
                                current_node.children.append(new_node)
                                current_node = new_node
                            state_stack.append((i.state, i.create_node))
                        else:
                            raise RuntimeError("dunno")
                    break
            else:
                # No pattern matched: record a placeholder token and skip
                # one character so the parser always makes progress.
                current_node.children.append(Token("InvalidCharacter", None))#source[position]))
                position += 1

        # NOTE(review): this check is dead code — the loop above only exits
        # when position >= len(source) — and the RuntimeError is constructed
        # but never raised (missing `raise`).
        if position < len(source):
            RuntimeError("Finished early somehow")

        return document
class PopAction(object):
    """Parser action: pop the current state off the parser's state stack.

    Carries no data; PushdownParser dispatches on its type alone.
    """

    def __repr__(self):
        # Fixed: the original defined `__repr` (missing the trailing
        # underscores) with a misnamed first parameter, so instances fell
        # back to the default object repr.
        return "PopAction()"
class PushAction(object):
    """Parser action: push ``state`` onto the parser's state stack.

    ``create_node`` controls whether the parser also creates a Node for
    the pushed state (a ':'-prefixed spec string suppresses the node).
    """

    def __init__(self, state, create_node=True):
        self.state = state
        self.create_node = create_node

    def __repr__(self):
        # Fixed: the original signature was `__repr__(object)`, so the
        # body's references to `self` raised NameError whenever an
        # instance was repr'd.
        return "PushAction(%r, %r)" % (self.state, self.create_node)

    @classmethod
    def from_string(cls, string):
        """Build a PushAction from a spec string.

        A leading ':' means "push the state without creating a node".
        """
        if string.startswith(":"):
            return cls(string[1:], False)
        return cls(string, True)
def pop():
    """Return the action list that pops one state off the parser stack."""
    action = PopAction()
    return [action]
def push(*args):
    """Build PushActions from spec strings (':name' means push w/o node)."""
    return [PushAction.from_string(spec) for spec in args]
class InheritMarker(object):
    # Placeholder stored in a parser spec; PushdownParser.__init__ replaces
    # it with the rule list of the state it names.
    def __init__(self, name):
        self.name = name
def inherit(name):
    """Mark a spec state as reusing the rule list of state ``name``."""
    marker = InheritMarker(name)
    return marker
def parse(source):
    """Parse mono-markup ``source`` text into a Node tree.

    Builds the state/pattern spec for the pushdown parser and runs it.
    Patterns within a state are tried in order, so the catch-all "Text"
    rules must come last. Whitespace inside the raw regex strings is
    significant (list markers, continuation indents, 4-space code lines).
    """
    p = Pattern

    BLANK_LINE_RE = r"[ \t]*\n"

    spec = {
        "document": [
            p("BlankLine", BLANK_LINE_RE),

            # Text
            p(None, r"\\", push("text", ":inline-content")),

            # Heading
            p(None, r"#{1,6} ", push("header", ":inline-content")),

            # UnorderedListItem
            p(None, r" \* ", push("unordered-list", "unordered-list-item")),

            # OrderedListItem
            p(None, r" \# ", push("ordered-list", "ordered-list-item")),

            # Code
            p(None, r"    ", push("code", "code-line")),

            # Annotation
            p(None, r"!(?![ \t]*\n)", push("annotation")),

            # Text
            p(None, r"", push("text", ":inline-content")),
        ],
        "annotation": inherit("raw-content"),
        "raw-content": [
            p("PlainText", r"[^\n]+"),
            p(None, r"\n", pop()),
        ],
        "inline-content": [
            p("PlainText", r"[^\n]+"),
            p(None, r"\n", pop()),
        ],
        "text": [
            p(None, BLANK_LINE_RE, pop()),

            # Text
            p(None, r"", push("inline-content")),
        ],
        "header": [
            p(None, BLANK_LINE_RE, pop()),
        ],
        "unordered-list": [
            p(None, BLANK_LINE_RE, pop()),

            # UnorderedListItem
            p(None, r"(?:   )* \* ", push("unordered-list-item")),

            # UnorderedListItemContinuation
            p(None, r"(?:   )*   ", push("unordered-list-item-continuation")),
        ],
        "unordered-list-item": inherit("inline-content"),
        "unordered-list-item-continuation": inherit("inline-content"),
        "ordered-list": [
            p(None, BLANK_LINE_RE, pop()),

            # OrderedListItem
            p(None, r"(?:   )* \# ", push("ordered-list-item")),

            # OrderedListItemContinuation
            p(None, r"(?:   )*   ", push("ordered-list-item-continuation")),
        ],
        "ordered-list-item": inherit("inline-content"),
        "ordered-list-item-continuation": inherit("inline-content"),
        "code": [
            p(None, BLANK_LINE_RE, pop()),

            # Code
            p(None, r"    ", push("code-line"))
        ],
        "code-line": inherit("raw-content"),
    }

    return PushdownParser(spec).parse(source)
z = (
"""# Mono-Markup Specification
Hello world [link](href).
!syntax=c++
if (awesome) {
printf("Hello world")
}
* I am a list
continue
* Yes indeed
# I am an ordered list
continue
# Onward!
""")
parse(z)._print()
|
{
"content_hash": "97da63b2de9043c499c3db869f797e84",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 130,
"avg_line_length": 28.860759493670887,
"alnum_prop": 0.5090643274853801,
"repo_name": "brownhead/mono-markup",
"id": "efd4f81d36a85f769cc03d56f23c54426e1e947c",
"size": "6840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monomarkup/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7143"
}
],
"symlink_target": ""
}
|
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from gtxamqp.pool import pool
from tests import utils
# Get a new client from the client pool, configured against the test
# RabbitMQ instance; the pool handles client lifecycle.
client = pool.get(rabbitmq_conf=utils.generate_rabbitmq_config())

# How many jokes the demo publishes and then consumes before stopping.
num_of_jokes_to_publish = 10
@inlineCallbacks
def get_jokes():
    """
    Fetch jokes from the queue and print them.

    Polls with basic_get until `num_of_jokes_to_publish` non-empty
    messages have been printed, then stops the reactor.
    """
    messages_received = 0
    while messages_received < num_of_jokes_to_publish:
        msg = yield client.basic_get(no_ack=True)
        # Empty polls (no message yet) do not advance the counter.
        messages_received += 1 if msg and msg.content else 0
        if msg and msg.content:
            print(msg.content.body)

    # You'll notice we don't explicitly tear down the client:
    # the pool automatically performs client cleanup when the
    # reactor is shutting down.
    reactor.stop()
@inlineCallbacks
def get_joke():
    """
    Fetch a random joke from the ICNDb API.

    Falls back to a canned joke when the request fails or the response
    payload is missing the expected fields.
    """
    response = yield treq.get("http://api.icndb.com/jokes/random")
    joke = None
    if response.code == 200:
        content = yield response.json()
        if content.get('type', 'failure') == 'success':
            joke = content.get('value', {}).get('joke', None)
    returnValue(joke or "What kind of bagel can fly? A plain bagel!")
@inlineCallbacks
def publish_jokes():
    """
    Publish `num_of_jokes_to_publish` numbered jokes to the exchange.
    :return:
    """
    for n in xrange(num_of_jokes_to_publish):
        joke = yield get_joke()
        # Prefix each joke with its sequence number for readable output.
        client.basic_publish("{num}. {joke}\n".format(num=n, joke=joke))
# Schedule the publisher and consumer on the reactor, then run it; the
# consumer (get_jokes) stops the reactor once all jokes are printed.
reactor.callLater(0, publish_jokes)
loop = reactor.callLater(0, get_jokes)
reactor.run()
|
{
"content_hash": "603b9b4db913e4714377500bccc5fdb9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 28.566666666666666,
"alnum_prop": 0.662777129521587,
"repo_name": "devsenexx/gtxamqp",
"id": "de16dce6f76185d59b00a873ab2c294199f045b5",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_basic_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75581"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
}
|
from clang.cindex import Cursor, Index
from bears.c_languages.codeclone_detection.ClangCountingConditions import (
get_identifier_name, is_function_declaration, is_literal, is_reference)
from bears.c_languages.codeclone_detection.CountVector import CountVector
class ClangCountVectorCreator:
    """
    This object uses clang to create a count vector for each function for given
    counting conditions. The counting conditions are clang specific and they
    are called like this:

    condition(stack)

    While stack is a stack (i.e. list) holding a tuple holding the parent
    cursors and the child number. (E.g. if a cursor is the third child of
    its parent its child number is two, counted from zero.)

    The ClangCountVectorCreator will only count variables local to each
    function.
    """

    def __init__(self,
                 conditions=None,
                 weightings=None):
        """
        Creates a new ClangCountVectorCreator.

        :param conditions: The counting conditions as list of function
                           objects, each shall return true when getting
                           data indicating that this occurrence should
                           be counted.
        :param weightings: Optional factors to weight counting conditions.
                           Defaults to 1 for all conditions.
        """
        self.conditions = conditions
        self.weightings = weightings
        # Per-function accumulation state; reset after each function is
        # processed (see _get_vectors_for_cursor).
        self.count_vectors = {}
        self.stack = []

    def count_identifier(self, identifier, category):
        """Count one occurrence of ``identifier``, creating its
        CountVector lazily on first sight."""
        if identifier not in self.count_vectors:
            self.count_vectors[identifier] = CountVector(
                identifier, category, self.conditions, self.weightings)
        self.count_vectors[identifier].count_reference(self.stack)

    def _get_vector_for_function(self, cursor, child_num=0):
        """
        Creates a CountVector object for the given cursor.
        Note: this function uses self.count_vectors for storing its results.
        This is done knowingly because passing back and forth mutable objects
        is not nice and yields in bigger complexity IMHO.

        This function creates a CountVector object for all variables found in
        self.local_vars and in the tree elements below the given one, stores it
        in self.count_vectors.

        :param cursor: Clang cursor to iterate over.
        """
        assert isinstance(cursor, Cursor)
        # Track our position in the AST so counting conditions can inspect
        # the ancestry (cursor, child index) of each occurrence.
        self.stack.append((cursor, child_num))

        if is_reference(cursor):
            self.count_identifier(get_identifier_name(cursor),
                                  CountVector.Category.reference)

        if is_literal(cursor):
            # Literals have no name; use the first token's spelling.
            tokens = list(cursor.get_tokens())
            if tokens:
                self.count_identifier(tokens[0].spelling,
                                      CountVector.Category.literal)

        for i, child in enumerate(cursor.get_children()):
            self._get_vector_for_function(child, i)

        self.stack.pop()

    def _get_vectors_for_cursor(self, cursor, filename):
        """
        Maps all functions in/under the given cursor to their count vectors
        if they are defined in the given file.

        :param cursor:   The cursor to traverse.
        :param filename: Absolute path to the file.
        :return:         The dictionary holding CountVectors for all variables
                         in all functions.
        """
        assert isinstance(cursor, Cursor)
        file = cursor.location.file
        if file is not None:
            file = file.name

        # Only analyze functions defined in this file, not in includes.
        if str(file) == str(filename) and is_function_declaration(cursor):
            self._get_vector_for_function(cursor)
            # Key by (line, name) so overloads on different lines stay apart.
            result = {(cursor.extent.start.line,
                       get_identifier_name(cursor)): self.count_vectors}
            # Reset local states
            self.count_vectors = {}
            self.stack = []
        else:
            result = {}

        for child in cursor.get_children():
            result.update(self._get_vectors_for_cursor(child, filename))

        return result

    def get_vectors_for_file(self, filename, include_paths=()):
        """
        Creates a dictionary associating each function name within the given
        file with another dictionary associating each variable name (local to
        the function) with a CountVector object. Functions of included files
        will not be analyzed.

        :param filename: The path to the file to parse.
        :return:         The dictionary holding CountVectors for all variables
                         in all functions.
        """
        args = ['-I'+path for path in include_paths]
        root = Index.create().parse(filename, args=args).cursor
        return self._get_vectors_for_cursor(root, filename)
|
{
"content_hash": "0324ddaf78a1f313763680d49694d2fe",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 79,
"avg_line_length": 39.17741935483871,
"alnum_prop": 0.6179497735693701,
"repo_name": "IPMITMO/statan",
"id": "36c8d0bc0a2b741dfd503c2ef6a58b2cb6c96b31",
"size": "4858",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "coala-bears/bears/c_languages/codeclone_detection/ClangCountVectorCreator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "101"
},
{
"name": "Batchfile",
"bytes": "10931"
},
{
"name": "C",
"bytes": "28190"
},
{
"name": "C#",
"bytes": "45474"
},
{
"name": "C++",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "6631"
},
{
"name": "Go",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "1564"
},
{
"name": "Java",
"bytes": "592"
},
{
"name": "JavaScript",
"bytes": "472227"
},
{
"name": "Makefile",
"bytes": "15304"
},
{
"name": "PHP",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "2312447"
},
{
"name": "Ruby",
"bytes": "447"
},
{
"name": "Shell",
"bytes": "12706"
}
],
"symlink_target": ""
}
|
import pytest
import vcr
from pygrabbit import PyGrabbit
@pytest.fixture
def grab():
    """Return a PyGrabbit factory whose HTTP traffic is replayed from a
    VCR cassette (unseen requests are recorded on first run)."""
    @vcr.use_cassette('fixtures/vcr_cassettes/pygrabbit.yaml',
                      record_mode='new_episodes')
    def make_grabbit(url):
        return PyGrabbit.url(url)
    return make_grabbit
class TestPyGrabbitTitle:
    """Title extraction: <title> tag, OpenGraph and Twitter Card meta."""

    def test_title_from_url(self, grab):
        """Plain <title> tag is used when no meta tags are present."""
        g = grab('http://www.drudgereport.com')
        assert g.title.startswith('DRUDGE REPORT')

    def test_title_from_og(self, grab):
        """og:title meta takes effect on OpenGraph pages."""
        g = grab('http://ogp.me/')
        assert g.title == 'Open Graph protocol'

    def test_title_twitter_card(self, grab):
        """twitter:title meta is honored on Twitter Card pages."""
        g = grab('https://dev.twitter.com/docs/cards/types/summary-card')
        assert g.title == 'Summary Card'
class TestPyGrabbitDescription:
    """Description extraction: og:description, Twitter Card and meta tag."""

    def test_description_from_og(self, grab):
        g = grab('http://ogp.me/')
        assert g.description == "The Open Graph protocol enables any web page to become a rich object in a social graph."

    def test_description_from_twitter_card(self, grab):
        g = grab("https://dev.twitter.com/docs/cards/types/summary-card")
        assert g.description == "The Summary Card can be used for many kinds of web content, from blog posts and news articles, to products and restaurants. It is designed to give the reader a preview of the content before clicking through to your website."

    def test_description_from_meta(self, grab):
        g = grab("http://moz.com/learn/seo/meta-description")
        assert g.description == "Get SEO best practices for the meta description tag, including length and content."
class TestPyGrabbitImages:
    """Image extraction: og:image, Twitter Card image and page <img>."""

    def test_return_array(self, grab):
        """`images` is always a list, even when empty."""
        g = grab("http://www.google.com")
        assert type(g.images) == list

    def test_only_images_from_og(self, grab):
        """og:image wins over other images on the page."""
        g = grab("http://ogp.me/")
        assert g.images[0] == "http://ogp.me/logo.png"
        assert len(g.images) == 1

    def test_return_images_from_twitter_card(self, grab):
        g = grab("https://dev.twitter.com/cards/types/summary-large-image")
        assert g.images[0] == "https://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3.png"
        assert len(g.images) == 1

    # NOTE: Amazon's HTML is too inconsistent to scrape; test disabled.
    # @vcr.use_cassette('fixtures/vcr_cassettes/pygrabbit.yaml', record_mode='new_episodes')
    # def test_main_image_with_id_amazon(self):
    #     g = PyGrabbit.url("http://www.amazon.com/gp/product/0975277324")
    #     import ipdb;ipdb.set_trace()
    #     assert g.images[0] == "http://ecx.images-amazon.com/images/I/61dDQUfhuvL._SX300_.jpg"
    #     assert len(g.images) == 1

    def test_return_images_from_global_img(self, grab):
        """Falls back to a page-level <img> when no meta image exists."""
        g = grab("http://elixir-lang.org/")
        assert g.images[0] == "http://elixir-lang.org/images/contents/home-code.png"
        assert len(g.images) == 1
|
{
"content_hash": "07df4142005a7bbb4155973481150f02",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 257,
"avg_line_length": 40.41428571428571,
"alnum_prop": 0.6564156945917285,
"repo_name": "eka/pygrabbit",
"id": "9ea23d9018963c61d261db172bcb2ff27f902e73",
"size": "2829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pygrabbit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6702"
}
],
"symlink_target": ""
}
|
import os
import os.path
import sys
import urllib
from robot.errors import DataError
from .encoding import decode_from_system
from .platform import WINDOWS
from .robottypes import is_unicode
# Detect at import time whether the filesystem is case-insensitive.
if WINDOWS:
    CASE_INSENSITIVE_FILESYSTEM = True
else:
    # Probe: /tmp and /TMP list identically only on case-insensitive
    # filesystems (e.g. OSX in its default configuration).
    try:
        CASE_INSENSITIVE_FILESYSTEM = os.listdir('/tmp') == os.listdir('/TMP')
    except OSError:
        # One of the paths does not exist: assume case-sensitive.
        CASE_INSENSITIVE_FILESYSTEM = False
def normpath(path, case_normalize=False):
    """Replacement for os.path.normpath with some enhancements.

    1. Non-Unicode paths are converted to Unicode using file system encoding.
    2. Optionally lower-case paths on case-insensitive file systems.
       That includes Windows and also OSX in default configuration.
    3. Turn ``c:`` into ``c:\\`` on Windows instead of keeping it as ``c:``.
    """
    decoded = path if is_unicode(path) else decode_from_system(path)
    normalized = os.path.normpath(decoded)
    if case_normalize and CASE_INSENSITIVE_FILESYSTEM:
        normalized = normalized.lower()
    # On Windows a bare drive ('c:') means "current dir on that drive";
    # append the separator so it denotes the drive root instead.
    bare_drive = WINDOWS and len(normalized) == 2 and normalized.endswith(':')
    return normalized + '\\' if bare_drive else normalized
def abspath(path, case_normalize=False):
    """Replacement for os.path.abspath with some enhancements and bug fixes.

    1. Non-Unicode paths are converted to Unicode using file system encoding.
    2. Optionally lower-case paths on case-insensitive file systems.
       That includes Windows and also OSX in default configuration.
    3. Turn ``c:`` into ``c:\\`` on Windows instead of ``c:\\current\\path``.
    4. Handle non-ASCII characters on working directory with Python < 2.6.5:
       http://bugs.python.org/issue3426
    """
    normalized = normpath(path, case_normalize)
    if os.path.isabs(normalized):
        return normalized
    # Relative path: resolve against the (Unicode) working directory.
    joined = os.path.join(os.getcwdu(), normalized)
    return normpath(joined, case_normalize)
# TODO: Investigate could this be replaced with os.path.relpath in RF 2.9.
def get_link_path(target, base):
    """Returns a relative path to a target from a base.

    If base is an existing file, then its parent directory is considered.
    Otherwise, base is assumed to be a directory.

    The result is URL-encoded and prefixed with ``file:`` when absolute.

    Rationale: os.path.relpath is not available before Python 2.6
    """
    path = _get_pathname(target, base)
    # pathname2url percent-encodes; encode to UTF-8 first (Python 2 API).
    url = urllib.pathname2url(path.encode('UTF-8'))
    if os.path.isabs(path):
        url = 'file:' + url
    # At least Jython seems to use 'C|/Path' and not 'C:/Path'
    if os.sep == '\\' and '|/' in url:
        url = url.replace('|/', ':/', 1)
    # Undo encoding of separators/colons so links stay human-readable.
    return url.replace('%5C', '/').replace('%3A', ':').replace('|', ':')
def _get_pathname(target, base):
    """Return ``target`` relative to directory ``base`` (both made absolute).

    Falls back to the absolute target when the two are on different
    Windows drives.
    """
    target = abspath(target)
    base = abspath(base)
    if os.path.isfile(base):
        base = os.path.dirname(base)
    if base == target:
        return os.path.basename(target)
    base_drive, base_path = os.path.splitdrive(base)
    # if in Windows and base and link on different drives
    if os.path.splitdrive(target)[0] != base_drive:
        return target
    common_len = len(_common_path(base, target))
    if base_path == os.sep:
        # Base is the drive root: just strip the common prefix.
        return target[common_len:]
    if common_len == len(base_drive) + len(os.sep):
        # Common part is only the drive root; keep the leading separator.
        common_len -= len(os.sep)
    # One '..' for every directory level remaining below the common path.
    dirs_up = os.sep.join([os.pardir] * base[common_len:].count(os.sep))
    return os.path.join(dirs_up, target[common_len + len(os.sep):])
def _common_path(p1, p2):
"""Returns the longest path common to p1 and p2.
Rationale: as os.path.commonprefix is character based, it doesn't consider
path separators as such, so it may return invalid paths:
commonprefix(('/foo/bar/', '/foo/baz.txt')) -> '/foo/ba' (instead of /foo)
"""
while p1 and p2:
if p1 == p2:
return p1
if len(p1) > len(p2):
p1 = os.path.dirname(p1)
else:
p2 = os.path.dirname(p2)
return ''
def find_file(path, basedir='.', file_type=None):
    """Locate ``path`` under ``basedir`` or any entry of ``sys.path``.

    A hit is either an existing file, or a directory that contains an
    ``__init__.py`` (i.e. a package). Raises DataError with a
    type-specific message when nothing is found.
    """
    normalized = os.path.normpath(path.replace('/', os.sep))
    for base in [basedir] + sys.path:
        if not base or not os.path.isdir(base):
            continue
        if not is_unicode(base):
            base = decode_from_system(base)
        candidate = os.path.abspath(os.path.join(base, normalized))
        if os.path.isfile(candidate):
            return candidate
        if (os.path.isdir(candidate) and
                os.path.isfile(os.path.join(candidate, '__init__.py'))):
            return candidate
    labels = {'Library': 'Test library',
              'Variables': 'Variable file',
              'Resource': 'Resource file'}
    label = labels.get(file_type, file_type or 'File')
    raise DataError("%s '%s' does not exist." % (label, normalized))
|
{
"content_hash": "cca77eeab0c46bb00f77d948ba6f20c9",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 83,
"avg_line_length": 36.03174603174603,
"alnum_prop": 0.6317180616740088,
"repo_name": "un33k/robotframework",
"id": "df9bc48ea1e3487848d40384b0022c17832f9cdf",
"size": "5148",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "src/robot/utils/robotpath.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140949"
},
{
"name": "Java",
"bytes": "60101"
},
{
"name": "JavaScript",
"bytes": "160761"
},
{
"name": "Python",
"bytes": "2160012"
},
{
"name": "RobotFramework",
"bytes": "2039326"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
"""Utilities for dealing with file paths."""
import os
def join(path, *paths):
    """Joins given path pieces with the appropriate separator.

    This function is useful for joining parts of a path that could at times
    refer to either a GCS path or a local path. In particular, this is useful
    for ensuring Windows compatibility as on Windows, the GCS path separator
    is different from the separator for local paths.

    Use os.path.join instead if a path always refers to a local path.

    Args:
      path: First part of path to join. If this part starts with 'gs:/', the
        GCS separator will be used in joining this path.
      *paths: Remaining part(s) of path to join.

    Returns:
      Pieces joined by the appropriate path separator.
    """
    if not path.startswith('gs:/'):
        return os.path.join(path, *paths)
    # GCS path: always join with '/'. We explicitly choose not to use
    # posixpath.join() here, since that function has the undesirable
    # behavior of having, for example, posixpath.join('gs://bucket/path',
    # '/to/file') return '/to/file' instead of the slightly less surprising
    # result 'gs://bucket/path//to/file'.
    return '/'.join((path,) + paths)
|
{
"content_hash": "7087477f861566c7539ba94be1b456d6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 37.54838709677419,
"alnum_prop": 0.7044673539518901,
"repo_name": "xsm110/Apache-Beam",
"id": "6b3b978fdbe106300559fcdd315d70510c59f2bb",
"size": "1948",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/utils/path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "42728"
},
{
"name": "Java",
"bytes": "11382377"
},
{
"name": "Protocol Buffer",
"bytes": "50349"
},
{
"name": "Python",
"bytes": "2635088"
},
{
"name": "Shell",
"bytes": "34853"
}
],
"symlink_target": ""
}
|
"""Strategies for creating new instances of Engine types.
These are semi-private implementation classes which provide the
underlying behavior for the "strategy" keyword argument available on
:func:`~sqlalchemy.engine.create_engine`. Current available options are
``plain``, ``threadlocal``, and ``mock``.
New strategies can be added via new ``EngineStrategy`` classes.
"""
from operator import attrgetter
from sqlalchemy.engine import base, threadlocal, url
from sqlalchemy import util, exc, event
from sqlalchemy import pool as poollib
strategies = {}
class EngineStrategy(object):
    """An adaptor that processes input arguments and produces an Engine.

    Provides a ``create`` method that receives input arguments and
    produces an instance of base.Engine or a subclass.
    """

    def __init__(self):
        # Self-register in the module-level ``strategies`` registry so
        # create_engine() can look this strategy up by name.
        strategies[self.name] = self

    def create(self, *args, **kwargs):
        """Given arguments, returns a new Engine instance."""

        raise NotImplementedError()
class DefaultEngineStrategy(EngineStrategy):
    """Base class for built-in strategies."""

    def create(self, name_or_url, **kwargs):
        """Assemble dialect, DBAPI, pool and Engine from a URL and kwargs.

        Keyword arguments are progressively consumed (popped) by each
        component; anything left over at the end is an error.
        (Python 2 module: uses ``except Exception, e`` and three-argument
        ``raise`` syntax.)
        """
        # create url.URL object
        u = url.make_url(name_or_url)

        dialect_cls = u.get_dialect()

        dialect_args = {}
        # consume dialect arguments from kwargs
        for k in util.get_cls_kwargs(dialect_cls):
            if k in kwargs:
                dialect_args[k] = kwargs.pop(k)

        dbapi = kwargs.pop('module', None)
        if dbapi is None:
            dbapi_args = {}
            for k in util.get_func_kwargs(dialect_cls.dbapi):
                if k in kwargs:
                    dbapi_args[k] = kwargs.pop(k)
            dbapi = dialect_cls.dbapi(**dbapi_args)

        dialect_args['dbapi'] = dbapi

        # create dialect
        dialect = dialect_cls(**dialect_args)

        # assemble connection arguments
        (cargs, cparams) = dialect.create_connect_args(u)
        cparams.update(kwargs.pop('connect_args', {}))

        # look for existing pool or create
        pool = kwargs.pop('pool', None)
        if pool is None:
            # Default creator: wrap DBAPI connect errors as DBAPIError,
            # preserving the original traceback.
            def connect():
                try:
                    return dialect.connect(*cargs, **cparams)
                except Exception, e:
                    invalidated = dialect.is_disconnect(e, None, None)
                    # Py3K
                    #raise exc.DBAPIError.instance(None, None,
                    #                        e, dialect.dbapi.Error,
                    #                        connection_invalidated=invalidated
                    #                    ) from e
                    # Py2K
                    import sys
                    raise exc.DBAPIError.instance(
                        None, None, e, dialect.dbapi.Error,
                        connection_invalidated=invalidated
                    ), None, sys.exc_info()[2]
                    # end Py2K

            creator = kwargs.pop('creator', connect)

            poolclass = kwargs.pop('poolclass', None)
            if poolclass is None:
                poolclass = dialect_cls.get_pool_class(u)
            pool_args = {}

            # consume pool arguments from kwargs, translating a few of
            # the arguments
            translate = {'logging_name': 'pool_logging_name',
                         'echo': 'echo_pool',
                         'timeout': 'pool_timeout',
                         'recycle': 'pool_recycle',
                         'events': 'pool_events',
                         'use_threadlocal': 'pool_threadlocal',
                         'reset_on_return': 'pool_reset_on_return'}
            for k in util.get_cls_kwargs(poolclass):
                tk = translate.get(k, k)
                if tk in kwargs:
                    pool_args[k] = kwargs.pop(tk)
            pool = poolclass(creator, **pool_args)
        else:
            if isinstance(pool, poollib._DBProxy):
                pool = pool.get_pool(*cargs, **cparams)
            else:
                pool = pool

        # create engine.
        engineclass = self.engine_cls
        engine_args = {}
        for k in util.get_cls_kwargs(engineclass):
            if k in kwargs:
                engine_args[k] = kwargs.pop(k)

        _initialize = kwargs.pop('_initialize', True)

        # all kwargs should be consumed
        if kwargs:
            raise TypeError(
                "Invalid argument(s) %s sent to create_engine(), "
                "using configuration %s/%s/%s.  Please check that the "
                "keyword arguments are appropriate for this combination "
                "of components." % (','.join("'%s'" % k for k in kwargs),
                                 dialect.__class__.__name__,
                                 pool.__class__.__name__,
                                 engineclass.__name__))

        engine = engineclass(pool, dialect, u, **engine_args)

        if _initialize:
            # Hook dialect on-connect logic and first-connect
            # initialization into the pool's events.
            do_on_connect = dialect.on_connect()
            if do_on_connect:
                def on_connect(dbapi_connection, connection_record):
                    conn = getattr(
                        dbapi_connection, '_sqla_unwrap', dbapi_connection)
                    if conn is None:
                        return
                    do_on_connect(conn)

                event.listen(pool, 'first_connect', on_connect)
                event.listen(pool, 'connect', on_connect)

            def first_connect(dbapi_connection, connection_record):
                c = base.Connection(engine, connection=dbapi_connection)

                # TODO: removing this allows the on connect activities
                # to generate events.  tests currently assume these aren't
                # sent.  do we want users to get all the initial connect
                # activities as events ?
                c._has_events = False

                dialect.initialize(c)
            event.listen(pool, 'first_connect', first_connect)

        return engine
class PlainEngineStrategy(DefaultEngineStrategy):
    """Strategy for configuring a regular Engine."""

    name = 'plain'
    engine_cls = base.Engine

# Instantiate to register the strategy under its name.
PlainEngineStrategy()
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
    """Strategy for configuring an Engine with threadlocal behavior."""

    name = 'threadlocal'
    engine_cls = threadlocal.TLEngine

# Instantiate to register the strategy under its name.
ThreadLocalEngineStrategy()
class MockEngineStrategy(EngineStrategy):
    """Strategy for configuring an Engine-like object with mocked execution.

    Produces a single mock Connectable object which dispatches
    statement execution to a passed-in function.
    """

    name = 'mock'

    def create(self, name_or_url, executor, **kwargs):
        # create url.URL object
        u = url.make_url(name_or_url)

        dialect_cls = u.get_dialect()

        dialect_args = {}
        # consume dialect arguments from kwargs
        for k in util.get_cls_kwargs(dialect_cls):
            if k in kwargs:
                dialect_args[k] = kwargs.pop(k)

        # create dialect
        dialect = dialect_cls(**dialect_args)

        return MockEngineStrategy.MockConnection(dialect, executor)

    class MockConnection(base.Connectable):
        """Connectable stub that routes all execution to ``execute``."""

        def __init__(self, dialect, execute):
            self._dialect = dialect
            # NOTE: the instance attribute shadows the execute() method
            # below, so the fallback NotImplementedError is never hit on
            # instances constructed this way.
            self.execute = execute

        engine = property(lambda s: s)
        dialect = property(attrgetter('_dialect'))
        name = property(lambda s: s._dialect.name)

        def contextual_connect(self, **kwargs):
            return self

        def execution_options(self, **kw):
            return self

        def compiler(self, statement, parameters, **kwargs):
            return self._dialect.compiler(
                statement, parameters, engine=self, **kwargs)

        def create(self, entity, **kwargs):
            kwargs['checkfirst'] = False
            from sqlalchemy.engine import ddl
            ddl.SchemaGenerator(
                self.dialect, self, **kwargs).traverse_single(entity)

        def drop(self, entity, **kwargs):
            kwargs['checkfirst'] = False
            from sqlalchemy.engine import ddl
            ddl.SchemaDropper(
                self.dialect, self, **kwargs).traverse_single(entity)

        def _run_visitor(self, visitorcallable, element,
                         connection=None,
                         **kwargs):
            kwargs['checkfirst'] = False
            visitorcallable(self.dialect, self,
                            **kwargs).traverse_single(element)

        def execute(self, object, *multiparams, **params):
            raise NotImplementedError()

# Instantiate to register the strategy under its name.
MockEngineStrategy()
|
{
"content_hash": "417229d87bc4142e715690e22bb79444",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 76,
"avg_line_length": 34.06692913385827,
"alnum_prop": 0.5552987403212759,
"repo_name": "denny820909/builder",
"id": "2db1bfcc56bef32c2f23cfd1311640f7ac42b8bf",
"size": "8892",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/engine/strategies.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
from core.models import Size
from rest_framework import serializers
from api.v2.serializers.fields.base import UUIDHyperlinkedIdentityField
class SizeSummarySerializer(serializers.HyperlinkedModelSerializer):
    """Summary representation of a Size for embedding in other payloads."""

    # Detail link is resolved via the instance's UUID rather than its pk.
    url = UUIDHyperlinkedIdentityField(
        view_name='api:v2:size-detail',
    )

    class Meta:
        model = Size
        fields = (
            'id',
            'uuid',
            'url',
            'alias',
            'name',
            'cpu',
            'disk',
            'mem',
            'active',
            'start_date',
            'end_date')
# TODO: Move to fields?
class SizeRelatedField(serializers.PrimaryKeyRelatedField):
    """Size relation: accepts a primary key on write, renders a summary
    on read."""

    def get_queryset(self):
        return Size.objects.all()

    def to_representation(self, value):
        # Re-fetch by pk to ensure a full Size instance — presumably the
        # incoming value may be a pk-only stub (TODO confirm against DRF
        # PrimaryKeyRelatedField behavior).
        instance = Size.objects.get(pk=value.pk)
        summary = SizeSummarySerializer(
            instance,
            context=self.context)
        return summary.data
|
{
"content_hash": "5f4f949c362462064bc01cd5a95ba506",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 71,
"avg_line_length": 24.842105263157894,
"alnum_prop": 0.5815677966101694,
"repo_name": "CCI-MOC/GUI-Backend",
"id": "7dd62e4ea72925583c33ebf095f0790c4837e6db",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/v2/serializers/summaries/size.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11571"
},
{
"name": "Python",
"bytes": "2565922"
},
{
"name": "Ruby",
"bytes": "1345"
},
{
"name": "Shell",
"bytes": "42018"
}
],
"symlink_target": ""
}
|
from pyvisdk.thirdparty import Enum
# vSphere enum: whether a diagnostic partition's storage is directly
# attached to the host or reached over the network.
DiagnosticPartitionStorageType = Enum(
    'directAttached',
    'networkAttached',
)
|
{
"content_hash": "84a592a4600895c0f5bacfb2ad1a4629",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 17.714285714285715,
"alnum_prop": 0.7580645161290323,
"repo_name": "xuru/pyvisdk",
"id": "8f557774412ed52e1cef1e2b52b3a7e9b971c89b",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/enums/diagnostic_partition_storage_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
def str2tuple(s, sep='/'):
    """
    Given the string representation of a tagged token, return the
    corresponding (word, tag) tuple.  The string is divided at the
    rightmost occurrence of *sep*; the tag part is uppercased.  If
    *sep* does not occur in *s*, return (s, None).

        >>> str2tuple('fly/NN')
        ('fly', 'NN')

    :type s: str
    :param s: The string representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings
        from tags.
    """
    word, found, tag = s.rpartition(sep)
    if not found:
        return (s, None)
    return (word, tag.upper())
def tuple2str(tagged_token, sep='/'):
    """
    Given the (word, tag) tuple representation of a tagged token,
    return the corresponding string: the word, the separator, then the
    tag.  If the tag is None, just return the bare word string.

        >>> tuple2str(('fly', 'NN'))
        'fly/NN'

    :type tagged_token: tuple(str, str)
    :param tagged_token: The tuple representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings
        from tags.
    """
    word, tag = tagged_token
    if tag is None:
        return word
    # The tag must not contain the separator, or the result could not be
    # unambiguously parsed back.
    assert sep not in tag, 'tag may not contain sep!'
    return sep.join((word, tag))
def untag(tagged_sentence):
    """
    Strip the tags from a tagged sentence, returning the list of bare
    words (the first element of each (word, tag) pair).

        >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
        ['John', 'saw', 'Mary']
    """
    return [token for (token, _tag) in tagged_sentence]
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
{
"content_hash": "f148946a0ffdeed46e070950472e3e98",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.6138059701492538,
"repo_name": "syllog1sm/TextBlob",
"id": "0e9269b6b4c84774c6544c21ad5fd9a490dddc54",
"size": "2406",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "text/nltk/tag/util.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1163"
},
{
"name": "Erlang",
"bytes": "1863"
},
{
"name": "JavaScript",
"bytes": "326"
},
{
"name": "Python",
"bytes": "3645100"
},
{
"name": "Shell",
"bytes": "6711"
}
],
"symlink_target": ""
}
|
"""
Editra Business Model Library:
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__cvsid__ = "$Id: __init__.py 69798 2011-11-23 02:59:40Z CJP $"
__revision__ = "$Revision: 69798 $"
#-----------------------------------------------------------------------------#
# Text Utils
from searcheng import *
from fchecker import *
from fileutil import *
from _dirmon import *
from fileimpl import *
from txtutil import *
from logfile import *
from backupmgr import *
from calllock import *
# Storage Classes
from histcache import *
from clipboard import *
# Networking utilities
from e_weblib import *
# Misc
from miscutil import *
from _efactory import *
from cmenumgr import *
from efilehist import *
from osutil import *
from _threads import *
from _trash import *
|
{
"content_hash": "314522ceba9637e5b11fcf8f085e3937",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 20.871794871794872,
"alnum_prop": 0.6130221130221131,
"repo_name": "ktan2020/legacy-automation",
"id": "0df19c57b137ab5f2e1c986ba521311f07f96fb3",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/ebmlib/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
"""Runs all matched benchmark suites on an Android device.
This script probes the Android phone via `adb` and uses the device information
to filter and run suitable benchmarks and optionally captures Tracy traces on
the Android phone.
It expects that `adb` is installed, and there is iree tools cross-compiled
towards Android. If to capture traces, another set of tracing-enabled iree
tools and the Tracy `capture` tool should be cross-compiled towards Android.
Example usages:
# Without trace generation
python3 run_benchmarks.py \
--normal_benchmark_tool_dir=/path/to/normal/android/target/tools/dir \
/path/to/host/build/dir
# With trace generation
python3 run_benchmarks.py \
--normal_benchmark_tool_dir=/path/to/normal/android/target/tools/dir \
--traced_benchmark_tool_dir=/path/to/tracy/android/target/tools/dir \
--trace_capture_tool=/path/to/host/build/tracy/capture \
/path/to/host/build/dir
"""
import sys
import pathlib
# Add build_tools python dir to the search path.
sys.path.insert(0, str(pathlib.Path(__file__).parent.with_name("python")))
import atexit
import subprocess
import tarfile
import shutil
from typing import Any, Optional, Sequence
from common.benchmark_config import BenchmarkConfig
from common.benchmark_driver import BenchmarkDriver
from common.benchmark_definition import (DriverInfo, execute_cmd,
execute_cmd_and_get_output,
get_git_commit_hash,
get_iree_benchmark_module_arguments,
wait_for_iree_benchmark_module_start)
from common.benchmark_suite import (MODEL_FLAGFILE_NAME, BenchmarkCase,
BenchmarkSuite)
from common.android_device_utils import (get_android_device_model,
get_android_device_info,
get_android_gpu_name)
from common.common_arguments import build_common_argument_parser
# Root directory to perform benchmarks in on the Android device.
ANDROID_TMPDIR = pathlib.PurePosixPath("/data/local/tmp/iree-benchmarks")
NORMAL_TOOL_REL_DIR = pathlib.PurePosixPath("normal-tools")
TRACED_TOOL_REL_DIR = pathlib.PurePosixPath("traced-tools")
def adb_push_to_tmp_dir(
    content: pathlib.Path,
    relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
    verbose: bool = False) -> pathlib.PurePosixPath:
  """Pushes content onto the Android device.

  Args:
    content: the full path to the source file.
    relative_dir: the directory to push to; relative to ANDROID_TMPDIR.
    verbose: whether to print the executed adb command.

  Returns:
    The full path to the content on the Android device.
  """
  filename = content.name
  android_path = ANDROID_TMPDIR / relative_dir / filename
  # When the output is a TTY, keep the default progress info output.
  # In other cases, redirect progress info to null to avoid bloating log files.
  stdout_redirect = None if sys.stdout.isatty() else subprocess.DEVNULL
  execute_cmd(["adb", "push", content.resolve(), android_path],
              verbose=verbose,
              stdout=stdout_redirect)
  return android_path
def adb_execute_and_get_output(
    cmd_args: Sequence[str],
    relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
    verbose: bool = False) -> str:
  """Executes command with adb shell.

  Switches to `relative_dir` relative to the android tmp directory before
  executing. Waits for completion and returns the command stdout.

  Args:
    cmd_args: a list containing the command to execute and its parameters
    relative_dir: the directory to execute the command in; relative to
      ANDROID_TMPDIR.
    verbose: whether to print the executed command.

  Returns:
    A string for the command output.
  """
  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
  cmd.extend(cmd_args)
  return execute_cmd_and_get_output(cmd, verbose=verbose)
def adb_execute(cmd_args: Sequence[str],
                relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
                verbose: bool = False) -> subprocess.CompletedProcess:
  """Executes command with adb shell.

  Switches to `relative_dir` relative to the android tmp directory before
  executing. Waits for completion. Output is streamed to the terminal.

  Args:
    cmd_args: a list containing the command to execute and its parameters
    relative_dir: the directory to execute the command in; relative to
      ANDROID_TMPDIR.
    verbose: whether to print the executed command.

  Returns:
    The completed process.
  """
  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
  cmd.extend(cmd_args)
  return execute_cmd(cmd, verbose=verbose)
def is_magisk_su():
  """Returns true if the Android device has a Magisk SU binary."""
  # Magisk's su identifies itself as "MagiskSU" in its help text.
  return "MagiskSU" in adb_execute_and_get_output(["su", "--help"])
def adb_execute_as_root(cmd_args: Sequence[Any]) -> subprocess.CompletedProcess:
  """Executes the given command as root."""
  # MagiskSU takes the command after "-c"; other su variants here are invoked
  # as "su root <cmd>" -- presumably the stock Android su syntax; verify on
  # the target device.
  cmd = ["su", "-c" if is_magisk_su() else "root"]
  cmd.extend(cmd_args)
  return adb_execute(cmd)
def adb_start_cmd(cmd_args: Sequence[str],
                  relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
                  verbose: bool = False) -> subprocess.Popen:
  """Executes command with adb shell in a directory and returns the handle
  without waiting for completion.

  Args:
    cmd_args: a list containing the command to execute and its parameters
    relative_dir: the directory to execute the command in; relative to
      ANDROID_TMPDIR.
    verbose: whether to print the executed command.

  Returns:
    A Popen object for the started command.
  """
  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
  cmd.extend(cmd_args)

  if verbose:
    print(f"cmd: {cmd}")
  # text=True so callers can read decoded str lines from process.stdout.
  return subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
def get_vmfb_full_path_for_benchmark_case(
    benchmark_case_dir: pathlib.Path) -> pathlib.Path:
  """Extracts the module (vmfb) path from a benchmark case's flagfile.

  Reads the flagfile in `benchmark_case_dir` and returns the canonicalized
  path of the `--module_file` flag value.

  Args:
    benchmark_case_dir: directory containing the case's flagfile.

  Returns:
    The resolved full path to the module file.

  Raises:
    ValueError: if the flagfile has no --module_file flag.
  """
  flagfile = benchmark_case_dir / MODEL_FLAGFILE_NAME
  for line in flagfile.read_text().splitlines():
    # Split on the first '=' only, so blank lines and flag values that
    # themselves contain '=' no longer raise an unpacking ValueError.
    flag_name, _, flag_value = line.strip().partition("=")
    if flag_name == "--module_file":
      # Realpath canonicalization matters. The caller may rely on that to track
      # which files it already pushed.
      return (benchmark_case_dir / flag_value).resolve()
  raise ValueError(f"{flagfile} does not contain a --module_file flag")
class AndroidBenchmarkDriver(BenchmarkDriver):
  """Android benchmark driver.

  Runs benchmark cases on an Android device over adb, pushing tools and
  module files into ANDROID_TMPDIR on demand and optionally collecting
  Tracy traces.
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Host path -> device path cache so each file is adb-pushed only once.
    self.already_pushed_files = {}

  def run_benchmark_case(self, benchmark_case: BenchmarkCase,
                         benchmark_results_filename: Optional[pathlib.Path],
                         capture_filename: Optional[pathlib.Path]) -> None:
    """Runs one case: pushes its files, then benchmarks and/or captures."""
    benchmark_case_dir = benchmark_case.benchmark_case_dir
    # TODO(#11076): Support run_config.
    if benchmark_case_dir is None:
      raise ValueError("benchmark_case_dir can't be None.")

    # Mirror the host's case-directory layout on the device.
    android_case_dir = pathlib.PurePosixPath(
        benchmark_case_dir.relative_to(self.config.root_benchmark_dir))

    self.__push_vmfb_file(benchmark_case_dir)
    self.__check_and_push_file(benchmark_case_dir / MODEL_FLAGFILE_NAME,
                               android_case_dir)

    taskset = self.__deduce_taskset(benchmark_case.bench_mode)

    if benchmark_results_filename is not None:
      self.__run_benchmark(android_case_dir=android_case_dir,
                           tool_name=benchmark_case.benchmark_tool_name,
                           driver_info=benchmark_case.driver_info,
                           results_filename=benchmark_results_filename,
                           taskset=taskset)

    if capture_filename is not None:
      self.__run_capture(android_case_dir=android_case_dir,
                         tool_name=benchmark_case.benchmark_tool_name,
                         capture_filename=capture_filename,
                         taskset=taskset)

  def __run_benchmark(self, android_case_dir: pathlib.PurePosixPath,
                      tool_name: str, driver_info: DriverInfo,
                      results_filename: pathlib.Path, taskset: str):
    """Runs the normal (untraced) benchmark tool and pulls back the results."""
    if self.config.normal_benchmark_tool_dir is None:
      raise ValueError("normal_benchmark_tool_dir can't be None.")

    host_tool_path = self.config.normal_benchmark_tool_dir / tool_name
    android_tool = self.__check_and_push_file(host_tool_path,
                                              NORMAL_TOOL_REL_DIR)
    cmd = [
        "taskset", taskset, android_tool, f"--flagfile={MODEL_FLAGFILE_NAME}"
    ]
    if tool_name == "iree-benchmark-module":
      cmd.extend(
          get_iree_benchmark_module_arguments(
              results_filename=f"'{results_filename.name}'",
              driver_info=driver_info,
              benchmark_min_time=self.config.benchmark_min_time))

    result_json = adb_execute_and_get_output(cmd,
                                             android_case_dir,
                                             verbose=self.verbose)

    # Pull the result file back onto the host and set the filename for later
    # return.
    pull_cmd = [
        "adb", "pull",
        ANDROID_TMPDIR / android_case_dir / results_filename.name,
        results_filename
    ]
    execute_cmd_and_get_output(pull_cmd, verbose=self.verbose)

    if self.verbose:
      print(result_json)

  def __run_capture(self, android_case_dir: pathlib.PurePosixPath,
                    tool_name: str, capture_filename: pathlib.Path,
                    taskset: str):
    """Runs the traced benchmark tool and collects a Tracy capture from it."""
    capture_config = self.config.trace_capture_config
    if capture_config is None:
      raise ValueError("capture_config can't be None.")

    host_tool_path = capture_config.traced_benchmark_tool_dir / tool_name
    android_tool = self.__check_and_push_file(host_tool_path,
                                              TRACED_TOOL_REL_DIR)
    run_cmd = [
        "TRACY_NO_EXIT=1", f"IREE_PRESERVE_DYLIB_TEMP_FILES={ANDROID_TMPDIR}",
        "taskset", taskset, android_tool, f"--flagfile={MODEL_FLAGFILE_NAME}"
    ]

    # Just launch the traced benchmark tool with TRACY_NO_EXIT=1 without
    # waiting for the adb command to complete as that won't happen.
    process = adb_start_cmd(run_cmd, android_case_dir, verbose=self.verbose)

    wait_for_iree_benchmark_module_start(process, self.verbose)

    # Now it's okay to collect the trace via the capture tool. This will
    # send the signal to let the previously waiting benchmark tool to
    # complete.
    capture_cmd = [
        capture_config.trace_capture_tool, "-f", "-o", capture_filename
    ]
    # If verbose, just let the subprocess print its output. The subprocess
    # may need to detect if the output is a TTY to decide whether to log
    # verbose progress info and use ANSI colors, so it's better to use
    # stdout redirection than to capture the output in a string.
    stdout_redirect = None if self.verbose else subprocess.DEVNULL
    execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)

  def __deduce_taskset(self, bench_mode: Sequence[str]) -> str:
    """Deduces the CPU affinity taskset mask according to benchmark modes."""
    # TODO: we actually should check the number of cores the phone have.
    if "big-core" in bench_mode:
      return "80" if "1-thread" in bench_mode else "f0"
    if "little-core" in bench_mode:
      return "08" if "1-thread" in bench_mode else "0f"
    # Not specified: use the 7th core.
    return "80"

  def __push_vmfb_file(self, benchmark_case_dir: pathlib.Path):
    """Pushes the case's compiled module, preserving its relative directory."""
    vmfb_path = get_vmfb_full_path_for_benchmark_case(benchmark_case_dir)
    vmfb_rel_dir = vmfb_path.parent.relative_to(self.config.root_benchmark_dir)
    self.__check_and_push_file(vmfb_path, pathlib.PurePosixPath(vmfb_rel_dir))

  def __check_and_push_file(self, host_path: pathlib.Path,
                            relative_dir: pathlib.PurePosixPath):
    """Checks if the file has been pushed and pushes it if not."""
    android_path = self.already_pushed_files.get(host_path)
    if android_path is not None:
      return android_path

    android_path = adb_push_to_tmp_dir(host_path,
                                       relative_dir=relative_dir,
                                       verbose=self.verbose)

    self.already_pushed_files[host_path] = android_path
    return android_path
def set_cpu_frequency_scaling_governor(governor: str):
  """Pushes the repo's CPU-governor script to the device and runs it as root."""
  git_root = execute_cmd_and_get_output(["git", "rev-parse", "--show-toplevel"])
  cpu_script = (pathlib.Path(git_root) / "build_tools" / "benchmarks" /
                "set_android_scaling_governor.sh")
  android_path = adb_push_to_tmp_dir(cpu_script)
  adb_execute_as_root([android_path, governor])
def set_gpu_frequency_scaling_policy(policy: str):
  """Sets the GPU scaling policy via a device-specific root script.

  Supports Pixel 6 / 6 Pro (Mali) and Adreno GPUs; raises for anything else.
  """
  git_root = execute_cmd_and_get_output(["git", "rev-parse", "--show-toplevel"])
  device_model = get_android_device_model()
  gpu_name = get_android_gpu_name()
  benchmarks_tool_dir = pathlib.Path(git_root) / "build_tools" / "benchmarks"
  # Pick the script matching the device's GPU.
  if device_model == "Pixel-6" or device_model == "Pixel-6-Pro":
    gpu_script = benchmarks_tool_dir / "set_pixel6_gpu_scaling_policy.sh"
  elif gpu_name.lower().startswith("adreno"):
    gpu_script = benchmarks_tool_dir / "set_adreno_gpu_scaling_policy.sh"
  else:
    raise RuntimeError(
        f"Unsupported device '{device_model}' for setting GPU scaling policy")
  android_path = adb_push_to_tmp_dir(gpu_script)
  adb_execute_as_root([android_path, policy])
def main(args):
  """Top-level entry: configures the device, runs benchmarks, saves results.

  Registers atexit hooks so device state (frequency governors, adb port
  forwards, temp dirs) is restored even if benchmarking fails partway.
  """
  device_info = get_android_device_info(args.verbose)
  if args.verbose:
    print(device_info)

  commit = get_git_commit_hash("HEAD")
  benchmark_config = BenchmarkConfig.build_from_args(args, commit)
  benchmark_suite = BenchmarkSuite.load_from_benchmark_suite_dir(
      benchmark_config.root_benchmark_dir)
  benchmark_driver = AndroidBenchmarkDriver(device_info=device_info,
                                            benchmark_config=benchmark_config,
                                            benchmark_suite=benchmark_suite,
                                            benchmark_grace_time=1.0,
                                            verbose=args.verbose)

  if args.continue_from_directory:
    # Merge in previous benchmarks and captures.
    benchmark_driver.add_previous_benchmarks_and_captures(
        args.continue_from_directory)

  # Pin frequencies for stable measurements; restore defaults on exit.
  if args.pin_cpu_freq:
    set_cpu_frequency_scaling_governor("performance")
    atexit.register(set_cpu_frequency_scaling_governor, "schedutil")
  if args.pin_gpu_freq:
    set_gpu_frequency_scaling_policy("performance")
    atexit.register(set_gpu_frequency_scaling_policy, "default")

  # Clear the benchmark directory on the Android device first just in case
  # there are leftovers from manual or failed runs.
  execute_cmd_and_get_output(["adb", "shell", "rm", "-rf", ANDROID_TMPDIR],
                             verbose=args.verbose)

  if not args.no_clean:
    # Clear the benchmark directory on the Android device.
    atexit.register(execute_cmd_and_get_output,
                    ["adb", "shell", "rm", "-rf", ANDROID_TMPDIR],
                    verbose=args.verbose)
    # Also clear temporary directory on the host device.
    atexit.register(shutil.rmtree, args.tmp_dir)

  # Tracy client and server communicate over port 8086 by default. If we want
  # to capture traces along the way, forward port via adb.
  trace_capture_config = benchmark_config.trace_capture_config
  if trace_capture_config:
    execute_cmd_and_get_output(["adb", "forward", "tcp:8086", "tcp:8086"],
                               verbose=args.verbose)
    atexit.register(execute_cmd_and_get_output,
                    ["adb", "forward", "--remove", "tcp:8086"],
                    verbose=args.verbose)

  benchmark_driver.run()

  benchmark_results = benchmark_driver.get_benchmark_results()
  if args.output is not None:
    with open(args.output, "w") as f:
      f.write(benchmark_results.to_json_str())

  if args.verbose:
    print(benchmark_results.commit)
    print(benchmark_results.benchmarks)

  if trace_capture_config:
    # Put all captures in a tarball and remove the original files.
    with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
      for capture_filename in benchmark_driver.get_capture_filenames():
        tar.add(capture_filename)

  benchmark_errors = benchmark_driver.get_benchmark_errors()
  if benchmark_errors:
    print("Benchmarking completed with errors", file=sys.stderr)
    raise RuntimeError(benchmark_errors)
if __name__ == "__main__":
  # Parse the shared benchmark CLI arguments and run.
  args = build_common_argument_parser().parse_args()
  main(args)
|
{
"content_hash": "2d2f59b36cddab2457806d5efebb9120",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 80,
"avg_line_length": 40.751231527093594,
"alnum_prop": 0.6601390148080991,
"repo_name": "google/iree",
"id": "705592c7ca6b19a73369e68a956449322c2ed937",
"size": "16785",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build_tools/benchmarks/run_benchmarks_on_android.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
}
|
from lettuce import step
from lettuce import world
from http_server import build_response
@step('a code of (\d+)')
def a_code(step, code):
    # Store the status code from the scenario on the shared `world` object.
    world.code = int(code)
@step('I build the response')
def call_build_response(step):
    # Build the response using the code captured by the previous step.
    world.response = build_response(world.code, "placeholder data")
@step('I receive (.+)')
def compare(step, expected):
    # Assert the built response matches the text expected by the scenario.
    assert world.response == expected, "Got %s" % world.response
|
{
"content_hash": "d6a45bd096c250d1bbbb206fe7ab1a5e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 23.27777777777778,
"alnum_prop": 0.6992840095465394,
"repo_name": "markcharyk/GeventServer",
"id": "b0478c8c4092b66dac99fea074f9ba7d7709c755",
"size": "419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "features/build_response_steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10122"
}
],
"symlink_target": ""
}
|
import time
from gtestm import cli
from gtestm.netcfg import config
from gtestm.run import linear
from gtestm.run import general
from gtestm.run import parallel
from gtestm.utils import testdata
def comp_serial_parallel():
    """Run the suite serially then in parallel; compare timings and records."""
    cfg = config.Config()
    serial_td = testdata.TestData()
    parallel_td = testdata.TestData()
    serial_sd = testdata.StateData()
    parallel_sd = testdata.StateData()
    start = time.time()
    print("Starting serial tests")
    print(linear.linear_run(cfg, serial_td, serial_sd))
    # Recorded duration; the printed "Took" value re-reads the clock and can
    # differ by a few microseconds.
    serial_rt = time.time() - start
    print("Took", time.time() - start, "seconds")
    print("Starting parallel tests")
    start = time.time()
    print(parallel.parallel_run(cfg, parallel_td, parallel_sd))
    parallel_rt = time.time() - start
    print("Took", time.time() - start, "seconds")
    print("Serial runs:", serial_rt)
    print("Parallel runs:", parallel_rt)
    print("Differs by", serial_rt - parallel_rt, "seconds.")
    # Dump each test's serial vs parallel record side by side.
    for file in serial_td.tests:
        s_t = serial_td.tests[file]
        p_t = parallel_td.tests[file]
        print(s_t)
        print(p_t)
def parallel_check():
    """Run the suite once in parallel and return the collected TestData."""
    cfg = config.Config()
    parallel_td = testdata.TestData()
    parallel_sd = testdata.StateData()
    parallel.parallel_run(cfg, parallel_td, parallel_sd)
    return parallel_td
def concurrency_check():
    """Run the parallel suite twice and report tests whose status differs.

    A status mismatch between the two runs points at nondeterminism in the
    parallel runner.
    """
    print("hi")  # NOTE(review): leftover debug prints; consider removing
    td0 = parallel_check()
    print("hi2")
    td1 = parallel_check()
    print("hi")
    for test in td0.tests:
        if td0.tests[test]['status'] != td1.tests[test]['status']:
            print(test)
            print(td0.tests[test]['status'])
            print(td1.tests[test]['status'])
    print("end")
if __name__ == "__main__":
    # This module is meant to be launched through main.py's diag mode.
    print("Please launch main.py, instead, with the following command: main.py -m diag")
|
{
"content_hash": "93cd1563c0bd41ae62e6471223464d8c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.6343366778149386,
"repo_name": "AlterionX/CSGeist",
"id": "26450a84e807e694776abb98fa768991fa57ef65",
"size": "1817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gtestm/diagnostic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52546"
}
],
"symlink_target": ""
}
|
import __builtin__
__builtin__.NoneType = type(None)
class Object(object):
    """
    Our own base class. Contains methods to simplify serialization.
    """

    # Bumped by subclasses when their pickled layout changes; on a mismatch
    # __setstate__ calls after_upgrade(old_version).
    __version__ = 0

    # Attribute names excluded from the pickled state.
    nosave = [ ]

    def __getstate__(self):
        # Copy so deleting nosave keys doesn't mutate the live instance dict.
        rv = vars(self).copy()

        for f in self.nosave:
            if f in rv:
                del rv[f]

        # Stash the class version alongside the state for upgrade detection.
        rv["__version__"] = self.__version__

        return rv

    # None, to prevent this from being called when unnecessary.
    after_setstate = None

    def __setstate__(self, new_dict):
        # Pop so __version__ never lands in the instance dict itself.
        version = new_dict.pop("__version__", 0)

        self.__dict__.update(new_dict)

        if version != self.__version__:
            self.after_upgrade(version) # E1101

        if self.after_setstate:
            self.after_setstate() # E1102

# We don't handle slots with this mechanism, since the call to vars should
# throw an error.
# Global registry mapping sentinel name -> its unique instance.
sentinels = { }

class Sentinel(object):
    """
    This is used to represent a sentinel object. There will be exactly one
    sentinel object with a name existing in the system at any time.
    """

    def __new__(cls, name):
        # Interning: reuse the existing sentinel with this name, if any.
        rv = sentinels.get(name, None)

        if rv is None:
            rv = object.__new__(cls, name)
            sentinels[name] = rv

        return rv

    def __init__(self, name):
        self.name = name

    def __reduce__(self):
        # Pickle by name, so unpickling re-enters __new__ and preserves the
        # one-instance-per-name invariant.
        return (Sentinel, (self.name, ))
|
{
"content_hash": "fa2e87788308f8ce95699c8a8426ed30",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 74,
"avg_line_length": 21.21212121212121,
"alnum_prop": 0.5578571428571428,
"repo_name": "kfcpaladin/sze-the-game",
"id": "fb72fc7375fee4d99fa461378472d2bcaa937789",
"size": "2543",
"binary": false,
"copies": "1",
"ref": "refs/heads/orphan",
"path": "renpy/object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3537204"
},
{
"name": "Ren'Py",
"bytes": "943500"
},
{
"name": "Shell",
"bytes": "2115"
},
{
"name": "Visual Basic",
"bytes": "287"
}
],
"symlink_target": ""
}
|
descr = """Image Processing SciKit
Provide image processing capabilities to SciPy, including:
- Image IO without PIL dependencies
- Image warping (wrappers based on ndimage)
- Connected components
- Color-space manipulations
- Linear space-invariant filters
- Hough transform
- Shortest paths
- Grey-level co-occurrence matrices
- Edge detection
- Image collections
"""
import os
import sys
DISTNAME = 'scikits.image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
# Package metadata consumed by configuration() below.
# FIX: the original assignments to MAINTAINER and MAINTAINER_EMAIL had
# trailing commas, which made them 1-tuples instead of strings and
# corrupted the generated distribution metadata.
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = 'stefan@sun.ac.za'
URL = 'http://github.com/stefanv/scikits.image'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = URL
VERSION = '0.1'
import setuptools
from numpy.distutils.core import setup
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
    """Return the numpy.distutils Configuration describing this package."""
    # Remove a stale MANIFEST so distutils regenerates it from the tree.
    if os.path.exists('MANIFEST'): os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration
    config = Configuration(package_name, parent_package, top_path,
        version = VERSION,
        maintainer = MAINTAINER,
        maintainer_email = MAINTAINER_EMAIL,
        description = DESCRIPTION,
        license = LICENSE,
        url = URL,
        download_url = DOWNLOAD_URL,
        long_description = LONG_DESCRIPTION)

    return config
if __name__ == "__main__":
    # Build/install via numpy.distutils; namespace_packages makes this part
    # of the 'scikits.*' namespace.
    setup(configuration = configuration,
        install_requires = 'numpy',
        namespace_packages = ['scikits'],
        packages = setuptools.find_packages(),
        include_package_data = True,
        #test_suite="tester", # for python setup.py test
        zip_safe = True, # the package can run out of an .egg file
        classifiers =
            [ 'Development Status :: 1 - Planning',
              'Environment :: Console',
              'Intended Audience :: Developers',
              'Intended Audience :: Science/Research',
              'License :: OSI Approved :: BSD License',
              'Topic :: Scientific/Engineering'])
|
{
"content_hash": "4f6828c4831232e22e09fcc62d837fb3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 75,
"avg_line_length": 33.87692307692308,
"alnum_prop": 0.5958219800181653,
"repo_name": "mahipal/SciPy-Image-Kit",
"id": "b5cbe5e78485c5403a906708b9d148c4d1b8468b",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
from tqdm import tqdm
from scipy.interpolate import interp1d
import pandas as pd
import numpy as np
import wisps
from wisps import drop_nan
from astropy.coordinates import SkyCoord
#import pymc3 as pm
from wisps.simulations import HS, MAG_LIMITS, Rsun, Zsun, custom_volume, SELECTION_FUNCTION, SPGRID
import wisps.simulations as wispsim
#from .binaries import make_systems
import numba
import dask
from scipy.interpolate import griddata
import wisps.simulations as wispsim
import pickle
import popsims
from multiprocessing import Pool
POINTINGS=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
#some re-arragments because the limiting distance depends on the pointing
dist_arrays=pd.DataFrame.from_records([x.dist_limits for x in POINTINGS]).applymap(lambda x:np.vstack(x).astype(float))
#ignore pymc, ignore pre-computed distances
POINTING_POINTING_NAMES= dict(zip([x.name for x in POINTINGS], POINTINGS))
#BAYESIAN_DISTANCES_VOLUMES=np.load(wisps.OUTPUT_FILES+'/bayesian_pointings.pkl', allow_pickle=True)
corr_pols=wisps.POLYNOMIAL_RELATIONS['mag_limit_corrections']
#imports
#----------------------
#constants
Rsun=wispsim.Rsun
Zsun=wispsim.Zsun
spgrid=SPGRID
#-----------------------
PNTS=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
pnt_names=[x.name for x in PNTS]
#print (pnts[0].survey)
COORDS=SkyCoord([p.coord for p in PNTS ])
galc=COORDS.transform_to('galactic')
LBS=np.vstack([[x.coord.galactic.l.radian,x.coord.galactic.b.radian] for x in PNTS ])
LS=galc.l.radian
BS=galc.b.radian
#wispsim.make_pointings()
@numba.jit(nopython=True)
def fit_snr_exptime(ts, mag, d, e, f, m0):
    # Linear model for log10(SNR) in magnitude and log exposure time
    # (reference exposure 1000 s); callers exponentiate the result (10**...).
    return d*(mag-m0)+e*np.log10(ts/1000)+f
@numba.jit(nopython=True)
def mag_unc_exptime_relation( mag, t, m0, beta, a, b):
    # Magnitude uncertainty: a power law in exposure time (reference 1000 s)
    # times an exponential in magnitude offset from m0.
    tref = 1000.
    #m0, beta, a, b= params
    return ((t/tref)**-beta)*(10**(a*(mag-m0)+b))
def probability_of_selection(spt, snr):
    """
    Probability of selection for a given SNR and spectral type.

    Linearly interpolates the precomputed selection function on a
    (spt, log10(snr)) grid.
    """
    ref_df=SELECTION_FUNCTION.dropna()
    #self.data['spt']=self.data.spt.apply(splat.typeToNum)
    interpoints=np.array([ref_df.spt.values, ref_df.logsnr.values]).T
    return griddata(interpoints, ref_df.tot_label.values , (spt, np.log10(snr)), method='linear')
def get_distances_and_pointings(df, h):
    """Assign each simulated source a survey pointing and a distance draw.

    Pointings are drawn with probability proportional to their volume for
    scale height `h`; distances come from precomputed per-pointing,
    per-spectral-type samples.  Adds dist/pntname/pnt/exp_image/exp_grism
    columns to `df` (mutated in place) and returns it.
    """
    DISTANCE_SAMPLES=pd.read_pickle(wisps.OUTPUT_FILES+'/distance_samples{}'.format(h))
    volumes=np.vstack([np.nansum(list(x.volumes[h].values())) for x in POINTINGS]).flatten()
    # CDF over pointings weighted by survey volume, used for the random draw.
    volumes_cdf=np.cumsum(volumes)/np.nansum(volumes)
    pntindex=np.arange(0, len(POINTINGS))
    names=np.array([x.name for x in POINTINGS])
    exptimes_mag=np.array([x.imag_exptime for x in POINTINGS ])
    exptime_spec= np.array([x.exposure_time for x in POINTINGS])
    # NOTE(review): this rounded spt is only used for its length; it is
    # overwritten by the floored version below.
    spt_r=np.round(df.spt.values)
    pntindex_to_use=wisps.random_draw(pntindex, volumes_cdf, nsample=len(spt_r)).astype(int)
    pnts=np.take(names, pntindex_to_use)
    pntings=np.take( np.array(POINTINGS), pntindex_to_use)
    #print ( pntings)
    exps= np.take(exptimes_mag, pntindex_to_use)
    exp_grism= np.take(exptime_spec, pntindex_to_use)
    spt_r=np.floor(df.spt.values).astype(int)
    # One random distance draw per source from its pointing's samples,
    # keyed by (integer spectral type, pointing index).
    dists_for_spts= np.array([np.random.choice(DISTANCE_SAMPLES[k][idx]) for idx, k in zip(pntindex_to_use, spt_r)])
    df['dist']= dists_for_spts
    df['pntname']= pnts
    df['pnt']=pntings#df.pntname.apply(lambda x: np.array(PNTS)[pnt_names.index(x)])
    df['exp_image']= exps
    df['exp_grism']=exp_grism
    return df
def get_snr_and_selection_prob(df):
    """
    Attach per-filter J-band SNR estimates and a selection probability to
    ``df``.

    Evaluates the fitted SNR--exposure-time relation (MAG_LIMITS['snr_exp'])
    for each HST filter, takes the per-source minimum SNR across filters,
    and converts it to a selection probability via
    ``probability_of_selection``.  Mutates ``df`` in place and returns it.
    """
    # The relations are fit in log10(SNR), hence the 10** on each.
    snrjs110= 10**(fit_snr_exptime(df['exp_grism'].values, df['appF110'].values, *list(MAG_LIMITS['snr_exp']['F110'])))
    snrjs140= 10**(fit_snr_exptime(df['exp_grism'].values, df['appF140'].values, *list(MAG_LIMITS['snr_exp']['F140'])))
    snrjs160= 10**(fit_snr_exptime(df['exp_grism'].values, df['appF160'].values, *list(MAG_LIMITS['snr_exp']['F160'])))
    df['snrj110']=snrjs110
    df['snrj140']= snrjs140
    # NOTE(review): column name 'snrjs160' (extra 's') is inconsistent with
    # 'snrj110'/'snrj140'; kept as-is because downstream readers may depend
    # on it -- confirm before renaming.
    df['snrjs160']= snrjs160
    # Conservative combined SNR: the worst (smallest) of the three filters.
    df['snrj']=np.nanmin(np.vstack([snrjs110, snrjs140, snrjs160]), axis=0)
    df['slprob']=probability_of_selection(df.spt.values, df['snrj'].values)
    return df
def get_absmags_hst_filters(df, mag_key):
    """
    Assign absolute and apparent magnitudes in one HST filter.

    mag_key : filter name without the trailing 'W', e.g. 'F110', 'F140',
        'F160'.  Singles get a magnitude drawn from the spt--absolute-mag
        relation; unresolved binaries get the flux sum of the primary and
        secondary components.  Mutates ``df`` in place (adds
        'abs<filter>', 'prim_abs<filter>', 'sec_abs<filter>',
        'app<filter>', 'app<filter>er') and returns it.
    """
    #load polynomial relations (module-level via wisps)
    relabsmags=wisps.POLYNOMIAL_RELATIONS['abs_mags']
    # relcolors / color_key are only used by the commented-out color-based
    # alternative below; kept for reference.
    relcolors=wisps.POLYNOMIAL_RELATIONS['colors']
    # assumes df.is_binary is a boolean array (used with ~) -- TODO confirm
    binary_flag=df.is_binary.values
    #compute absolute magnitudes for singles: polynomial mean + Gaussian scatter
    res=np.ones_like(df.spt.values)*np.nan
    abs_mags_singles=np.random.normal((relabsmags[mag_key+'W'][0])(df.spt.values), relabsmags[mag_key+'W'][1])
    #for binaries, base this on their absolute J and H mag
    color_key='j_'+mag_key.lower()
    #if mag_key=='F160':
    #    color_key='h_f160'
    #colors=np.random.normal((relcolors[color_key][0])(df.spt.values), relcolors[color_key][1])
    #abs_mags_binaries=df['abs_2MASS_J']-colors
    abs_mag_primaries=np.random.normal((relabsmags[mag_key+'W'][0])(df.prim_spt.values) , relabsmags[mag_key+'W'][1])
    abs_mag_secondaries=np.random.normal((relabsmags[mag_key+'W'][0])(df.sec_spt.values) , relabsmags[mag_key+'W'][1])
    # unresolved binary magnitude: combine component fluxes, not magnitudes
    abs_mags_binaries=-2.5*np.log10(10**(-0.4* abs_mag_primaries)+10**(-0.4* abs_mag_secondaries))
    # fill res: singles from the single-object draw, binaries from the
    # combined-light draw
    np.place(res, ~binary_flag, abs_mags_singles[~binary_flag])
    np.place(res, binary_flag, abs_mags_binaries[binary_flag])
    #absolute mag columns
    df['abs{}'.format(mag_key)]=res
    df['prim_abs{}'.format(mag_key)]=abs_mag_primaries
    df['sec_abs{}'.format(mag_key)]= abs_mag_secondaries
    #df['abs{}'.format(mag_key)]=abs_mags_singles
    #apparent mag from the distance modulus, then perturbed by the fitted
    #magnitude-uncertainty model (depends on imaging exposure time)
    app=res+5*np.log10(df.dist/10.0)
    app_er= mag_unc_exptime_relation(app.values, df['exp_image'].values, *list( MAG_LIMITS['mag_unc_exp'][mag_key]))
    df['app{}'.format(mag_key)]= np.random.normal(app, app_er)
    df['app{}'.format(mag_key)+'er']=app_er
    return df
def compute_effective_numbers(model, h):
    """
    Simulate a population for one evolutionary model and scale height, flag
    sources that fail the survey cuts, and write the result to HDF.

    model : str -- evolutionary model name (e.g. 'baraffe2003')
    h : scale height value (one of wispsim.HS) -- units per wispsim;
        TODO confirm.
    """
    # Build synthetic systems (20% binaries) over the given mass/age range.
    df0=popsims.make_systems(model_name=model, bfraction=0.2,
                             mass_age_range=[0.01, 0.15, 0.1, 8.0],
                             nsample=int(1e6),
                             save=True)
    #drop nans in spt
    df0=(df0[~df0.spt.isna()]).reset_index(drop=True)
    # keep only spectral types in the 17--41 range
    mask= np.logical_and(df0.spt>=17, df0.spt<=41)
    df0=(df0[mask]).reset_index(drop=True)
    #assign distances and pointings
    df0=get_distances_and_pointings(df0, h)
    #assign absolute/apparent mags in each HST filter
    df0=get_absmags_hst_filters(df0, 'F110')
    df0=get_absmags_hst_filters(df0, 'F140')
    df0=get_absmags_hst_filters(df0, 'F160')
    print(df0.keys())
    #add snr and selection probability
    df0=get_snr_and_selection_prob(df0)
    # per-source magnitude limits from each source's assigned pointing
    mag_limits=pd.DataFrame.from_records(df0.pnt.apply(lambda x: x.mag_limits).values)
    # Flag (do not drop) sources fainter than the spt-corrected magnitude
    # limit in any filter, or with combined SNR < 3.  corr_pols is a
    # module-level global defined elsewhere in this file.
    flags0=df0.appF110 >= mag_limits['F110']+(corr_pols['F110W'][0])(df0.spt)
    flags1=df0.appF140 >= mag_limits['F140']+(corr_pols['F140W'][0])(df0.spt)
    flags2=df0.appF160 >= mag_limits['F160']+(corr_pols['F160W'][0])(df0.spt)
    flags3= df0.snrj <3.
    flags=np.logical_or.reduce([flags0,flags1, flags2, flags3])
    df0['is_cut']=flags
    df0.to_hdf(wisps.OUTPUT_FILES+'/final_simulated_sample_cut_binaries_updatedrelations.h5', key=str(model)+str(h)+str('spt_abs_mag'))
    #cutdf.to_hdf(wisps.OUTPUT_FILES+'/final_simulated_sample_cut.h5', key=str(model)+str('h'))
def compute_effective_numbers_old(model, h):
    """
    Legacy version of ``compute_effective_numbers`` (superseded; not called
    from ``__main__``).  Kept for reference: simulates a population,
    assigns pointings/distances, computes magnitudes and SNR, applies the
    survey cuts, and writes the surviving sample to HDF.
    """
    #DISTANCES=pd.DataFrame(pd.read_pickle(wisps.OUTPUT_FILES+'/cdf_distance_tables.pkl')[h])
    ##given a distribution of masses, ages, teffss
    ## based on my polynomial relations and my own selection function
    DISTANCE_SAMPLES=pd.read_pickle(wisps.OUTPUT_FILES+'/distance_samples{}.gz'.format(h))
    # volume-weighted pointing assignment (same scheme as the new pipeline)
    volumes=np.vstack([np.nansum(list(x.volumes[h].values())) for x in POINTINGS]).flatten()
    volumes_cdf=np.cumsum(volumes)/np.nansum(volumes)
    pntindex=np.arange(0, len(POINTINGS))
    names=np.array([x.name for x in POINTINGS])
    exptimes_mag=np.array([x.imag_exptime for x in POINTINGS ])
    exptime_spec= np.array([x.exposure_time for x in POINTINGS])
    syst=make_systems_nocombined_light(model_name=model, bfraction=0.2, nsample=5e3, recompute=True)
    print (len(syst))
    #mask_array= np.logical_and(syst['system_spts']).flatten()
    spts=(syst['system_spts']).flatten()
    print ('----------------------------')
    print (model, h)
    print ('how many ......... {}'.format(len(spts)))
    # keep spectral types in the 17--41 range
    mask= np.logical_and( spts>=17, spts<=41)
    spts=spts[mask]
    spt_r=np.round(spts)
    pntindex_to_use=wisps.random_draw(pntindex, volumes_cdf, nsample=len(spts)).astype(int)
    pnts=np.take(names, pntindex_to_use)
    exps= np.take(exptimes_mag, pntindex_to_use)
    exp_grism= np.take(exptime_spec, pntindex_to_use)
    #LONGS=(BAYESIAN_DISTANCES_VOLUMES['ls'][h]).flatten()
    #LATS=(BAYESIAN_DISTANCES_VOLUMES['bs'][h]).flatten()
    #retrieve key by key, let's see ho long it takes to run
    spt_r=np.floor(spts).astype(int)
    # one distance draw per source from its pointing's samples for its spt
    dists_for_spts= np.array([np.random.choice(DISTANCE_SAMPLES[k][idx]) for idx, k in zip(pntindex_to_use, spt_r)])
    #rs= pnt_distances[:,1][pntindex_to_use]
    #zs= pnt_distances[:,2][pntindex_to_use]
    #@np.vectorize
    #def match_dist_to_spt(spt, idxn):
    """
    one to one matching between distance and spt
    to avoid all sorts of interpolations or binning
    """
    #assign distance
    #spt_r=np.floor(spt)
    #d=np.nan
    #r=np.nan
    #z=np.nan
    #if (spt_r in DISTANCE_WITHIN_LIMITS_BOOLS.keys()):
    #bools=[(DISTANCE_WITHIN_LIMITS_BOOLS[k]) for x in spt_r][idxn]
    #dist_array=((BAYESIAN_DISTANCES_VOLUMES[h]['distances'])[idxn])#[bools]
    #rs=((BAYESIAN_DISTANCES_VOLUMES[h]['rs'])[idxn])#[bools]
    #zs=((BAYESIAN_DISTANCES_VOLUMES[h]['zs'])[idxn])#[bools]
    #draw a distance
    #if len(dist_array[bools]) <= 0 :
    #    pass
    #else:
    #    bidx=np.random.choice(len(dist_array[bools]))
    #    d= (dist_array[bools])[bidx]
    #    r=(rs[bools])[bidx]
    #    z=(zs[bools])[bidx]
    #return dist_array, rs, zs
    #polynomial relations
    relabsmags=wisps.POLYNOMIAL_RELATIONS['abs_mags']
    relsnrs=wisps.POLYNOMIAL_RELATIONS['snr']
    #print (relabsmags)
    #print (relsnrs)
    #add pointings
    #get distances withing magnitude limits
    #dbools=[DISTANCE_WITHIN_LIMITS_BOOLS[k] for k in spt_r]
    #assign distances using cdf-inversion
    #pnt_distances= np.vstack([draw_distances(x, 1e5, h) for x in tqdm(POINTINGS)])
    #pnt_distances= (DISTANCES[names].values)#np.vstack([draw_distances(x, 1e5, h) for x in tqdm(POINTINGS)])
    #dists_for_spts=np.vstack(BAYESIAN_DISTANCES_VOLUMES[h]['distances']).flatten()[pntindex_to_use]#[dbools]
    #rs=np.vstack(BAYESIAN_DISTANCES_VOLUMES[h]['rs']).flatten()[pntindex_to_use]#[dbools]
    #zs=np.vstack(BAYESIAN_DISTANCES_VOLUMES[h]['zs']).flatten()[pntindex_to_use]#[dbools]
    #distance_index= np.random.choice(np.arange(len(dist_array), len(spts)))
    #dists_for_spts= dist_array[distance_index]
    #rs=rs_array[distance_index]
    #zs=rs_array[distance_index]
    #compute absolute magnitudes (polynomial mean + Gaussian scatter)
    f110s= np.random.normal((relabsmags['F110W'][0])(spts), relabsmags['F110W'][1])
    f140s= np.random.normal((relabsmags['F140W'][0])(spts), relabsmags['F140W'][1])
    f160s= np.random.normal((relabsmags['F160W'][0])(spts), relabsmags['F160W'][1])
    #compute apparent magnitudes via the distance modulus
    appf140s0=f140s+5*np.log10(dists_for_spts/10.0)
    appf110s0=f110s+5*np.log10(dists_for_spts/10.0)
    appf160s0=f160s+5*np.log10(dists_for_spts/10.0)
    #print ('shape .....{}'.format(exps))
    #add magnitude uncertainities from the fitted exposure-time model
    f110_ers= mag_unc_exptime_relation(appf110s0, exps, *list( MAG_LIMITS['mag_unc_exp']['F110']))
    f140_ers= mag_unc_exptime_relation(appf140s0, exps, *list( MAG_LIMITS['mag_unc_exp']['F140']))
    f160_ers= mag_unc_exptime_relation(appf160s0, exps, *list( MAG_LIMITS['mag_unc_exp']['F160']))
    appf110s= np.random.normal(appf110s0, f110_ers)
    appf140s= np.random.normal(appf140s0, f140_ers)
    appf160s= np.random.normal(appf160s0, f160_ers)
    #snrjs=10**np.random.normal( (relsnrs['snr_F140W'][0])(appf140s),relsnrs['snr_F140W'][1])
    #print (exp_grism)
    snrjs110= 10**(fit_snr_exptime( exp_grism, appf110s, *list(MAG_LIMITS['snr_exp']['F110'])))
    snrjs140= 10**(fit_snr_exptime( exp_grism, appf140s, *list(MAG_LIMITS['snr_exp']['F140'])))
    snrjs160= 10**(fit_snr_exptime( exp_grism, appf160s, *list(MAG_LIMITS['snr_exp']['F160'])))
    #assign upper and lower SNR limits for sources outside the fitted
    #magnitude range
    snr_bool_up= np.logical_or.reduce([ appf110s >25, appf140s >25, appf160s>24])
    # NOTE(review): 'appf160s>15' below looks like a typo for 'appf160s<15'
    # (the other two terms are lower-bound tests).  Left unchanged since
    # this legacy function is unused -- confirm before reviving it.
    snr_bool_do= np.logical_or.reduce([ appf110s <15, appf140s <15, appf160s>15])
    snrjs= np.nanmin(np.vstack([snrjs110, snrjs140, snrjs160]), axis=0)
    #replace by 1000 or 1
    snrjs[snr_bool_up]=10**2.7
    snrjs[snr_bool_do]=1.
    sl= probability_of_selection(spts, snrjs)
    #comput the rest from the survey
    #dict_values={model: {h: {}, 'age': None, 'teff': None, 'spts': None}}
    #dict_values=pd.read_pickle(wisps.OUTPUT_FILES+'/effective_numbers_from_sims')
    #dict_values[model][h]={}
    #dict_values[model]={}
    #dict_values.update({model: {h:{}}})
    #print (model)
    #print (dict_values.keys())
    #print (np.nanmax(dict_values[model]['age']))
    #print (np.nanmax(syst['system_age'][~np.isnan((syst['system_spts']).flatten())]))
    #print (model)
    #print
    #dict_values[model]['spts']=spts
    #dict_values[model]['teff']=syst['system_teff'][mask]
    #dict_values[model]['age']=
    # assemble everything into one record dict -> DataFrame
    morevals={'f110':f110s, 'f140':f140s, 'f160':f160s, 'd':dists_for_spts, 'appf140':appf140s,
              'appf110':appf110s, 'appf160':appf160s, 'sl':sl, 'pnt':pnts, 'age':syst['system_age'][mask],
              'teff': syst['system_teff'][mask], 'spts': spts, 'f110_unc': f110_ers, 'f140_unc': f140_ers, 'f160_unc': f160_ers,
              'snrj110': snrjs110, 'snrj140': snrjs140, 'snrj160': snrjs160, 'snrj': snrjs}
    #assert len(spts) == len(pnts)
    #assert len(f110s) == len(pnts)
    #dict_values[model][h].update(morevals)
    simdf=pd.DataFrame.from_records(morevals).rename(columns={'dist':'d',
        'snrj': 'snr', 'slprob': 'sl', 'spts': 'spt', 'pnt': 'pntname'})
    # map pointing names back to the pointing objects
    simdf['pnt']=simdf.pntname.apply(lambda x: np.array(PNTS)[pnt_names.index(x)])
    #corrts0=
    mag_limits=pd.DataFrame.from_records(simdf.pnt.apply(lambda x: x.mag_limits).values)
    assert len(mag_limits)==len(appf140s0)
    # cut sources fainter than the spt-corrected magnitude limit in any
    # filter, or with SNR < 3
    flags0=simdf.appf110 >= mag_limits['F110']+(corr_pols['F110W'][0])(simdf.spt)
    flags1=simdf.appf140 >= mag_limits['F140']+(corr_pols['F140W'][0])(simdf.spt)
    flags2=simdf.appf160 >= mag_limits['F160']+(corr_pols['F160W'][0])(simdf.spt)
    flags3= simdf.snr <3.
    flags=np.logical_or.reduce([flags0,flags1, flags2, flags3])
    cutdf=(simdf[~flags]).reset_index(drop=True)
    #cutdf=simdf
    #print ('Before cut {}'.format(len(simdf)))
    #print ('After cut {}'.format(len(cutdf)))
    cutdf.to_hdf(wisps.OUTPUT_FILES+'/final_simulated_sample_cut.h5', key=str(model)+str(h)+str(h)+'F110_corrected')
    del cutdf
    #import pickle
    #with open(wisps.OUTPUT_FILES+'/effective_numbers_from_sims', 'wb') as file:
    #    pickle.dump(dict_values,file)
    return
def get_all_values_from_model(model, hs):
    """
    Run ``compute_effective_numbers`` for one evolutionary model at every
    scale height in ``hs``.

    model : str -- evolutionary model name
    hs : iterable of scale heights (e.g. wispsim.HS)
    """
    # one full simulation per scale height; results are written to HDF
    # inside compute_effective_numbers
    for h in tqdm(hs):
        _= compute_effective_numbers(model, h)
    #syst=make_systems(model_name=model, bfraction=0.2)
    #spts=(syst['system_spts']).flatten()
    #comput the rest from the survey
    #dict_values=pd.read_pickle(wisps.OUTPUT_FILES+'/effective_numbers_from_sims')
    #dict_values[model]['spts']=wisps.drop_nan(spts)
    #dict_values[model]['teff']=syst['system_teff'][~np.isnan(spts)]
    #dict_values[model]['age']=syst['system_age'][~np.isnan(spts)]
    #for h in tqdm(hs):
    #    dict_values[model][h]={}
    #    dict_values[model][h].update(compute_effective_numbers(wisps.drop_nan(spts),SPGRID, h))
    #import pickle
    #with open(wisps.OUTPUT_FILES+'/effective_numbers_from_sims', 'wb') as file:
    #    pickle.dump(dict_values,file)
    #del dict_values
if __name__=='__main__':
    """
    Purpose: compute number densities by running the full simulation
    pipeline for every evolutionary model at every scale height.
    """
    #recompute=kwargs.get("recompute", True)
    #hs=kwargs.get("hs", )
    # Recompute effective numbers for each set of evolutionary models.
    get_all_values_from_model('burrows1997', wispsim.HS)
    get_all_values_from_model('burrows2001', wispsim.HS)
    get_all_values_from_model('baraffe2003', wispsim.HS)
    get_all_values_from_model('saumon2008', wispsim.HS)
    get_all_values_from_model('marley2019', wispsim.HS)
    get_all_values_from_model('phillips2020', wispsim.HS)
|
{
"content_hash": "1484dcebbddfa762811000c3e1d8a5ab",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 135,
"avg_line_length": 35.67584745762712,
"alnum_prop": 0.6576993883247224,
"repo_name": "caganze/wisps",
"id": "4b584410bda0e183b42c0b7f14915f1a49beab5c",
"size": "16926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wisps/simulations/effective_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "20156919"
},
{
"name": "Python",
"bytes": "250409"
},
{
"name": "Shell",
"bytes": "118"
},
{
"name": "Stan",
"bytes": "3388"
},
{
"name": "TeX",
"bytes": "4576327"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
from setuptools import setup, find_packages
# Require pytest-runner at setup time only when a test-related command
# ('pytest', 'test', or its alias 'ptr') is actually being invoked.
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
def find_version(*paths):
    """Return the ``__VERSION__`` string declared in the file at
    ``os.path.join(*paths)``.

    Raises RuntimeError if no ``__VERSION__ = '...'`` assignment is found.
    """
    fname = os.path.join(*paths)
    with open(fname) as fhandler:
        contents = fhandler.read()
    # Match a top-of-line assignment like: __VERSION__ = '1.2.3'
    match = re.search(r"^__VERSION__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string in %s" % (fname, ))
    return match.group(1)
# Single-source the version: read it from the package's __init__.py.
version = find_version('pymesos', '__init__.py')

install_requires = [
    'six',
    'http-parser',
    'addict',
]

# Choose a ZooKeeper client by interpreter: kazoo is pure Python (works on
# Python 3 and PyPy); zkpython is a C binding used otherwise.
PY3 = sys.version_info > (3, )
PYPY = getattr(sys, 'pypy_version_info', False) and True or False
if (PY3 or PYPY):
    install_requires += ['kazoo']
else:
    install_requires += ['zkpython']
print(install_requires)
# Package metadata and build configuration for setuptools.
setup(
    name='pymesos',
    version=version,
    description="A pure python implementation of Mesos scheduler and executor",
    packages=find_packages(),
    platforms=['POSIX'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
    ],
    keywords='mesos',
    author="Zhongbo Tian",
    author_email="tianzhongbo@douban.com",
    url="https://github.com/douban/pymesos",
    download_url=('https://github.com/douban/pymesos/archive/%s.tar.gz' %
                  version),
    install_requires=install_requires,
    # pytest-runner is only pulled in for test commands (see above).
    setup_requires=pytest_runner,
    tests_require=['pytest-cov', 'pytest-randomly', 'pytest-mock', 'pytest'],
)
|
{
"content_hash": "c2d298f643371147fec9b3c1ca3119b5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 29.25423728813559,
"alnum_prop": 0.6251448435689455,
"repo_name": "douban/pymesos",
"id": "43503c4e9abe8e2b35dc3c04f8469181b0a1f166",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "196379"
}
],
"symlink_target": ""
}
|
"""
Created on 18 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.sync.interval_timer import IntervalTimer
from scs_core.sync.runner import Runner
# --------------------------------------------------------------------------------------------------------------------
class TimedRunner(Runner):
    """
    A Runner that draws samples from a sampler on a fixed time interval,
    either indefinitely or for a fixed number of samples.
    """

    def __init__(self, interval, sample_count=None):
        """
        Constructor

        interval: seconds between samples (passed to IntervalTimer)
        sample_count: number of samples to yield, or None for unbounded
        """
        self.__timer = IntervalTimer(interval)
        self.__sample_count = sample_count

    def samples(self, sampler):
        """Yield successive samples from ``sampler``, one per timer tick."""
        self.reset()        # restart the timer to prevent uneven intervals
        count = self.__sample_count
        if count is None:
            # unbounded mode: sample for as long as the timer keeps running
            while self.__timer.true():
                yield sampler.sample()
            return
        # bounded mode: exactly ``count`` interval ticks
        for _ in self.__timer.range(count):
            yield sampler.sample()

    def reset(self):
        """Reset the underlying interval timer."""
        self.__timer.reset()

    @property
    def timer(self):
        """The underlying IntervalTimer instance."""
        return self.__timer

    @property
    def sample_count(self):
        """Configured number of samples, or None for unbounded sampling."""
        return self.__sample_count

    def __str__(self, *args, **kwargs):
        return "TimedRunner:{timer:%s, sample_count:%s}" % (self.__timer, self.__sample_count)
|
{
"content_hash": "29742dfcb90c1a44128825fdab2f6eac",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 118,
"avg_line_length": 28.262295081967213,
"alnum_prop": 0.37064965197215777,
"repo_name": "south-coast-science/scs_core",
"id": "6e7b430073e04730cca1caf188f5a1b0669a3f52",
"size": "1724",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/scs_core/sync/timed_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1461551"
}
],
"symlink_target": ""
}
|
"""tests/build/test_build_gene.py
Tests for genes that are built on the variants
"""
from scout.build.variant.gene import build_gene
def test_build_gene():
    """build_gene keeps hgnc_id but adds no symbol without an hgnc table."""
    # GIVEN information about a gene and one of its transcripts
    tx_info = {
        "functional_annotations": ["transcript_ablation"],
        "transcript_id": "ENST00000249504",
        "hgnc_id": 5134,
        "sift_prediction": "deleterious",
    }
    gene_info = {
        "transcripts": [tx_info],
        "most_severe_transcript": tx_info,
        "most_severe_consequence": "transcript_ablation",
        "most_severe_sift": "deleterious",
        "most_severe_polyphen": None,
        "hgnc_id": 5134,
        "region_annotation": "exonic",
        "coding_sequence_name": "c.95T>C",
        "canonical_transcript": "ENST00000249504",
    }
    # WHEN building the gene object
    built = build_gene(gene_info)
    # THEN the hgnc id is carried over unchanged
    assert built["hgnc_id"] == gene_info["hgnc_id"]
    # AND no hgnc symbol is present, since no lookup table was provided
    assert "hgnc_symbol" not in built
def test_build_gene_hgnc_info():
    """build_gene enriches the variant gene with hgnc symbol/inheritance
    when an hgnc-id-to-gene lookup table is supplied."""
    ## GIVEN information about a gene and some hgnc information
    transcript_info = {
        "functional_annotations": ["transcript_ablation"],
        "transcript_id": "ENST00000249504",
        "hgnc_id": 5134,
        "sift_prediction": "deleterious",
    }
    gene_info = {
        "transcripts": [transcript_info],
        "most_severe_transcript": transcript_info,
        "most_severe_consequence": "transcript_ablation",
        "most_severe_sift": "deleterious",
        "most_severe_polyphen": None,
        "hgnc_id": 5134,
        "region_annotation": "exonic",
    }
    # Two hgnc transcripts: one non-primary, one primary/canonical.
    transcript_1 = {
        "ensembl_transcript_id": "ENST00000498438",
        "is_primary": False,
        "start": 176968944,
        "end": 176974482,
    }
    transcript_2 = {
        "ensembl_transcript_id": "ENST00000249504",
        "is_primary": True,
        "refseq_id": "NM_021192",
        "start": 176972014,
        "end": 176974722,
        "is_canonical": True,
    }
    hgnc_transcripts = [transcript_1, transcript_2]
    # Full hgnc gene record (HOXD11) used as the lookup value.
    hgnc_gene = {
        "hgnc_id": 5134,
        "hgnc_symbol": "HOXD11",
        "ensembl_id": "ENSG00000128713",
        "chromosome": "2",
        "start": 176968944,
        "end": 176974722,
        "build": 37,
        "description": "homeobox D11",
        "aliases": ["HOX4", "HOXD11", "HOX4F"],
        "entrez_id": 3237,
        "omim_ids": 142986,
        "pli_score": 0.0131898476206074,
        "primary_transcripts": ["NM_021192"],
        "ucsc_id": "uc010fqx.4",
        "uniprot_ids": ["P31277"],
        "vega_id": "OTTHUMG00000132510",
        "transcripts": hgnc_transcripts,
        "incomplete_penetrance": False,
        "inheritance_models": ["AD"],
        "transcripts_dict": {
            "ENST00000498438": transcript_1,
            "ENST00000249504": transcript_2,
        },
    }
    hgncid_to_gene = {5134: hgnc_gene}
    ## WHEN adding gene and transcript information and building variant
    gene_obj = build_gene(gene_info, hgncid_to_gene=hgncid_to_gene)
    ## THEN assert that the hgnc id was added correct
    assert gene_obj["hgnc_id"] == gene_info["hgnc_id"]
    ## THEN assert that the hgnc symbol was added from the hgnc gene information
    assert gene_obj["hgnc_symbol"] == hgnc_gene["hgnc_symbol"]
    ## THEN assert that the gene inheritance models was added correct
    assert gene_obj["inheritance"] == hgnc_gene["inheritance_models"]
|
{
"content_hash": "df2741218f15e97ed5898b201fc9cefd",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 80,
"avg_line_length": 31.43362831858407,
"alnum_prop": 0.5968468468468469,
"repo_name": "Clinical-Genomics/scout",
"id": "e679e4a48589933c980fdac863dfc7aa11e69aa0",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/build/test_build_gene.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
}
|
import cPickle
import unittest
import multiprocessing
import multiprocessing.pool
import nose.tools as ntools
import numpy
from smqtk.algorithms.classifier.libsvm import LibSvmClassifier
from smqtk.representation import \
ClassificationElementFactory, \
DescriptorElementFactory
from smqtk.representation.classification_element.memory import \
MemoryClassificationElement
from smqtk.representation.descriptor_element.local_elements import \
DescriptorMemoryElement
# Tests only run when the optional libsvm backend is importable.
if LibSvmClassifier.is_usable():

    class TestLibSvmClassifier (unittest.TestCase):
        # NOTE(review): Python 2 code -- relies on cPickle, dict.iteritems
        # and tuple-unpacking function parameters (``def f((i, v))``).

        def test_no_save_model_pickle(self):
            """A trained model survives pickling even with no cache file
            paths configured: the SVM state is embedded in __getstate__."""
            # Test model preservation across pickling even without model cache
            # file paths set.
            classifier = LibSvmClassifier(
                train_params={
                    '-t': 0,  # linear kernel
                    '-b': 1,  # enable probability estimates
                    '-c': 2,  # SVM-C parameter C
                    '-q': '',  # quite mode
                },
                normalize=None,  # DO NOT normalize descriptors
            )
            ntools.assert_true(classifier.svm_model is None)
            # Empty model should not trigger __LOCAL__ content in pickle
            ntools.assert_not_in('__LOCAL__', classifier.__getstate__())
            _ = cPickle.loads(cPickle.dumps(classifier))

            # train arbitrary model (same as ``test_simple_classification``)
            DIM = 2
            N = 1000
            POS_LABEL = 'positive'
            NEG_LABEL = 'negative'
            d_factory = DescriptorElementFactory(DescriptorMemoryElement, {})
            c_factory = ClassificationElementFactory(MemoryClassificationElement, {})

            def make_element((i, v)):
                d = d_factory.new_descriptor('test', i)
                d.set_vector(v)
                return d

            # Constructing artificial descriptors split on the y=0.5 line
            x = numpy.random.rand(N, DIM)
            x_pos = x[x[:, 1] <= 0.45]
            x_neg = x[x[:, 1] >= 0.55]
            p = multiprocessing.pool.ThreadPool()
            d_pos = p.map(make_element, enumerate(x_pos))
            d_neg = p.map(make_element, enumerate(x_neg, start=N//2))
            p.close()
            p.join()

            # Training
            classifier.train({POS_LABEL: d_pos, NEG_LABEL: d_neg})

            # Test original classifier
            t_v = numpy.random.rand(DIM)
            t = d_factory.new_descriptor('query', 0)
            t.set_vector(t_v)
            c_expected = classifier.classify(t, c_factory)

            # Should see __LOCAL__ content in pickle state now
            p_state = classifier.__getstate__()
            ntools.assert_in('__LOCAL__', p_state)
            ntools.assert_in('__LOCAL_LABELS__', p_state)
            ntools.assert_in('__LOCAL_MODEL__', p_state)
            ntools.assert_true(len(p_state['__LOCAL_LABELS__']) > 0)
            ntools.assert_true(len(p_state['__LOCAL_MODEL__']) > 0)

            # Restored classifier should classify the same test descriptor the
            # same
            #: :type: LibSvmClassifier
            classifier2 = cPickle.loads(cPickle.dumps(classifier))
            c_post_pickle = classifier2.classify(t, c_factory)
            # There may be floating point error, so extract actual confidence
            # values and check post round
            c_pp_positive = c_post_pickle[POS_LABEL]
            c_pp_negative = c_post_pickle[NEG_LABEL]
            c_e_positive = c_expected[POS_LABEL]
            c_e_negative = c_expected[NEG_LABEL]
            ntools.assert_almost_equal(c_e_positive, c_pp_positive, 5)
            ntools.assert_almost_equal(c_e_negative, c_pp_negative, 5)

        def test_simple_classification(self):
            """
            simple LibSvmClassifier test - 2-class

            Test libSVM classification functionality using random constructed
            data, training the y=0.5 split
            """
            DIM = 2
            N = 1000
            POS_LABEL = 'positive'
            NEG_LABEL = 'negative'
            p = multiprocessing.pool.ThreadPool()
            d_factory = DescriptorElementFactory(DescriptorMemoryElement, {})
            c_factory = ClassificationElementFactory(MemoryClassificationElement, {})

            def make_element((i, v)):
                d = d_factory.new_descriptor('test', i)
                d.set_vector(v)
                return d

            # Constructing artificial descriptors (gap around y=0.5 keeps the
            # classes separable)
            x = numpy.random.rand(N, DIM)
            x_pos = x[x[:, 1] <= 0.45]
            x_neg = x[x[:, 1] >= 0.55]
            d_pos = p.map(make_element, enumerate(x_pos))
            d_neg = p.map(make_element, enumerate(x_neg, start=N//2))

            # Create/Train test classifier
            classifier = LibSvmClassifier(
                train_params={
                    '-t': 0,  # linear kernel
                    '-b': 1,  # enable probability estimates
                    '-c': 2,  # SVM-C parameter C
                    '-q': '',  # quite mode
                },
                normalize=None,  # DO NOT normalize descriptors
            )
            classifier.train({POS_LABEL: d_pos, NEG_LABEL: d_neg})

            # Test classifier on a fresh random draw (distinct UUID offsets)
            x = numpy.random.rand(N, DIM)
            x_pos = x[x[:, 1] <= 0.45]
            x_neg = x[x[:, 1] >= 0.55]
            d_pos = p.map(make_element, enumerate(x_pos, N))
            d_neg = p.map(make_element, enumerate(x_neg, N + N//2))

            d_pos_sync = {}  # for comparing to async
            for d in d_pos:
                c = classifier.classify(d, c_factory)
                ntools.assert_equal(c.max_label(),
                                    POS_LABEL,
                                    "Found False positive: %s :: %s" %
                                    (d.vector(), c.get_classification()))
                d_pos_sync[d] = c

            d_neg_sync = {}
            for d in d_neg:
                c = classifier.classify(d, c_factory)
                ntools.assert_equal(c.max_label(), NEG_LABEL,
                                    "Found False negative: %s :: %s" %
                                    (d.vector(), c.get_classification()))
                d_neg_sync[d] = c

            # test that async classify produces the same results
            # -- d_pos
            m_pos = classifier.classify_async(d_pos, c_factory)
            ntools.assert_equal(m_pos, d_pos_sync,
                                "Async computation of pos set did not yield "
                                "the same results as synchronous "
                                "classification.")
            # -- d_neg
            m_neg = classifier.classify_async(d_neg, c_factory)
            ntools.assert_equal(m_neg, d_neg_sync,
                                "Async computation of neg set did not yield "
                                "the same results as synchronous "
                                "classification.")
            # -- combined -- threaded
            combined_truth = dict(d_pos_sync.iteritems())
            combined_truth.update(d_neg_sync)
            m_combined = classifier.classify_async(
                d_pos + d_neg, c_factory,
                use_multiprocessing=False,
            )
            ntools.assert_equal(m_combined, combined_truth,
                                "Async computation of all test descriptors "
                                "did not yield the same results as "
                                "synchronous classification.")
            # -- combined -- multiprocess
            m_combined = classifier.classify_async(
                d_pos + d_neg, c_factory,
                use_multiprocessing=True,
            )
            ntools.assert_equal(m_combined, combined_truth,
                                "Async computation of all test descriptors "
                                "(mixed order) did not yield the same results "
                                "as synchronous classification.")

            # Closing resources
            p.close()
            p.join()

        def test_simple_multiclass_classification(self):
            """
            simple LibSvmClassifier test - 3-class

            Test libSVM classification functionality using random constructed
            data, training the y=0.33 and y=.66 split
            """
            DIM = 2
            N = 1000
            P1_LABEL = 'p1'
            P2_LABEL = 'p2'
            P3_LABEL = 'p3'
            p = multiprocessing.pool.ThreadPool()
            d_factory = DescriptorElementFactory(DescriptorMemoryElement, {})
            c_factory = ClassificationElementFactory(MemoryClassificationElement, {})
            # di: running descriptor-UUID counter so every element is unique
            di = 0

            def make_element((i, v)):
                d = d_factory.new_descriptor('test', i)
                d.set_vector(v)
                return d

            # Constructing artificial descriptors in three separable bands
            x = numpy.random.rand(N, DIM)
            x_p1 = x[x[:, 1] <= 0.30]
            x_p2 = x[(x[:, 1] >= 0.36) & (x[:, 1] <= 0.63)]
            x_p3 = x[x[:, 1] >= 0.69]
            d_p1 = p.map(make_element, enumerate(x_p1, di))
            di += len(d_p1)
            d_p2 = p.map(make_element, enumerate(x_p2, di))
            di += len(d_p2)
            d_p3 = p.map(make_element, enumerate(x_p3, di))
            di += len(d_p3)

            # Create/Train test classifier
            classifier = LibSvmClassifier(
                train_params={
                    '-t': 0,  # linear kernel
                    '-b': 1,  # enable probability estimates
                    '-c': 2,  # SVM-C parameter C
                    '-q': ''  # quite mode
                },
                normalize=None,  # DO NOT normalize descriptors
            )
            classifier.train({P1_LABEL: d_p1, P2_LABEL: d_p2, P3_LABEL: d_p3})

            # Test classifier on a fresh random draw
            x = numpy.random.rand(N, DIM)
            x_p1 = x[x[:, 1] <= 0.30]
            x_p2 = x[(x[:, 1] >= 0.36) & (x[:, 1] <= 0.63)]
            x_p3 = x[x[:, 1] >= 0.69]
            d_p1 = p.map(make_element, enumerate(x_p1, di))
            di += len(d_p1)
            d_p2 = p.map(make_element, enumerate(x_p2, di))
            di += len(d_p2)
            d_p3 = p.map(make_element, enumerate(x_p3, di))
            di += len(d_p3)

            d_p1_sync = {}
            for d in d_p1:
                c = classifier.classify(d, c_factory)
                ntools.assert_equal(c.max_label(),
                                    P1_LABEL,
                                    "Incorrect %s label: %s :: %s" %
                                    (P1_LABEL, d.vector(),
                                     c.get_classification()))
                d_p1_sync[d] = c

            d_p2_sync = {}
            for d in d_p2:
                c = classifier.classify(d, c_factory)
                ntools.assert_equal(c.max_label(),
                                    P2_LABEL,
                                    "Incorrect %s label: %s :: %s" %
                                    (P2_LABEL, d.vector(),
                                     c.get_classification()))
                d_p2_sync[d] = c

            d_neg_sync = {}
            for d in d_p3:
                c = classifier.classify(d, c_factory)
                ntools.assert_equal(c.max_label(),
                                    P3_LABEL,
                                    "Incorrect %s label: %s :: %s" %
                                    (P3_LABEL, d.vector(),
                                     c.get_classification()))
                d_neg_sync[d] = c

            # test that async classify produces the same results
            # -- p1
            async_p1 = classifier.classify_async(d_p1, c_factory)
            ntools.assert_equal(async_p1, d_p1_sync,
                                "Async computation of p1 set did not yield "
                                "the same results as synchronous computation.")
            # -- p2
            async_p2 = classifier.classify_async(d_p2, c_factory)
            ntools.assert_equal(async_p2, d_p2_sync,
                                "Async computation of p2 set did not yield "
                                "the same results as synchronous computation.")
            # -- neg
            async_neg = classifier.classify_async(d_p3, c_factory)
            ntools.assert_equal(async_neg, d_neg_sync,
                                "Async computation of neg set did not yield "
                                "the same results as synchronous computation.")
            # -- combined -- threaded
            sync_combined = dict(d_p1_sync.iteritems())
            sync_combined.update(d_p2_sync)
            sync_combined.update(d_neg_sync)
            async_combined = classifier.classify_async(
                d_p1 + d_p2 + d_p3, c_factory,
                use_multiprocessing=False
            )
            ntools.assert_equal(async_combined, sync_combined,
                                "Async computation of all test descriptors "
                                "did not yield the same results as "
                                "synchronous classification.")
            # -- combined -- multiprocess
            async_combined = classifier.classify_async(
                d_p1 + d_p2 + d_p3, c_factory,
                use_multiprocessing=True
            )
            ntools.assert_equal(async_combined, sync_combined,
                                "Async computation of all test descriptors "
                                "(mixed order) did not yield the same results "
                                "as synchronous classification.")

            # Closing resources
            p.close()
            p.join()
|
{
"content_hash": "b62d4edb2cc9aa41a1646ccb9466df2f",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 85,
"avg_line_length": 42.19571865443425,
"alnum_prop": 0.4825337005363096,
"repo_name": "Purg/SMQTK",
"id": "a2a67d6f4e56e152403333e8869c08bc970cfb4b",
"size": "13798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/smqtk/tests/algorithms/classifier/test_libsvm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "93558"
},
{
"name": "C++",
"bytes": "812600"
},
{
"name": "CMake",
"bytes": "68672"
},
{
"name": "CSS",
"bytes": "2297"
},
{
"name": "Cuda",
"bytes": "69131"
},
{
"name": "HTML",
"bytes": "79601"
},
{
"name": "Java",
"bytes": "97253"
},
{
"name": "JavaScript",
"bytes": "123457"
},
{
"name": "Jupyter Notebook",
"bytes": "85336"
},
{
"name": "M4",
"bytes": "61280"
},
{
"name": "Makefile",
"bytes": "4344"
},
{
"name": "Matlab",
"bytes": "23266"
},
{
"name": "Perl",
"bytes": "3762394"
},
{
"name": "Python",
"bytes": "1281460"
},
{
"name": "Shell",
"bytes": "26340"
},
{
"name": "TeX",
"bytes": "74581"
}
],
"symlink_target": ""
}
|
"""Tests for ImageCache class and helper functions."""
import datetime
import os
import tempfile
import time
from unittest import mock
import uuid
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import utils
from ironic.drivers.modules import image_cache
from ironic.tests import base
def touch(filename):
    """Create ``filename`` (or truncate an existing one to zero length),
    like the shell ``touch`` for the create case."""
    with open(filename, 'w'):
        pass
class BaseTest(base.TestCase):
    """Shared fixture: a temporary master cache dir, an ImageCache under
    test, a destination path, and a fake image UUID."""

    def setUp(self):
        super().setUp()
        # Master cache directory and the cache instance under test.
        self.master_dir = tempfile.mkdtemp()
        self.cache = image_cache.ImageCache(self.master_dir, None, None)
        # Destination the cached image would be deployed to.
        self.dest_dir = tempfile.mkdtemp()
        self.dest_path = os.path.join(self.dest_dir, 'dest')
        # Image UUID and the path of its cached (converted) master copy.
        self.uuid = uuidutils.generate_uuid()
        self.master_path = os.path.join(self.master_dir, self.uuid) + '.converted'
        self.img_info = {}
@mock.patch.object(image_service, 'get_image_service', autospec=True)
@mock.patch.object(image_cache.ImageCache, 'clean_up', autospec=True)
@mock.patch.object(image_cache.ImageCache, '_download_image', autospec=True)
class TestImageCacheFetch(BaseTest):
    """Tests for ImageCache.fetch_image across the cache hit/miss matrix.

    The class decorators mock the image-service lookup, clean_up and
    _download_image for every test; the mocks arrive as trailing test
    arguments in reverse decorator order (innermost first).
    """
    @mock.patch.object(image_cache, '_fetch', autospec=True)
    def test_fetch_image_no_master_dir(self, mock_fetch, mock_download,
                                       mock_clean_up, mock_image_service):
        # With no master dir caching is disabled: _fetch goes straight
        # to the destination, no download/clean-up/service lookup.
        self.cache.master_dir = None
        self.cache.fetch_image(self.uuid, self.dest_path)
        self.assertFalse(mock_download.called)
        mock_fetch.assert_called_once_with(
            None, self.uuid, self.dest_path, True)
        self.assertFalse(mock_clean_up.called)
        mock_image_service.assert_not_called()
    @mock.patch.object(image_cache, '_fetch', autospec=True)
    def test_fetch_image_no_master_dir_memory_low(self,
                                                  mock_fetch,
                                                  mock_download,
                                                  mock_clean_up,
                                                  mock_image_service):
        # Memory-guard failures from _fetch must propagate unchanged.
        mock_fetch.side_effect = exception.InsufficientMemory
        self.cache.master_dir = None
        self.assertRaises(exception.InsufficientMemory,
                          self.cache.fetch_image,
                          self.uuid, self.dest_path)
        self.assertFalse(mock_download.called)
        mock_fetch.assert_called_once_with(
            None, self.uuid, self.dest_path, True)
        self.assertFalse(mock_clean_up.called)
        mock_image_service.assert_not_called()
    @mock.patch.object(os, 'link', autospec=True)
    @mock.patch.object(image_cache, '_delete_dest_path_if_stale',
                       return_value=True, autospec=True)
    @mock.patch.object(image_cache, '_delete_master_path_if_stale',
                       return_value=True, autospec=True)
    def test_fetch_image_dest_and_master_uptodate(
            self, mock_cache_upd, mock_dest_upd, mock_link, mock_download,
            mock_clean_up, mock_image_service):
        # Both dest and master up to date: no link, download or clean-up.
        self.cache.fetch_image(self.uuid, self.dest_path)
        mock_cache_upd.assert_called_once_with(
            self.master_path, self.uuid,
            mock_image_service.return_value.show.return_value)
        mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
        self.assertFalse(mock_link.called)
        self.assertFalse(mock_download.called)
        self.assertFalse(mock_clean_up.called)
        mock_image_service.assert_called_once_with(self.uuid, context=None)
        mock_image_service.return_value.show.assert_called_once_with(self.uuid)
    @mock.patch.object(os, 'link', autospec=True)
    @mock.patch.object(image_cache, '_delete_dest_path_if_stale',
                       return_value=True, autospec=True)
    @mock.patch.object(image_cache, '_delete_master_path_if_stale',
                       return_value=True, autospec=True)
    def test_fetch_image_dest_and_master_uptodate_no_force_raw(
            self, mock_cache_upd, mock_dest_upd, mock_link, mock_download,
            mock_clean_up, mock_image_service):
        # Without force_raw the master path has no '.converted' suffix.
        master_path = os.path.join(self.master_dir, self.uuid)
        self.cache.fetch_image(self.uuid, self.dest_path, force_raw=False)
        mock_cache_upd.assert_called_once_with(
            master_path, self.uuid,
            mock_image_service.return_value.show.return_value)
        mock_dest_upd.assert_called_once_with(master_path, self.dest_path)
        self.assertFalse(mock_link.called)
        self.assertFalse(mock_download.called)
        self.assertFalse(mock_clean_up.called)
    @mock.patch.object(os, 'link', autospec=True)
    @mock.patch.object(image_cache, '_delete_dest_path_if_stale',
                       return_value=False, autospec=True)
    @mock.patch.object(image_cache, '_delete_master_path_if_stale',
                       return_value=True, autospec=True)
    def test_fetch_image_dest_out_of_date(
            self, mock_cache_upd, mock_dest_upd, mock_link, mock_download,
            mock_clean_up, mock_image_service):
        # Master fresh but dest stale: re-link master to dest, no download.
        self.cache.fetch_image(self.uuid, self.dest_path)
        mock_cache_upd.assert_called_once_with(
            self.master_path, self.uuid,
            mock_image_service.return_value.show.return_value)
        mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
        mock_link.assert_called_once_with(self.master_path, self.dest_path)
        self.assertFalse(mock_download.called)
        self.assertFalse(mock_clean_up.called)
    @mock.patch.object(os, 'link', autospec=True)
    @mock.patch.object(image_cache, '_delete_dest_path_if_stale',
                       return_value=True, autospec=True)
    @mock.patch.object(image_cache, '_delete_master_path_if_stale',
                       return_value=False, autospec=True)
    def test_fetch_image_master_out_of_date(
            self, mock_cache_upd, mock_dest_upd, mock_link, mock_download,
            mock_clean_up, mock_image_service):
        # Stale master: full re-download followed by a cache clean-up.
        self.cache.fetch_image(self.uuid, self.dest_path)
        mock_cache_upd.assert_called_once_with(
            self.master_path, self.uuid,
            mock_image_service.return_value.show.return_value)
        mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
        self.assertFalse(mock_link.called)
        mock_download.assert_called_once_with(
            self.cache, self.uuid, self.master_path, self.dest_path,
            mock_image_service.return_value.show.return_value,
            ctx=None, force_raw=True)
        mock_clean_up.assert_called_once_with(self.cache)
        mock_image_service.assert_called_once_with(self.uuid, context=None)
        mock_image_service.return_value.show.assert_called_once_with(self.uuid)
    @mock.patch.object(os, 'link', autospec=True)
    @mock.patch.object(image_cache, '_delete_dest_path_if_stale',
                       return_value=True, autospec=True)
    @mock.patch.object(image_cache, '_delete_master_path_if_stale',
                       return_value=False, autospec=True)
    def test_fetch_image_both_master_and_dest_out_of_date(
            self, mock_cache_upd, mock_dest_upd, mock_link, mock_download,
            mock_clean_up, mock_image_service):
        self.cache.fetch_image(self.uuid, self.dest_path)
        mock_cache_upd.assert_called_once_with(
            self.master_path, self.uuid,
            mock_image_service.return_value.show.return_value)
        mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
        self.assertFalse(mock_link.called)
        mock_download.assert_called_once_with(
            self.cache, self.uuid, self.master_path, self.dest_path,
            mock_image_service.return_value.show.return_value,
            ctx=None, force_raw=True)
        mock_clean_up.assert_called_once_with(self.cache)
    def test_fetch_image_not_uuid(self, mock_download, mock_clean_up,
                                  mock_image_service):
        # Non-UUID hrefs are mapped to a UUID5 of the URL for the master
        # file name, so no part of the raw URL hits the file system.
        href = u'http://abc.com/ubuntu.qcow2'
        href_converted = str(uuid.uuid5(uuid.NAMESPACE_URL, href))
        master_path = ''.join([os.path.join(self.master_dir, href_converted),
                               '.converted'])
        self.cache.fetch_image(href, self.dest_path)
        mock_download.assert_called_once_with(
            self.cache, href, master_path, self.dest_path,
            mock_image_service.return_value.show.return_value,
            ctx=None, force_raw=True)
        self.assertTrue(mock_clean_up.called)
    def test_fetch_image_not_uuid_no_force_raw(self, mock_download,
                                               mock_clean_up,
                                               mock_image_service):
        href = u'http://abc.com/ubuntu.qcow2'
        href_converted = str(uuid.uuid5(uuid.NAMESPACE_URL, href))
        master_path = os.path.join(self.master_dir, href_converted)
        self.cache.fetch_image(href, self.dest_path, force_raw=False)
        mock_download.assert_called_once_with(
            self.cache, href, master_path, self.dest_path,
            mock_image_service.return_value.show.return_value,
            ctx=None, force_raw=False)
        self.assertTrue(mock_clean_up.called)
@mock.patch.object(image_cache, '_fetch', autospec=True)
class TestImageCacheDownload(BaseTest):
    """Tests for ImageCache._download_image: fetch into a temporary file,
    then hard-link it to both the master and destination paths."""
    def test__download_image(self, mock_fetch):
        def _fake_fetch(ctx, uuid, tmp_path, *args):
            # The fetch target must be a temp file, never dest or master dir.
            self.assertEqual(self.uuid, uuid)
            self.assertNotEqual(self.dest_path, tmp_path)
            self.assertNotEqual(os.path.dirname(tmp_path), self.master_dir)
            with open(tmp_path, 'w') as fp:
                fp.write("TEST")
        mock_fetch.side_effect = _fake_fetch
        self.cache._download_image(self.uuid, self.master_path, self.dest_path,
                                   self.img_info)
        self.assertTrue(os.path.isfile(self.dest_path))
        self.assertTrue(os.path.isfile(self.master_path))
        # Hard link: master and dest must share an inode.
        self.assertEqual(os.stat(self.dest_path).st_ino,
                         os.stat(self.master_path).st_ino)
        with open(self.dest_path) as fp:
            self.assertEqual("TEST", fp.read())
    def test__download_image_large_url(self, mock_fetch):
        # A long enough URL may exceed the file name limits of the file system.
        # Make sure we don't use any parts of the URL anywhere.
        url = "http://example.com/image.iso?secret=%s" % ("x" * 1000)
        def _fake_fetch(ctx, href, tmp_path, *args):
            self.assertEqual(url, href)
            self.assertNotEqual(self.dest_path, tmp_path)
            self.assertNotEqual(os.path.dirname(tmp_path), self.master_dir)
            with open(tmp_path, 'w') as fp:
                fp.write("TEST")
        mock_fetch.side_effect = _fake_fetch
        self.cache._download_image(url, self.master_path, self.dest_path,
                                   self.img_info)
        self.assertTrue(os.path.isfile(self.dest_path))
        self.assertTrue(os.path.isfile(self.master_path))
        self.assertEqual(os.stat(self.dest_path).st_ino,
                         os.stat(self.master_path).st_ino)
        with open(self.dest_path) as fp:
            self.assertEqual("TEST", fp.read())
    @mock.patch.object(image_cache, 'LOG', autospec=True)
    @mock.patch.object(os, 'link', autospec=True)
    def test__download_image_linkfail(self, mock_link, mock_log, mock_fetch):
        # First link (temp -> master) succeeds, second (-> dest) fails.
        mock_link.side_effect = [None, OSError]
        self.assertRaises(exception.ImageDownloadFailed,
                          self.cache._download_image,
                          self.uuid, self.master_path, self.dest_path,
                          self.img_info)
        self.assertTrue(mock_fetch.called)
        self.assertEqual(2, mock_link.call_count)
        self.assertTrue(mock_log.error.called)
    def test__download_image_raises_memory_guard(self, mock_fetch):
        mock_fetch.side_effect = exception.InsufficientMemory
        self.assertRaises(exception.InsufficientMemory,
                          self.cache._download_image,
                          self.uuid, self.master_path,
                          self.dest_path, self.img_info)
@mock.patch.object(os, 'unlink', autospec=True)
class TestUpdateImages(BaseTest):
    """Tests for the staleness helpers _delete_master_path_if_stale and
    _delete_dest_path_if_stale (return True when the path is usable)."""
    @mock.patch.object(os.path, 'exists', return_value=False, autospec=True)
    def test__delete_master_path_if_stale_glance_img_not_cached(
            self, mock_path_exists, mock_unlink):
        res = image_cache._delete_master_path_if_stale(self.master_path,
                                                       self.uuid,
                                                       self.img_info)
        self.assertFalse(mock_unlink.called)
        mock_path_exists.assert_called_once_with(self.master_path)
        self.assertFalse(res)
    @mock.patch.object(os.path, 'exists', return_value=True, autospec=True)
    def test__delete_master_path_if_stale_glance_img(
            self, mock_path_exists, mock_unlink):
        # UUID href with an existing cached file counts as up to date.
        res = image_cache._delete_master_path_if_stale(self.master_path,
                                                       self.uuid,
                                                       self.img_info)
        self.assertFalse(mock_unlink.called)
        mock_path_exists.assert_called_once_with(self.master_path)
        self.assertTrue(res)
    def test__delete_master_path_if_stale_no_master(self, mock_unlink):
        res = image_cache._delete_master_path_if_stale(self.master_path,
                                                       'http://11',
                                                       self.img_info)
        self.assertFalse(mock_unlink.called)
        self.assertFalse(res)
    def test__delete_master_path_if_stale_no_updated_at(self, mock_unlink):
        # URL href without 'updated_at' info: cached file cannot be
        # validated, so it is removed.
        touch(self.master_path)
        href = 'http://awesomefreeimages.al/img111'
        res = image_cache._delete_master_path_if_stale(self.master_path, href,
                                                       self.img_info)
        mock_unlink.assert_called_once_with(self.master_path)
        self.assertFalse(res)
    def test__delete_master_path_if_stale_master_up_to_date(self, mock_unlink):
        touch(self.master_path)
        href = 'http://awesomefreeimages.al/img999'
        self.img_info = {
            'updated_at': datetime.datetime(1999, 11, 15, 8, 12, 31)
        }
        res = image_cache._delete_master_path_if_stale(self.master_path, href,
                                                       self.img_info)
        self.assertFalse(mock_unlink.called)
        self.assertTrue(res)
    def test__delete_master_path_if_stale_master_same_time(self, mock_unlink):
        # When times identical should not delete cached file
        touch(self.master_path)
        mtime = utils.unix_file_modification_datetime(self.master_path)
        href = 'http://awesomefreeimages.al/img999'
        self.img_info = {
            'updated_at': mtime
        }
        res = image_cache._delete_master_path_if_stale(self.master_path, href,
                                                       self.img_info)
        self.assertFalse(mock_unlink.called)
        self.assertTrue(res)
    def test__delete_master_path_if_stale_out_of_date(self, mock_unlink):
        touch(self.master_path)
        href = 'http://awesomefreeimages.al/img999'
        # updated_at is one year in the future, so the cached copy is stale.
        self.img_info = {
            'updated_at': datetime.datetime((datetime.datetime.utcnow().year
                                             + 1), 11, 15, 8, 12, 31)
        }
        res = image_cache._delete_master_path_if_stale(self.master_path, href,
                                                       self.img_info)
        mock_unlink.assert_called_once_with(self.master_path)
        self.assertFalse(res)
    def test__delete_dest_path_if_stale_no_dest(self, mock_unlink):
        res = image_cache._delete_dest_path_if_stale(self.master_path,
                                                     self.dest_path)
        self.assertFalse(mock_unlink.called)
        self.assertFalse(res)
    def test__delete_dest_path_if_stale_no_master(self, mock_unlink):
        touch(self.dest_path)
        res = image_cache._delete_dest_path_if_stale(self.master_path,
                                                     self.dest_path)
        mock_unlink.assert_called_once_with(self.dest_path)
        self.assertFalse(res)
    def test__delete_dest_path_if_stale_out_of_date(self, mock_unlink):
        # Two separate files (not hard-linked): dest is considered stale.
        touch(self.master_path)
        touch(self.dest_path)
        res = image_cache._delete_dest_path_if_stale(self.master_path,
                                                     self.dest_path)
        mock_unlink.assert_called_once_with(self.dest_path)
        self.assertFalse(res)
    def test__delete_dest_path_if_stale_up_to_date(self, mock_unlink):
        # Dest hard-linked to master counts as up to date.
        touch(self.master_path)
        os.link(self.master_path, self.dest_path)
        res = image_cache._delete_dest_path_if_stale(self.master_path,
                                                     self.dest_path)
        self.assertFalse(mock_unlink.called)
        self.assertTrue(res)
class TestImageCacheCleanUp(base.TestCase):
    """Tests for ImageCache.clean_up and its TTL / cache-size strategies."""
    def setUp(self):
        super(TestImageCacheCleanUp, self).setUp()
        self.master_dir = tempfile.mkdtemp()
        # Tiny cache (10 bytes, 600 s TTL) so tests can overflow it easily.
        self.cache = image_cache.ImageCache(self.master_dir,
                                            cache_size=10,
                                            cache_ttl=600)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
                       autospec=True)
    def test_clean_up_old_deleted(self, mock_clean_size):
        mock_clean_size.return_value = None
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(2)]
        for filename in files:
            touch(filename)
        # NOTE(dtantsur): Can't alter ctime, have to set mtime to the future
        new_current_time = time.time() + 900
        os.utime(files[0], (new_current_time - 100, new_current_time - 100))
        with mock.patch.object(time, 'time', lambda: new_current_time):
            self.cache.clean_up()
        # Only the file younger than the 600 s TTL survives the sweep.
        mock_clean_size.assert_called_once_with(self.cache, mock.ANY, None)
        survived = mock_clean_size.call_args[0][1]
        self.assertEqual(1, len(survived))
        self.assertEqual(files[0], survived[0][0])
        # NOTE(dtantsur): do not compare milliseconds
        self.assertEqual(int(new_current_time - 100), int(survived[0][1]))
        self.assertEqual(int(new_current_time - 100),
                         int(survived[0][2].st_mtime))
    @mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
                       autospec=True)
    def test_clean_up_old_with_amount(self, mock_clean_size):
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(2)]
        for filename in files:
            with open(filename, 'wb') as f:
                f.write(b'X')
        new_current_time = time.time() + 900
        with mock.patch.object(time, 'time', lambda: new_current_time):
            self.cache.clean_up(amount=1)
        self.assertFalse(mock_clean_size.called)
        # Exactly one file is expected to be deleted
        self.assertTrue(any(os.path.exists(f) for f in files))
        self.assertFalse(all(os.path.exists(f) for f in files))
    @mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
                       autospec=True)
    def test_clean_up_files_with_links_untouched(self, mock_clean_size):
        mock_clean_size.return_value = None
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(2)]
        for filename in files:
            touch(filename)
            # Extra hard link marks the file as in use elsewhere.
            os.link(filename, filename + 'copy')
        new_current_time = time.time() + 900
        with mock.patch.object(time, 'time', lambda: new_current_time):
            self.cache.clean_up()
        for filename in files:
            self.assertTrue(os.path.exists(filename))
        mock_clean_size.assert_called_once_with(mock.ANY, [], None)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
                       autospec=True)
    def test_clean_up_ensure_cache_size(self, mock_clean_ttl):
        mock_clean_ttl.side_effect = lambda *xx: xx[1:]
        # NOTE(dtantsur): Cache size in test is 10 bytes, we create 6 files
        # with 3 bytes each and expect 3 to be deleted
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(6)]
        for filename in files:
            with open(filename, 'w') as fp:
                fp.write('123')
        # NOTE(dtantsur): Make 3 files 'newer' to check that
        # old ones are deleted first
        new_current_time = time.time() + 100
        for filename in files[:3]:
            os.utime(filename, (new_current_time, new_current_time))
        with mock.patch.object(time, 'time', lambda: new_current_time):
            self.cache.clean_up()
        for filename in files[:3]:
            self.assertTrue(os.path.exists(filename))
        for filename in files[3:]:
            self.assertFalse(os.path.exists(filename))
        mock_clean_ttl.assert_called_once_with(mock.ANY, mock.ANY, None)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
                       autospec=True)
    def test_clean_up_ensure_cache_size_with_amount(self, mock_clean_ttl):
        mock_clean_ttl.side_effect = lambda *xx: xx[1:]
        # NOTE(dtantsur): Cache size in test is 10 bytes, we create 6 files
        # with 3 bytes each and set amount to be 15, 5 files are to be deleted
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(6)]
        for filename in files:
            with open(filename, 'w') as fp:
                fp.write('123')
        # NOTE(dtantsur): Make 1 file 'newer' to check that
        # old ones are deleted first
        new_current_time = time.time() + 100
        os.utime(files[0], (new_current_time, new_current_time))
        with mock.patch.object(time, 'time', lambda: new_current_time):
            self.cache.clean_up(amount=15)
        self.assertTrue(os.path.exists(files[0]))
        for filename in files[5:]:
            self.assertFalse(os.path.exists(filename))
        mock_clean_ttl.assert_called_once_with(mock.ANY, mock.ANY, 15)
    @mock.patch.object(image_cache.LOG, 'info', autospec=True)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
                       autospec=True)
    def test_clean_up_cache_still_large(self, mock_clean_ttl, mock_log):
        mock_clean_ttl.side_effect = lambda *xx: xx[1:]
        # NOTE(dtantsur): Cache size in test is 10 bytes, we create 2 files
        # than cannot be deleted and expected this to be logged
        files = [os.path.join(self.master_dir, str(i))
                 for i in range(2)]
        for filename in files:
            with open(filename, 'w') as fp:
                fp.write('123')
            os.link(filename, filename + 'copy')
        self.cache.clean_up()
        for filename in files:
            self.assertTrue(os.path.exists(filename))
        self.assertTrue(mock_log.called)
        mock_clean_ttl.assert_called_once_with(mock.ANY, mock.ANY, None)
    @mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
    @mock.patch.object(image_cache, '_fetch', autospec=True)
    def test_temp_images_not_cleaned(self, mock_fetch, mock_rmtree):
        def _fake_fetch(ctx, uuid, tmp_path, *args):
            with open(tmp_path, 'w') as fp:
                fp.write("TEST" * 10)
            # assume cleanup from another thread at this moment
            self.cache.clean_up()
            self.assertTrue(os.path.exists(tmp_path))
        mock_fetch.side_effect = _fake_fetch
        master_path = os.path.join(self.master_dir, 'uuid')
        dest_path = os.path.join(tempfile.mkdtemp(), 'dest')
        self.cache._download_image('uuid', master_path, dest_path, {})
        self.assertTrue(mock_rmtree.called)
    @mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
    @mock.patch.object(image_cache, '_fetch', autospec=True)
    def test_temp_dir_exception(self, mock_fetch, mock_rmtree):
        # Temp dir must be removed even when the fetch itself fails.
        mock_fetch.side_effect = exception.IronicException
        self.assertRaises(exception.IronicException,
                          self.cache._download_image,
                          'uuid', 'fake', 'fake', {})
        self.assertTrue(mock_rmtree.called)
    @mock.patch.object(image_cache.LOG, 'warning', autospec=True)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
                       autospec=True)
    @mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
                       autospec=True)
    def test_clean_up_amount_not_satisfied(self, mock_clean_size,
                                           mock_clean_ttl, mock_log):
        # If neither strategy frees enough space, a warning is logged.
        mock_clean_ttl.side_effect = lambda *xx: xx[1:]
        mock_clean_size.side_effect = lambda self, listing, amount: amount
        self.cache.clean_up(amount=15)
        self.assertTrue(mock_log.called)
    def test_cleanup_ordering(self):
        class ParentCache(image_cache.ImageCache):
            def __init__(self):
                super(ParentCache, self).__init__('a', 1, 1, None)
        @image_cache.cleanup(priority=10000)
        class Cache1(ParentCache):
            pass
        @image_cache.cleanup(priority=20000)
        class Cache2(ParentCache):
            pass
        @image_cache.cleanup(priority=10000)
        class Cache3(ParentCache):
            pass
        # Highest priority comes first in the cleanup list.
        self.assertEqual(image_cache._cache_cleanup_list[0][1], Cache2)
        # The order of caches with the same priority is not deterministic.
        item_possibilities = [Cache1, Cache3]
        second_item_actual = image_cache._cache_cleanup_list[1][1]
        self.assertIn(second_item_actual, item_possibilities)
        item_possibilities.remove(second_item_actual)
        third_item_actual = image_cache._cache_cleanup_list[2][1]
        self.assertEqual(item_possibilities[0], third_item_actual)
@mock.patch.object(image_cache, '_cache_cleanup_list', autospec=True)
@mock.patch.object(os, 'statvfs', autospec=True)
@mock.patch.object(image_service, 'get_image_service', autospec=True)
class CleanupImageCacheTestCase(base.TestCase):
    """Tests for image_cache.clean_up_caches: free-space driven clean-up
    across all registered caches in priority order."""
    def setUp(self):
        super(CleanupImageCacheTestCase, self).setUp()
        self.mock_first_cache = mock.MagicMock(spec_set=[])
        self.mock_second_cache = mock.MagicMock(spec_set=[])
        # (priority, cache-class) pairs, higher priority first.
        self.cache_cleanup_list = [(50, self.mock_first_cache),
                                   (20, self.mock_second_cache)]
        self.mock_first_cache.return_value.master_dir = 'first_cache_dir'
        self.mock_second_cache.return_value.master_dir = 'second_cache_dir'
    def test_no_clean_up(self, mock_image_service, mock_statvfs,
                         cache_cleanup_list_mock):
        # Enough space found - no clean up
        mock_show = mock_image_service.return_value.show
        mock_show.return_value = dict(size=42)
        mock_statvfs.return_value = mock.MagicMock(
            spec_set=['f_frsize', 'f_bavail'], f_frsize=1, f_bavail=1024)
        cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
        image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
        mock_show.assert_called_once_with('uuid')
        mock_statvfs.assert_called_once_with('master_dir')
        self.assertFalse(self.mock_first_cache.return_value.clean_up.called)
        self.assertFalse(self.mock_second_cache.return_value.clean_up.called)
        mock_statvfs.assert_called_once_with('master_dir')
    @mock.patch.object(os, 'stat', autospec=True)
    def test_one_clean_up(self, mock_stat, mock_image_service, mock_statvfs,
                          cache_cleanup_list_mock):
        # Not enough space, first cache clean up is enough
        mock_stat.return_value.st_dev = 1
        mock_show = mock_image_service.return_value.show
        mock_show.return_value = dict(size=42)
        mock_statvfs.side_effect = [
            mock.MagicMock(f_frsize=1, f_bavail=1,
                           spec_set=['f_frsize', 'f_bavail']),
            mock.MagicMock(f_frsize=1, f_bavail=1024,
                           spec_set=['f_frsize', 'f_bavail'])
        ]
        cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
        image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
        mock_show.assert_called_once_with('uuid')
        mock_statvfs.assert_called_with('master_dir')
        self.assertEqual(2, mock_statvfs.call_count)
        self.mock_first_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 1))
        self.assertFalse(self.mock_second_cache.return_value.clean_up.called)
        # Since we are using generator expression in clean_up_caches, stat on
        # second cache wouldn't be called if we got enough free space on
        # cleaning up the first cache.
        mock_stat_calls_expected = [mock.call('master_dir'),
                                    mock.call('first_cache_dir')]
        mock_statvfs_calls_expected = [mock.call('master_dir'),
                                       mock.call('master_dir')]
        self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
        self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
    @mock.patch.object(os, 'stat', autospec=True)
    def test_clean_up_another_fs(self, mock_stat, mock_image_service,
                                 mock_statvfs, cache_cleanup_list_mock):
        # Not enough space, need to cleanup second cache
        mock_stat.side_effect = [mock.MagicMock(st_dev=1, spec_set=['st_dev']),
                                 mock.MagicMock(st_dev=2, spec_set=['st_dev']),
                                 mock.MagicMock(st_dev=1, spec_set=['st_dev'])]
        mock_show = mock_image_service.return_value.show
        mock_show.return_value = dict(size=42)
        mock_statvfs.side_effect = [
            mock.MagicMock(f_frsize=1, f_bavail=1,
                           spec_set=['f_frsize', 'f_bavail']),
            mock.MagicMock(f_frsize=1, f_bavail=1024,
                           spec_set=['f_frsize', 'f_bavail'])
        ]
        cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
        image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
        mock_show.assert_called_once_with('uuid')
        mock_statvfs.assert_called_with('master_dir')
        self.assertEqual(2, mock_statvfs.call_count)
        self.mock_second_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 1))
        self.assertFalse(self.mock_first_cache.return_value.clean_up.called)
        # Since first cache exists on a different partition, it wouldn't be
        # considered for cleanup.
        mock_stat_calls_expected = [mock.call('master_dir'),
                                    mock.call('first_cache_dir'),
                                    mock.call('second_cache_dir')]
        mock_statvfs_calls_expected = [mock.call('master_dir'),
                                       mock.call('master_dir')]
        self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
        self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
    @mock.patch.object(os, 'stat', autospec=True)
    def test_both_clean_up(self, mock_stat, mock_image_service, mock_statvfs,
                           cache_cleanup_list_mock):
        # Not enough space, clean up of both caches required
        mock_stat.return_value.st_dev = 1
        mock_show = mock_image_service.return_value.show
        mock_show.return_value = dict(size=42)
        mock_statvfs.side_effect = [
            mock.MagicMock(f_frsize=1, f_bavail=1,
                           spec_set=['f_frsize', 'f_bavail']),
            mock.MagicMock(f_frsize=1, f_bavail=2,
                           spec_set=['f_frsize', 'f_bavail']),
            mock.MagicMock(f_frsize=1, f_bavail=1024,
                           spec_set=['f_frsize', 'f_bavail'])
        ]
        cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
        image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
        mock_show.assert_called_once_with('uuid')
        mock_statvfs.assert_called_with('master_dir')
        self.assertEqual(3, mock_statvfs.call_count)
        self.mock_first_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 1))
        self.mock_second_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 2))
        mock_stat_calls_expected = [mock.call('master_dir'),
                                    mock.call('first_cache_dir'),
                                    mock.call('second_cache_dir')]
        mock_statvfs_calls_expected = [mock.call('master_dir'),
                                       mock.call('master_dir'),
                                       mock.call('master_dir')]
        self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
        self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
    @mock.patch.object(os, 'stat', autospec=True)
    def test_clean_up_fail(self, mock_stat, mock_image_service, mock_statvfs,
                           cache_cleanup_list_mock):
        # Not enough space even after cleaning both caches - failure
        mock_stat.return_value.st_dev = 1
        mock_show = mock_image_service.return_value.show
        mock_show.return_value = dict(size=42)
        mock_statvfs.return_value = mock.MagicMock(
            f_frsize=1, f_bavail=1, spec_set=['f_frsize', 'f_bavail'])
        cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
        self.assertRaises(exception.InsufficientDiskSpace,
                          image_cache.clean_up_caches,
                          None, 'master_dir', [('uuid', 'path')])
        mock_show.assert_called_once_with('uuid')
        mock_statvfs.assert_called_with('master_dir')
        self.assertEqual(3, mock_statvfs.call_count)
        self.mock_first_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 1))
        self.mock_second_cache.return_value.clean_up.assert_called_once_with(
            amount=(42 - 1))
        mock_stat_calls_expected = [mock.call('master_dir'),
                                    mock.call('first_cache_dir'),
                                    mock.call('second_cache_dir')]
        mock_statvfs_calls_expected = [mock.call('master_dir'),
                                       mock.call('master_dir'),
                                       mock.call('master_dir')]
        self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
        self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
class TestFetchCleanup(base.TestCase):
    """Tests for image_cache._fetch: download to a '.part' file, clean up
    disk space if a raw conversion is needed, then convert."""
    @mock.patch.object(images, 'converted_size', autospec=True)
    @mock.patch.object(images, 'fetch', autospec=True)
    @mock.patch.object(images, 'image_to_raw', autospec=True)
    @mock.patch.object(images, 'force_raw_will_convert', autospec=True,
                       return_value=True)
    @mock.patch.object(image_cache, '_clean_up_caches', autospec=True)
    def test__fetch(
            self, mock_clean, mock_will_convert, mock_raw, mock_fetch,
            mock_size):
        mock_size.return_value = 100
        image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
        mock_fetch.assert_called_once_with('fake', 'fake-uuid',
                                           '/foo/bar.part', force_raw=False)
        # Space for the converted image is reclaimed before conversion.
        mock_clean.assert_called_once_with('/foo', 100)
        mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
                                         '/foo/bar.part')
        mock_will_convert.assert_called_once_with('fake-uuid', '/foo/bar.part')
    @mock.patch.object(images, 'converted_size', autospec=True)
    @mock.patch.object(images, 'fetch', autospec=True)
    @mock.patch.object(images, 'image_to_raw', autospec=True)
    @mock.patch.object(images, 'force_raw_will_convert', autospec=True,
                       return_value=False)
    @mock.patch.object(image_cache, '_clean_up_caches', autospec=True)
    def test__fetch_already_raw(
            self, mock_clean, mock_will_convert, mock_raw, mock_fetch,
            mock_size):
        # No conversion needed: no size estimation, no clean-up.
        image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
        mock_fetch.assert_called_once_with('fake', 'fake-uuid',
                                           '/foo/bar.part', force_raw=False)
        mock_clean.assert_not_called()
        mock_size.assert_not_called()
        mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
                                         '/foo/bar.part')
        mock_will_convert.assert_called_once_with('fake-uuid', '/foo/bar.part')
    @mock.patch.object(images, 'converted_size', autospec=True)
    @mock.patch.object(images, 'fetch', autospec=True)
    @mock.patch.object(images, 'image_to_raw', autospec=True)
    @mock.patch.object(images, 'force_raw_will_convert', autospec=True,
                       return_value=True)
    @mock.patch.object(image_cache, '_clean_up_caches', autospec=True)
    def test__fetch_estimate_fallback(
            self, mock_clean, mock_will_convert, mock_raw, mock_fetch,
            mock_size):
        # Exact size is too big to free: retry with an estimated size.
        mock_size.side_effect = [100, 10]
        mock_clean.side_effect = [exception.InsufficientDiskSpace(), None]
        image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
        mock_fetch.assert_called_once_with('fake', 'fake-uuid',
                                           '/foo/bar.part', force_raw=False)
        mock_size.assert_has_calls([
            mock.call('/foo/bar.part', estimate=False),
            mock.call('/foo/bar.part', estimate=True),
        ])
        mock_clean.assert_has_calls([
            mock.call('/foo', 100),
            mock.call('/foo', 10),
        ])
        mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
                                         '/foo/bar.part')
        mock_will_convert.assert_called_once_with('fake-uuid', '/foo/bar.part')
|
{
"content_hash": "d1fcd5447bde4bd428239f5ad628b5f0",
"timestamp": "",
"source": "github",
"line_count": 802,
"max_line_length": 79,
"avg_line_length": 47.913965087281795,
"alnum_prop": 0.5946600046842064,
"repo_name": "openstack/ironic",
"id": "ffe6bed09a9a2c8112f4ce43a88674517cdc8efa",
"size": "39034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/drivers/modules/test_image_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "PowerShell",
"bytes": "1676"
},
{
"name": "Python",
"bytes": "9506176"
},
{
"name": "Shell",
"bytes": "188127"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import itertools
# Advent of Code 2015 day 13: find the optimal circular seating order.
verbose = False
# happiness maps the concatenated pair 'AB' -> happiness A gains when
# seated next to B (directional: 'AB' and 'BA' are stored separately).
happiness = {}
people = set()
#f = open('inputs/input13_test.txt')
f = open('inputs/input13.txt')
for line in f:
    # Field layout (presumed from the indices used below):
    # "<p1> would gain|lose <n> happiness units by sitting next to <p2>."
    split_line = line.split()
    person1 = split_line[0]
    direction = split_line[2]
    amount = int(split_line[3])
    person2 = split_line[10][:-1]  # [:-1] strips the trailing period
    if verbose:
        print(person1, direction, amount, person2)
    people.add(person1)
    people.add(person2)
    if direction == 'lose':
        happiness[person1+person2] = -amount
    else:
        assert direction == 'gain'
        happiness[person1+person2] = amount
f.close()
if verbose:
    print(people)
    print(happiness)
def find_maximum_happiness(people, happiness):
    """Return the best total happiness over all circular seating orders.

    :param people: iterable of guest names.
    :param happiness: dict mapping the concatenation person1+person2 to
        the happiness person1 gains when seated next to person2.
    :returns: the maximum total happiness; the table is circular, so the
        first and last guests of each arrangement are also neighbours.
    """
    maximum_happiness = None
    for arrangement in itertools.permutations(people):
        happiness_gained = 0
        # Sum both directions for every adjacent pair in the row...
        for person1, person2 in zip(arrangement[:-1], arrangement[1:]):
            happiness_gained += happiness[person1 + person2]
            happiness_gained += happiness[person2 + person1]
        # ...then close the circle: first and last are neighbours too.
        person1 = arrangement[0]
        person2 = arrangement[-1]
        happiness_gained += happiness[person1 + person2]
        happiness_gained += happiness[person2 + person1]
        # Seed the maximum from the first arrangement instead of 0, so a
        # table where every total is negative is reported correctly (the
        # previous code silently clamped the answer at 0).
        if maximum_happiness is None or happiness_gained > maximum_happiness:
            maximum_happiness = happiness_gained
        if verbose:
            print(arrangement, happiness_gained)
    return maximum_happiness
# Part a: best seating for the guests as parsed.
print(find_maximum_happiness(people, happiness))
# part b: seat yourself ('Self') too, with zero happiness in both
# directions for every pairing, then recompute.
for person in people:
    happiness['Self' + person] = 0
    happiness[person + 'Self'] = 0
people.add('Self')
print(find_maximum_happiness(people, happiness))
|
{
"content_hash": "e78f092c12785f605a333fdafe0d3da1",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 69,
"avg_line_length": 26.174603174603174,
"alnum_prop": 0.6519102486355367,
"repo_name": "jjhelmus/adventofcode",
"id": "1ba1f4de77128d05e58f3cd50d90def814c088f0",
"size": "1649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34182"
}
],
"symlink_target": ""
}
|
import sys
import threading
# Thread-local storage: holds one UV event-loop server per thread
# (see uv_server() below).
_tls = threading.local()
import pyuv
from flower.core.channel import channel
from flower.core.sched import tasklet, getcurrent, schedule
def get_fd(io):
    """Return the file descriptor number for *io*.

    Accepts a plain int, an object whose ``fileno`` attribute is a
    callable (called to obtain the fd) or a value (returned as-is).
    Raises ValueError for anything else.
    """
    if isinstance(io, int):
        return io
    _missing = object()
    fileno = getattr(io, 'fileno', _missing)
    if fileno is _missing:
        raise ValueError("invalid file descriptor number")
    return fileno() if callable(fileno) else fileno
def uv_mode(m):
    """Map selector mode 0 / 1 / other to pyuv poll-event flags.

    0 -> readable, 1 -> writable, anything else -> readable | writable.
    """
    if m == 0:
        return pyuv.UV_READABLE
    if m == 1:
        return pyuv.UV_WRITABLE
    return pyuv.UV_READABLE | pyuv.UV_WRITABLE
class UVExit(Exception):
    """Exception tied to the uv event loop machinery.

    NOTE(review): not raised anywhere in this module chunk — confirm
    intended usage against callers before relying on it.
    """
    pass
class UV(object):
    """Wrapper around a pyuv event loop driven from a flower tasklet.

    The loop runs inside a dedicated "uv_server" tasklet; a pyuv Async
    handle allows waking the loop from another thread, and a fast repeating
    Timer periodically yields control back to the flower scheduler.
    """

    def __init__(self):
        self.loop = pyuv.Loop()
        # Async handle to wake the loop from other threads; unref'd so it
        # does not by itself keep the loop alive.
        self._async = pyuv.Async(self.loop, self._wakeloop)
        self._async.unref()
        # NOTE(review): ``fds`` is not touched in this chunk — presumably a
        # fd -> watcher map used elsewhere; confirm before documenting more.
        self.fds = {}
        self._lock = threading.RLock()
        self.running = False
        # start the server task
        self._runtask = tasklet(self.run, "uv_server")()

    def _wakeloop(self, handle):
        # Async callback: refresh the loop's cached notion of "now".
        self.loop.update_time()

    def wakeup(self):
        # Wake the loop; pyuv.Async.send is safe to call from any thread.
        self._async.send()

    def switch(self):
        # Hand control to the uv server tasklet, restarting it if the loop
        # has stopped. The current tasklet is removed from the scheduler
        # until something reschedules it.
        if not self.running:
            self._runtask = tasklet(self.run)()
        getcurrent().remove()
        self._runtask.switch()

    def idle(self, handle):
        # Timer callback: let other tasklets run while the loop spins.
        if getcurrent() is self._runtask:
            schedule()

    def run(self):
        """Run the pyuv loop until it has no more referenced handles."""
        t = pyuv.Timer(self.loop)
        # Tick every 0.1 ms to interleave scheduler work with loop work;
        # unref'd so this timer alone does not keep the loop running.
        t.start(self.idle, 0.0001, 0.0001)
        t.unref()
        self.running = True
        try:
            self.loop.run()
        finally:
            self.running = False
def uv_server():
    """Return this thread's UV instance, creating it on first access.

    The instance is cached in thread-local storage so each thread gets
    its own event loop.
    """
    _missing = object()
    server = getattr(_tls, 'uv_server', _missing)
    if server is _missing:
        server = _tls.uv_server = UV()
    return server
def uv_sleep(seconds, ref=True):
    """Block the calling tasklet for *seconds* using a pyuv timer.

    This uses the event loop for sleeping, as an alternative to our own
    time-events scheduler. When *ref* is false the timer is unref'd so it
    does not keep the loop alive on its own.
    """
    server = uv_server()
    done = channel()

    def _on_timeout(handle):
        handle.stop()
        done.send(None)

    timer = pyuv.Timer(server.loop)
    timer.start(_on_timeout, seconds, seconds)
    if not ref:
        timer.unref()
    # Block until the timer callback fires.
    done.receive()
def uv_idle(ref=True):
    """Block the calling tasklet until the event loop goes idle.

    This uses the event loop for idling, as an alternative to our own
    time-events scheduler. Returns True once the idle callback has fired.
    When *ref* is false the handle is unref'd so it does not keep the
    loop alive on its own.
    """
    server = uv_server()
    woken = channel()

    def _on_idle(handle):
        handle.stop()
        woken.send(True)

    idle_handle = pyuv.Idle(server.loop)
    idle_handle.start(_on_idle)
    if not ref:
        idle_handle.unref()
    return woken.receive()
|
{
"content_hash": "53b35475ffcde00fbd4e4d88004c6282",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 69,
"avg_line_length": 21.3781512605042,
"alnum_prop": 0.5605345911949685,
"repo_name": "benoitc/flower",
"id": "f3d13cff94f2b098fa1d668212f9e95579358f37",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flower/core/uv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2801"
},
{
"name": "Python",
"bytes": "95255"
}
],
"symlink_target": ""
}
|
"""Policy Engine For <Project_name>"""
from oslo_config import cfg
from oslo_policy import opts as policy_opts
from oslo_policy import policy
from <project_name> import exception
# Global oslo.config object; policy rules default to reading policy.json.
CONF = cfg.CONF
policy_opts.set_defaults(cfg.CONF, 'policy.json')
# Lazily-created singleton Enforcer; populated by init().
_ENFORCER = None
def init():
    """Lazily create the module-level policy Enforcer (idempotent)."""
    global _ENFORCER
    if _ENFORCER:
        return
    _ENFORCER = policy.Enforcer(CONF)
def enforce_action(context, action):
    """Check that *action* is permitted for the given *context*.

    Builds a target from the context's own project_id and user_id and
    delegates the policy check to :func:`enforce`.
    """
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    return enforce(context, action, target)
def enforce(context, action, target):
    """Verify that *action* is valid on *target* in this context.

    :param context: request context; its dict form supplies the credentials
    :param action: string naming the action to check, colon separated for
        clarity, e.g. ``compute:create_instance``
    :param target: dict describing the object of the action; for object
        creation typically the location of the object, e.g.
        ``{'project_id': context.project_id}``
    :raises PolicyNotAuthorized: if verification fails.
    """
    init()
    credentials = context.to_dict()
    return _ENFORCER.enforce(action, target, credentials,
                             do_raise=True,
                             exc=exception.PolicyNotAuthorized,
                             action=action)
def check_is_admin(roles, context=None):
    """Return whether the user is admin per the ``context_is_admin`` rule.

    Credentials are built from *roles* alone, or from *context* (roles
    plus user_id) when one is supplied. In a multi-domain configuration,
    roles alone may not be sufficient.
    """
    init()
    # Include a project_id on the target to avoid a KeyError if the
    # context_is_admin policy definition is missing and the default
    # admin_or_owner rule tries to apply: the credentials dict below has
    # no project_id, so this empty target can never match a generic rule.
    target = {'project_id': ''}
    if context is not None:
        credentials = {'roles': context.roles,
                       'user_id': context.user_id}
    else:
        credentials = {'roles': roles}
    return _ENFORCER.enforce('context_is_admin', target, credentials)
|
{
"content_hash": "5e7b26b528e4f63acd7a30a3851a7fa3",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 32.38157894736842,
"alnum_prop": 0.6432344575375863,
"repo_name": "hahaps/openstack-project-generator",
"id": "cc71aa71961ff6f29d8dd0e4877ecf743b80fd95",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template/<project_name>/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "689778"
},
{
"name": "Shell",
"bytes": "23190"
}
],
"symlink_target": ""
}
|
call_script = 1 # (call_script,<script_id>),
try_end = 3 # (try_end),
try_begin = 4 # (try_begin),
else_try = 5 # (else_try),
try_for_range = 6 # (try_for_range,<destination>,<lower_bound>,<upper_bound>),
# works like a for loop from lower-bound up to (upper-bound - 1)
try_for_range_backwards = 7 # same as above but starts from (upper-bound - 1) down-to lower bound.
# (try_for_range_backwards,<destination>,<lower_bound>,<upper_bound>),
try_for_parties = 11 # (try_for_parties,<destination>),
try_for_agents = 12 # (try_for_agents,<destination>),
store_script_param_1 = 21 # (store_script_param_1,<destination>), (within a script) stores the first script parameter.
store_script_param_2 = 22 # (store_script_param_2,<destination>), (within a script) stores the second script parameter.
store_script_param = 23 # (store_script_param,<destination>,<script_param_no>), (within a script) stores <script_param_no>th script parameter.
#-----------------------------------------------------------------------------
# CONDITION OPERATIONS
#-----------------------------------------------------------------------------
ge = 30 # (ge,<value>,<value>), greater than or equal to
eq = 31 # (eq,<value>,<value>), equal to
gt = 32 # (gt,<value>,<value>), greater than
is_between = 33 # (is_between,<value>,<lower_bound>,<upper_bound>), greater than or equal to lower bound and less than upper bound
entering_town = 36 # (entering_town,<town_id>),
map_free = 37 # (map_free),
encountered_party_is_attacker = 39 # (encountered_party_is_attacker),
conversation_screen_is_active = 42 # (conversation_screen_active), used in mission template triggers only
set_player_troop = 47 # (set_player_troop,<troop_id>),
store_repeat_object = 50 # (store_repeat_object,<destination>), stores the index of a repeated dialog option for repeat_for_factions, etc.
set_result_string = 60 # (set_result_string, <string_id>), sets the result string for game scripts that need one
key_is_down = 70 # (key_is_down, <key_id>), fails if the key is not currently down (key_is_down, <key_id>),
key_clicked = 71 # (key_clicked, <key_id>), fails if the key is not clicked on the specific frame
game_key_is_down = 72 # (game_key_is_down, <game_key_id>), fails if the game key is not currently down
game_key_clicked = 73 # (game_key_clicked, <game_key_id>), fails if the game key is not clicked on the specific frame
mouse_get_position = 75 # (mouse_get_position, <position_no>), x and y values of position are filled
omit_key_once = 77 # (omit_key_once, <key_id>), game omits any bound action for the key once
clear_omitted_keys = 78 # (clear_omitted_keys),
get_global_cloud_amount = 90 # (get_global_cloud_amount, <destination>), returns a value between 0-100
set_global_cloud_amount = 91 # (set_global_cloud_amount, <value>), value is clamped to 0-100
get_global_haze_amount = 92 # (get_global_haze_amount, <destination>), returns a value between 0-100
set_global_haze_amount = 93 # (set_global_haze_amount, <value>), value is clamped to 0-100
hero_can_join = 101 # (hero_can_join, [party_id]),
hero_can_join_as_prisoner = 102 # (hero_can_join_as_prisoner, [party_id]),
party_can_join = 103 # (party_can_join),
party_can_join_as_prisoner = 104 # (party_can_join_as_prisoner),
troops_can_join = 105 # (troops_can_join,<value>),
troops_can_join_as_prisoner = 106 # (troops_can_join_as_prisoner,<value>),
party_can_join_party = 107 # (party_can_join_party, <joiner_party_id>, <host_party_id>,[flip_prisoners]),
party_end_battle = 108 # (party_end_battle,<party_no>),
main_party_has_troop = 110 # (main_party_has_troop,<troop_id>),
party_is_in_town = 130 # (party_is_in_town,<party_id_1>,<party_id_2>),
party_is_in_any_town = 131 # (party_is_in_any_town,<party_id>),
party_is_active = 132 # (party_is_active,<party_id>),
player_has_item = 150 # (player_has_item,<item_id>),
troop_has_item_equipped = 151 # (troop_has_item_equipped,<troop_id>,<item_id>),
troop_is_mounted = 152 # (troop_is_mounted,<troop_id>),
troop_is_guarantee_ranged = 153 # (troop_is_guarantee_ranged, <troop_id>),
troop_is_guarantee_horse = 154 # (troop_is_guarantee_horse, <troop_id>),
check_quest_active = 200 # (check_quest_active,<quest_id>),
check_quest_finished = 201 # (check_quest_finished,<quest_id>),
check_quest_succeeded = 202 # (check_quest_succeeded,<quest_id>),
check_quest_failed = 203 # (check_quest_failed,<quest_id>),
check_quest_concluded = 204 # (check_quest_concluded,<quest_id>),
is_trial_version = 250 # (is_trial_version),
is_edit_mode_enabled = 255 # (is_edit_mode_enabled),
options_get_damage_to_player = 260 # (options_get_damage_to_player, <destination>), 0 = 1/4, 1 = 1/2, 2 = 1/1
options_set_damage_to_player = 261 # (options_set_damage_to_player, <value>), 0 = 1/4, 1 = 1/2, 2 = 1/1
options_get_damage_to_friends = 262 # (options_get_damage_to_friends, <destination>), 0 = 1/2, 1 = 3/4, 2 = 1/1
options_set_damage_to_friends = 263 # (options_set_damage_to_friends, <value>), 0 = 1/2, 1 = 3/4, 2 = 1/1
options_get_combat_ai = 264 # (options_get_combat_ai, <destination>), 0 = good, 1 = average, 2 = poor
options_set_combat_ai = 265 # (options_set_combat_ai, <value>), 0 = good, 1 = average, 2 = poor
options_get_campaign_ai = 266 # (options_get_campaign_ai, <destination>), 0 = good, 1 = average, 2 = poor
options_set_campaign_ai = 267 # (options_set_campaign_ai, <value>), 0 = good, 1 = average, 2 = poor
options_get_combat_speed = 268 # (options_get_combat_speed, <destination>), 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
options_set_combat_speed = 269 # (options_set_combat_speed, <value>), 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
profile_get_banner_id = 350 # (profile_get_banner_id, <destination>),
profile_set_banner_id = 351 # (profile_set_banner_id, <value>),
get_achievement_stat = 370 # (get_achievement_stat, <destination>, <achievement_id>, <stat_index>),
set_achievement_stat = 371 # (set_achievement_stat, <achievement_id>, <stat_index>, <value>),
unlock_achievement = 372 # (unlock_achievement, <achievement_id>),
send_message_to_url = 380 # (send_message_to_url, <string_id>, <encode_url>), result will be returned to script_game_receive_url_response
multiplayer_send_message_to_server = 388 # (multiplayer_send_message_to_server, <message_type>), comment corrected: previously echoed the int variant
multiplayer_send_int_to_server = 389 # (multiplayer_send_int_to_server, <message_type>, <value>),
multiplayer_send_2_int_to_server = 390 # (multiplayer_send_2_int_to_server, <message_type>, <value>, <value>),
multiplayer_send_3_int_to_server = 391 # (multiplayer_send_3_int_to_server, <message_type>, <value>, <value>, <value>),
multiplayer_send_4_int_to_server = 392 # (multiplayer_send_4_int_to_server, <message_type>, <value>, <value>, <value>, <value>),
multiplayer_send_string_to_server = 393 # (multiplayer_send_string_to_server, <message_type>, <string_id>),
multiplayer_send_message_to_player = 394 # (multiplayer_send_message_to_player, <player_id>, <message_type>),
multiplayer_send_int_to_player = 395 # (multiplayer_send_int_to_player, <player_id>, <message_type>, <value>),
multiplayer_send_2_int_to_player = 396 # (multiplayer_send_2_int_to_player, <player_id>, <message_type>, <value>, <value>),
multiplayer_send_3_int_to_player = 397 # (multiplayer_send_3_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>),
multiplayer_send_4_int_to_player = 398 # (multiplayer_send_4_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>, <value>),
multiplayer_send_string_to_player = 399 # (multiplayer_send_string_to_player, <player_id>, <message_type>, <string_id>),
get_max_players = 400 # (get_max_players, <destination>),
player_is_active = 401 # (player_is_active, <player_id>),
player_get_team_no = 402 # (player_get_team_no, <destination>, <player_id>),
player_set_team_no = 403 # (player_set_team_no, <player_id>, <team_no>), comment corrected: previously a copy-paste of the getter's signature
player_get_troop_id = 404 # (player_get_troop_id, <destination>, <player_id>),
player_set_troop_id = 405 # (player_set_troop_id, <player_id>, <troop_id>), comment corrected: previously a copy-paste of the getter's signature
player_get_agent_id = 406 # (player_get_agent_id, <destination>, <player_id>),
player_get_gold = 407 # (player_get_gold, <destination>, <player_id>),
player_set_gold = 408 # (player_set_gold, <player_id>, <value>, <max_value>), set max_value to 0 if no limit is wanted
player_spawn_new_agent = 409 # (player_spawn_new_agent, <player_id>, <entry_point>),
player_add_spawn_item = 410 # (player_add_spawn_item, <player_id>, <item_slot_no>, <item_id>),
multiplayer_get_my_team = 411 # (multiplayer_get_my_team, <destination>),
multiplayer_get_my_troop = 412 # (multiplayer_get_my_troop, <destination>),
multiplayer_set_my_troop = 413 # (multiplayer_set_my_troop, <troop_id>), comment corrected: previously a copy-paste of the getter's signature
multiplayer_get_my_gold = 414 # (multiplayer_get_my_gold, <destination>),
multiplayer_get_my_player = 415 # (multiplayer_get_my_player, <destination>),
multiplayer_clear_scene = 416 # (multiplayer_clear_scene),
multiplayer_is_server = 417 # (multiplayer_is_server),
multiplayer_is_dedicated_server = 418 # (multiplayer_is_dedicated_server),
game_in_multiplayer_mode = 419 # (game_in_multiplayer_mode),
multiplayer_make_everyone_enemy = 420 # (multiplayer_make_everyone_enemy),
player_control_agent = 421 # (player_control_agent, <player_id>, <agent_id>),
player_get_item_id = 422 # (player_get_item_id, <destination>, <player_id>, <item_slot_no>) only for server
player_get_banner_id = 423 # (player_get_banner_id, <destination>, <player_id>),
game_get_reduce_campaign_ai = 424 # (game_get_reduce_campaign_ai, <destination>), depreciated, use options_get_campaign_ai instead
multiplayer_find_spawn_point = 425 # (multiplayer_find_spawn_point, <destination>, <team_no>, <examine_all_spawn_points>, <is_horseman>),
set_spawn_effector_scene_prop_kind = 426 # (set_spawn_effector_scene_prop_kind <team_no> <scene_prop_kind_no>)
set_spawn_effector_scene_prop_id = 427 # (set_spawn_effector_scene_prop_id <scene_prop_id>)
player_set_is_admin = 429 # (player_set_is_admin, <player_id>, <value>), value is 0 or 1
player_is_admin = 430 # (player_is_admin, <player_id>),
player_get_score = 431 # (player_get_score, <destination>, <player_id>),
player_set_score = 432 # (player_set_score,<player_id>, <value>),
player_get_kill_count = 433 # (player_get_kill_count, <destination>, <player_id>),
player_set_kill_count = 434 # (player_set_kill_count,<player_id>, <value>),
player_get_death_count = 435 # (player_get_death_count, <destination>, <player_id>),
player_set_death_count = 436 # (player_set_death_count, <player_id>, <value>),
player_get_ping = 437 # (player_get_ping, <destination>, <player_id>),
player_is_busy_with_menus = 438 # (player_is_busy_with_menus, <player_id>),
player_get_is_muted = 439 # (player_get_is_muted, <destination>, <player_id>),
player_set_is_muted = 440 # (player_set_is_muted, <player_id>, <value>, [mute_for_everyone]),
# mute_for_everyone optional parameter should be set to 1 if player is muted for everyone (this works only on server).
player_get_unique_id = 441 # (player_get_unique_id, <destination>, <player_id>), can only bew used on server side
player_get_gender = 442 # (player_get_gender, <destination>, <player_id>),
team_get_bot_kill_count = 450 # (team_get_bot_kill_count, <destination>, <team_id>),
team_set_bot_kill_count = 451 # (team_set_bot_kill_count, <team_id>, <value>), comment corrected: previously a copy-paste of the getter's signature
team_get_bot_death_count = 452 # (team_get_bot_death_count, <destination>, <team_id>),
team_set_bot_death_count = 453 # (team_set_bot_death_count, <team_id>, <value>), comment corrected: previously a copy-paste of the getter's signature
team_get_kill_count = 454 # (team_get_kill_count, <destination>, <team_id>),
team_get_score = 455 # (team_get_score, <destination>, <team_id>),
team_set_score = 456 # (team_set_score, <team_id>, <value>),
team_set_faction = 457 # (team_set_faction, <team_id>, <faction_id>),
team_get_faction = 458 # (team_get_faction, <destination>, <team_id>),
player_save_picked_up_items_for_next_spawn = 459 # (player_save_picked_up_items_for_next_spawn, <player_id>),
player_get_value_of_original_items = 460 # (player_get_value_of_original_items, <player_id>), default troop items will be counted as zero (except horse)
player_item_slot_is_picked_up = 461 # (player_item_slot_is_picked_up, <player_id>, <item_slot_no>),
# item slots are overriden when player picks up an item and stays alive until the next round
kick_player = 465 # (kick_player, <player_id>),
ban_player = 466 # (ban_player, <player_id>, <value>, <player_id>), set value = 1 for banning temporarily, assign 2nd player an admin if banning is permanent
save_ban_info_of_player = 467 # (save_ban_info_of_player, <player_id>),
ban_player_using_saved_ban_info = 468 # (ban_player_using_saved_ban_info),
start_multiplayer_mission = 470 # (start_multiplayer_mission, <mission_template_id>, <scene_id>, <started_manually>),
server_add_message_to_log = 473 # (server_add_message_to_log, <string_id>),
server_get_renaming_server_allowed = 475 # (server_get_renaming_server_allowed, <destination>), 0-1
server_get_changing_game_type_allowed = 476 # (server_get_changing_game_type_allowed, <destination>), 0-1
server_get_combat_speed = 478 # (server_get_combat_speed, <destination>), 0-2
server_set_combat_speed = 479 # (server_set_combat_speed, <value>), 0-2
server_get_friendly_fire = 480 # (server_get_friendly_fire, <destination>),
server_set_friendly_fire = 481 # (server_set_friendly_fire, <value>), 0 = off, 1 = on
server_get_control_block_dir = 482 # (server_get_control_block_dir, <destination>),
server_set_control_block_dir = 483 # (server_set_control_block_dir, <value>), 0 = automatic, 1 = by mouse movement
server_set_password = 484 # (server_set_password, <string_id>),
server_get_add_to_game_servers_list = 485 # (server_get_add_to_game_servers_list, <destination>),
server_set_add_to_game_servers_list = 486 # (server_set_add_to_game_servers_list, <value>),
server_get_ghost_mode = 487 # (server_get_ghost_mode, <destination>),
server_set_ghost_mode = 488 # (server_set_ghost_mode, <value>),
server_set_name = 489 # (server_set_name, <string_id>),
server_get_max_num_players = 490 # (server_get_max_num_players, <destination>),
server_set_max_num_players = 491 # (server_set_max_num_players, <value>),
server_set_welcome_message = 492 # (server_set_welcome_message, <string_id>),
server_get_melee_friendly_fire = 493 # (server_get_melee_friendly_fire, <destination>),
server_set_melee_friendly_fire = 494 # (server_set_melee_friendly_fire, <value>), 0 = off, 1 = on
server_get_friendly_fire_damage_self_ratio = 495 # (server_get_friendly_fire_damage_self_ratio, <destination>),
server_set_friendly_fire_damage_self_ratio = 496 # (server_set_friendly_fire_damage_self_ratio, <value>), 0-100
server_get_friendly_fire_damage_friend_ratio = 497 # (server_get_friendly_fire_damage_friend_ratio, <destination>),
server_set_friendly_fire_damage_friend_ratio = 498 # (server_set_friendly_fire_damage_friend_ratio, <value>), 0-100
server_get_anti_cheat = 499 # (server_get_anti_cheat, <destination>),
server_set_anti_cheat = 477 # (server_set_anti_cheat, <value>), 0 = off, 1 = on
troop_set_slot = 500 # (troop_set_slot,<troop_id>,<slot_no>,<value>),
party_set_slot = 501 # (party_set_slot,<party_id>,<slot_no>,<value>),
faction_set_slot = 502 # (faction_set_slot,<faction_id>,<slot_no>,<value>),
scene_set_slot = 503 # (scene_set_slot,<scene_id>,<slot_no>,<value>),
party_template_set_slot = 504 # (party_template_set_slot,<party_template_id>,<slot_no>,<value>),
agent_set_slot = 505 # (agent_set_slot,<agent_id>,<slot_no>,<value>),
quest_set_slot = 506 # (quest_set_slot,<quest_id>,<slot_no>,<value>),
item_set_slot = 507 # (item_set_slot,<item_id>,<slot_no>,<value>),
player_set_slot = 508 # (player_set_slot,<player_id>,<slot_no>,<value>),
team_set_slot = 509 # (team_set_slot,<team_id>,<slot_no>,<value>),
scene_prop_set_slot = 510 # (scene_prop_set_slot,<scene_prop_instance_id>,<slot_no>,<value>),
troop_get_slot = 520 # (troop_get_slot,<destination>,<troop_id>,<slot_no>),
party_get_slot = 521 # (party_get_slot,<destination>,<party_id>,<slot_no>),
faction_get_slot = 522 # (faction_get_slot,<destination>,<faction_id>,<slot_no>),
scene_get_slot = 523 # (scene_get_slot,<destination>,<scene_id>,<slot_no>),
party_template_get_slot = 524 # (party_template_get_slot,<destination>,<party_template_id>,<slot_no>),
agent_get_slot = 525 # (agent_get_slot,<destination>,<agent_id>,<slot_no>),
quest_get_slot = 526 # (quest_get_slot,<destination>,<quest_id>,<slot_no>),
item_get_slot = 527 # (item_get_slot,<destination>,<item_id>,<slot_no>),
player_get_slot = 528 # (player_get_slot,<destination>,<player_id>,<slot_no>),
team_get_slot = 529 # (team_get_slot,<destination>,<player_id>,<slot_no>),
scene_prop_get_slot = 530 # (scene_prop_get_slot,<destination>,<scene_prop_instance_id>,<slot_no>),
troop_slot_eq = 540 # (troop_slot_eq,<troop_id>,<slot_no>,<value>),
party_slot_eq = 541 # (party_slot_eq,<party_id>,<slot_no>,<value>),
faction_slot_eq = 542 # (faction_slot_eq,<faction_id>,<slot_no>,<value>),
scene_slot_eq = 543 # (scene_slot_eq,<scene_id>,<slot_no>,<value>),
party_template_slot_eq = 544 # (party_template_slot_eq,<party_template_id>,<slot_no>,<value>),
agent_slot_eq = 545 # (agent_slot_eq,<agent_id>,<slot_no>,<value>),
quest_slot_eq = 546 # (quest_slot_eq,<quest_id>,<slot_no>,<value>),
item_slot_eq = 547 # (item_slot_eq,<item_id>,<slot_no>,<value>),
player_slot_eq = 548 # (player_slot_eq,<player_id>,<slot_no>,<value>),
team_slot_eq = 549 # (team_slot_eq,<team_id>,<slot_no>,<value>),
scene_prop_slot_eq = 550 # (scene_prop_slot_eq,<scene_prop_instance_id>,<slot_no>,<value>),
troop_slot_ge = 560 # (troop_slot_ge,<troop_id>,<slot_no>,<value>),
party_slot_ge = 561 # (party_slot_ge,<party_id>,<slot_no>,<value>),
faction_slot_ge = 562 # (faction_slot_ge,<faction_id>,<slot_no>,<value>),
scene_slot_ge = 563 # (scene_slot_ge,<scene_id>,<slot_no>,<value>),
party_template_slot_ge = 564 # (party_template_slot_ge,<party_template_id>,<slot_no>,<value>),
agent_slot_ge = 565 # (agent_slot_ge,<agent_id>,<slot_no>,<value>),
quest_slot_ge = 566 # (quest_slot_ge,<quest_id>,<slot_no>,<value>),
item_slot_ge = 567 # (item_slot_ge,<item_id>,<slot_no>,<value>),
player_slot_ge = 568 # (player_slot_ge,<player_id>,<slot_no>,<value>),
team_slot_ge = 569 # (team_slot_ge,<team_id>,<slot_no>,<value>),
scene_prop_slot_ge = 570 # (scene_prop_slot_ge,<scene_prop_instance_id>,<slot_no>,<value>),
play_sound_at_position = 599 # (play_sound_at_position, <sound_id>, <position_no>, [options]),
play_sound = 600 # (play_sound,<sound_id>,[options]),
play_track = 601 # (play_track,<track_id>, [options]), 0 = default, 1 = fade out current track, 2 = stop current track
play_cue_track = 602 # (play_cue_track,<track_id>), starts immediately
music_set_situation = 603 # (music_set_situation, <situation_type>),
music_set_culture = 604 # (music_set_culture, <culture_type>),
stop_all_sounds = 609 # (stop_all_sounds, [options]), 0 = stop only looping sounds, 1 = stop all sounds
store_last_sound_channel = 615 # (store_last_sound_channel, <destination>),
stop_sound_channel = 616 # (stop_sound_channel, <sound_channel_no>),
copy_position = 700 # (copy_position,<position_no_1>,<position_no_2>), copies position_no_2 to position_no_1
init_position = 701 # (init_position,<position_no>),
get_trigger_object_position = 702 # (get_trigger_object_position,<position_no>),
get_angle_between_positions = 705 # (get_angle_between_positions, <destination_fixed_point>, <position_no_1>, <position_no_2>),
position_has_line_of_sight_to_position = 707 # (position_has_line_of_sight_to_position, <position_no_1>, <position_no_2>),
get_distance_between_positions = 710 # (get_distance_between_positions,<destination>,<position_no_1>,<position_no_2>), gets distance in centimeters.
get_distance_between_positions_in_meters = 711 # (get_distance_between_positions_in_meters,<destination>,<position_no_1>,<position_no_2>), gets distance in meters.
get_sq_distance_between_positions = 712 # (get_sq_distance_between_positions,<destination>,<position_no_1>,<position_no_2>), gets squared distance in centimeters
get_sq_distance_between_positions_in_meters = 713 # (get_sq_distance_between_positions_in_meters,<destination>,<position_no_1>,<position_no_2>), gets squared distance in meters
position_is_behind_position = 714 # (position_is_behind_position,<position_no_1>,<position_no_2>),
get_sq_distance_between_position_heights = 715 # (get_sq_distance_between_position_heights,<destination>,<position_no_1>,<position_no_2>), gets squared distance in centimeters
position_transform_position_to_parent = 716 # (position_transform_position_to_parent,<dest_position_no>,<position_no>,<position_no_to_be_transformed>),
position_transform_position_to_local = 717 # (position_transform_position_to_local, <dest_position_no>,<position_no>,<position_no_to_be_transformed>),
position_copy_rotation = 718 # (position_copy_rotation,<position_no_1>,<position_no_2>), copies rotation of position_no_2 to position_no_1
position_copy_origin = 719 # (position_copy_origin,<position_no_1>,<position_no_2>), copies origin of position_no_2 to position_no_1
position_move_x = 720 # (position_move_x,<position_no>,<movement>,[value]), movement is in cms, [0 = local; 1=global]
position_move_y = 721 # (position_move_y,<position_no>,<movement>,[value]),
position_move_z = 722 # (position_move_z,<position_no>,<movement>,[value]),
position_rotate_x = 723 # (position_rotate_x,<position_no>,<angle>),
position_rotate_y = 724 # (position_rotate_y,<position_no>,<angle>),
position_rotate_z = 725 # (position_rotate_z,<position_no>,<angle>,[use_global_z_axis]), set use_global_z_axis as 1 if needed, otherwise you don't have to give that.
position_get_x = 726 # (position_get_x,<destination_fixed_point>,<position_no>), x position in meters * fixed point multiplier is returned
position_get_y = 727 # (position_get_y,<destination_fixed_point>,<position_no>), y position in meters * fixed point multiplier is returned
position_get_z = 728 # (position_get_z,<destination_fixed_point>,<position_no>), z position in meters * fixed point multiplier is returned
position_set_x = 729 # (position_set_x,<position_no>,<value_fixed_point>), meters / fixed point multiplier is set
position_set_y = 730 # (position_set_y,<position_no>,<value_fixed_point>), meters / fixed point multiplier is set
position_set_z = 731 # (position_set_z,<position_no>,<value_fixed_point>), meters / fixed point multiplier is set
position_get_scale_x = 735 # (position_get_scale_x,<destination_fixed_point>,<position_no>), x scale in meters * fixed point multiplier is returned
position_get_scale_y = 736 # (position_get_scale_y,<destination_fixed_point>,<position_no>), y scale in meters * fixed point multiplier is returned
position_get_scale_z = 737 # (position_get_scale_z,<destination_fixed_point>,<position_no>), z scale in meters * fixed point multiplier is returned
position_rotate_x_floating = 738 # (position_rotate_x_floating,<position_no>,<angle>), angle in degree * fixed point multiplier
position_rotate_y_floating = 739 # (position_rotate_y_floating,<position_no>,<angle>), angle in degree * fixed point multiplier
position_get_rotation_around_z = 740 # (position_get_rotation_around_z,<destination>,<position_no>), rotation around z axis is returned as angle
position_normalize_origin = 741 # (position_normalize_origin,<destination_fixed_point>,<position_no>),
# destination = convert_to_fixed_point(length(position.origin))
# position.origin *= 1/length(position.origin) so it normalizes the origin vector
position_get_rotation_around_x = 742 # (position_get_rotation_around_x, <destination>, <position_no>), rotation around x axis is returned as angle
position_get_rotation_around_y = 743 # (position_get_rotation_around_y, <destination>, <position_no>), rotation around y axis is returned as angle
position_set_scale_x = 744 # (position_set_scale_x, <position_no>, <value_fixed_point>), x scale in meters / fixed point multiplier is set
position_set_scale_y = 745 # (position_set_scale_y, <position_no>, <value_fixed_point>), y scale in meters / fixed point multiplier is set
position_set_scale_z = 746 # (position_set_scale_z, <position_no>, <value_fixed_point>), z scale in meters / fixed point multiplier is set
position_get_screen_projection = 750 # (position_get_screen_projection, <position_no_1>, <position_no_2>), returns screen projection of position_no_2 to position_no_1
position_set_z_to_ground_level = 791 # (position_set_z_to_ground_level, <position_no>), only works during a mission
position_get_distance_to_terrain = 792 # (position_get_distance_to_terrain, <destination>, <position_no>), only works during a mission
position_get_distance_to_ground_level = 793 # (position_get_distance_to_ground_level, <destination>, <position_no>), only works during a mission
start_presentation = 900 # (start_presentation, <presentation_id>),
start_background_presentation = 901 # (start_background_presentation, <presentation_id>), can only be used in game menus
presentation_set_duration = 902 # (presentation_set_duration, <duration-in-1/100-seconds>), there must be an active presentation
is_presentation_active = 903 # (is_presentation_active, <presentation_id),
create_text_overlay = 910 # (create_text_overlay, <destination>, <string_id>), returns overlay id
# Presentation/overlay operation codes. Each constant is an engine opcode; the
# trailing comment documents the operation's argument signature.
create_mesh_overlay = 911 # (create_mesh_overlay, <destination>, <mesh_id>), returns overlay id
create_button_overlay = 912 # (create_button_overlay, <destination>, <string_id>), returns overlay id
create_image_button_overlay = 913 # (create_image_button_overlay, <destination>, <mesh_id>, <mesh_id>), returns overlay id. second mesh is the pressed button mesh
create_slider_overlay = 914 # (create_slider_overlay, <destination>, <min_value>, <max_value>), returns overlay id
create_progress_overlay = 915 # (create_progress_overlay, <destination>, <min_value>, <max_value>), returns overlay id
create_combo_button_overlay = 916 # (create_combo_button_overlay, <destination>), returns overlay id
create_text_box_overlay = 917 # (create_text_box_overlay, <destination>), returns overlay id
create_check_box_overlay = 918 # (create_check_box_overlay, <destination>), returns overlay id
create_simple_text_box_overlay = 919 # (create_simple_text_box_overlay, <destination>), returns overlay id
overlay_set_text = 920 # (overlay_set_text, <overlay_id>, <string_id>),
overlay_set_color = 921 # (overlay_set_color, <overlay_id>, <color>), color in RGB format like 0xRRGGBB (put hexadecimal values for RR GG and BB parts)
overlay_set_alpha = 922 # (overlay_set_alpha, <overlay_id>, <alpha>), alpha in A format like 0xAA (put hexadecimal values for AA part)
overlay_set_hilight_color = 923 # (overlay_set_hilight_color, <overlay_id>, <color>), color in RGB format like 0xRRGGBB (put hexadecimal values for RR GG and BB parts)
overlay_set_hilight_alpha = 924 # (overlay_set_hilight_alpha, <overlay_id>, <alpha>), alpha in A format like 0xAA (put hexadecimal values for AA part)
overlay_set_size = 925 # (overlay_set_size, <overlay_id>, <position_no>), position's x and y values are used
overlay_set_position = 926 # (overlay_set_position, <overlay_id>, <position_no>), position's x and y values are used
overlay_set_val = 927 # (overlay_set_val, <overlay_id>, <value>), can be used for sliders, combo buttons and check boxes
overlay_set_boundaries = 928 # (overlay_set_boundaries, <overlay_id>, <min_value>, <max_value>),
overlay_set_area_size = 929 # (overlay_set_area_size, <overlay_id>, <position_no>), position's x and y values are used
overlay_set_mesh_rotation = 930 # (overlay_set_mesh_rotation, <overlay_id>, <position_no>), position's rotation values are used for rotations around x, y and z axis
overlay_add_item = 931 # (overlay_add_item, <overlay_id>, <string_id>), adds an item to the combo box
overlay_animate_to_color = 932 # (overlay_animate_to_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>), alpha value will not be used
overlay_animate_to_alpha = 933 # (overlay_animate_to_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <color>), only alpha value will be used
overlay_animate_to_highlight_color = 934 # (overlay_animate_to_highlight_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>), alpha value will not be used
overlay_animate_to_highlight_alpha = 935 # (overlay_animate_to_highlight_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <color>), only alpha value will be used
overlay_animate_to_size = 936 # (overlay_animate_to_size, <overlay_id>, <duration-in-1/1000-seconds>, <position_no>), position's x and y values are used as
overlay_animate_to_position = 937 # (overlay_animate_to_position, <overlay_id>, <duration-in-1/1000-seconds>, <position_no>), position's x and y values are used as
create_image_button_overlay_with_tableau_material = 938 # (create_image_button_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
                                                        # returns overlay id, value is passed to tableau_material, when mesh_id is -1, a default mesh is generated automatically
create_mesh_overlay_with_tableau_material = 939 # (create_mesh_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
                                                # returns overlay id, value is passed to tableau_material, when mesh_id is -1, a default mesh is generated automatically
create_game_button_overlay = 940 # (create_game_button_overlay, <destination>, <string_id>), returns overlay id
create_in_game_button_overlay = 941 # (create_in_game_button_overlay, <destination>, <string_id>), returns overlay id
create_number_box_overlay = 942 # (create_number_box_overlay, <destination>, <min_value>, <max_value>), returns overlay id
create_listbox_overlay = 943 # (create_listbox_overlay, <destination>), returns overlay id
create_mesh_overlay_with_item_id = 944 # (create_mesh_overlay_with_item_id, <destination>, <item_id>), returns overlay id.
set_container_overlay = 945 # (set_container_overlay, <overlay_id>), sets the container overlay that new overlays will attach to. give -1 to reset
overlay_get_position = 946 # (overlay_get_position, <destination>, <overlay_id>)
overlay_set_display = 947 # (overlay_set_display, <overlay_id>, <value>), shows/hides overlay (1 = show, 0 = hide)
create_combo_label_overlay = 948 # (create_combo_label_overlay, <destination>), returns overlay id
overlay_obtain_focus = 949 # (overlay_obtain_focus, <overlay_id>), works for textboxes only
overlay_set_tooltip = 950 # (overlay_set_tooltip, <overlay_id>, <string_id>),
overlay_set_container_overlay = 951 # (overlay_set_container_overlay, <overlay_id>, <container_overlay_id>) -1 to reset
overlay_set_additional_render_height = 952 # (overlay_set_additional_render_height, <overlay_id>, <height_adder>),
show_object_details_overlay = 960 # (show_object_details_overlay, <value>), 0 = hide, 1 = show
show_item_details = 970 # (show_item_details, <item_id>, <position_no>, <show_default_text_or_not>)
                        # show_default_text_or_not should be 1 for showing "default" for default item costs
close_item_details = 971 # (close_item_details)
show_item_details_with_modifier = 972 # (show_item_details_with_modifier, <item_id>, <item_modifier>, <position_no>, <show_default_text_or_not>)
                                      # show_default_text_or_not should be 1 for showing "default" for default item costs
context_menu_add_item = 980 # (context_menu_add_item, <string_id>, <value>), must be called only inside script_game_right_mouse_menu_get_buttons
get_average_game_difficulty = 990 # (get_average_game_difficulty, <destination>),
get_level_boundary = 991 # (get_level_boundary, <destination>, <level_no>),
#-----------------------------------------------------------------------------
# MISSION CONDITIONS
#-----------------------------------------------------------------------------
# Condition opcodes. NOTE(review): presumably only meaningful where conditions
# are evaluated (e.g. mission-template trigger condition blocks) — confirm against engine docs.
all_enemies_defeated = 1003 # (all_enemies_defeated),
race_completed_by_player = 1004 # (race_completed_by_player),
num_active_teams_le = 1005 # (num_active_teams_le,<value>),
main_hero_fallen = 1006 # (main_hero_fallen),
#-----------------------------------------------------------------------------
# NEGATIONS
#-----------------------------------------------------------------------------
# Modifier bit-flags OR'ed onto a condition opcode:
#   neg          — high bit; inverts the condition it is combined with.
#   this_or_next — chains this condition with the next one as a logical OR.
neg = 0x80000000 # (neg|<operation>),
this_or_next = 0x40000000 # (this_or_next|<operation>),
# Derived comparison opcodes, built by negating ge/eq/gt (defined earlier in this file).
lt = neg | ge # (lt,<value>,<value>), less than
neq = neg | eq # (neq,<value>,<value>), not equal to
le = neg | gt # (le,<value>,<value>), less than or equal to
#-----------------------------------------------------------------------------
# CONSEQUENCE OPERATIONS
#-----------------------------------------------------------------------------
finish_party_battle_mode = 1019 # (finish_party_battle_mode),
set_party_battle_mode = 1020 # (set_party_battle_mode),
set_camera_follow_party = 1021 # (set_camera_follow_party,<party_id>), works on map only.
start_map_conversation = 1025 # (start_map_conversation,<troop_id>),
rest_for_hours = 1030 # (rest_for_hours,<rest_period>,[time_speed],[remain_attackable]),
rest_for_hours_interactive = 1031 # (rest_for_hours_interactive,<rest_period>,[time_speed],[remain_attackable]),
add_xp_to_troop = 1062 # (add_xp_to_troop,<value>,[troop_id]),
add_gold_as_xp = 1063 # (add_gold_as_xp,<value>,[troop_id]),
add_xp_as_reward = 1064 # (add_xp_as_reward,<value>),
add_gold_to_party = 1070 # (add_gold_to_party,<value>,<party_id>), party_id should be different from 0
set_party_creation_random_limits = 1080 # (set_party_creation_random_limits, <min_value>, <max_value>), (values should be between 0, 100)
troop_set_note_available = 1095 # (troop_set_note_available, <troop_id>, <value>), 1 = available, 0 = not available
faction_set_note_available = 1096 # (faction_set_note_available, <faction_id>, <value>), 1 = available, 0 = not available
party_set_note_available = 1097 # (party_set_note_available, <party_id>, <value>), 1 = available, 0 = not available
quest_set_note_available = 1098 # (quest_set_note_available, <quest_id>, <value>), 1 = available, 0 = not available
spawn_around_party = 1100 # (spawn_around_party,<party_id>,<party_template_id>), id of spawned party is put into reg0
set_spawn_radius = 1103 # (set_spawn_radius,<value>),
display_debug_message = 1104 # (display_debug_message,<string_id>,[hex_colour_code]),
                             # displays message only in debug mode, but writes to rgl_log.txt in both release and debug modes when edit mode is enabled
display_log_message = 1105 # (display_log_message,<string_id>,[hex_colour_code]),
display_message = 1106 # (display_message,<string_id>,[hex_colour_code]),
set_show_messages = 1107 # (set_show_messages,<value>), 0 disables window messages 1 re-enables them.
add_troop_note_tableau_mesh = 1108 # (add_troop_note_tableau_mesh,<troop_id>,<tableau_material_id>),
add_faction_note_tableau_mesh = 1109 # (add_faction_note_tableau_mesh,<faction_id>,<tableau_material_id>),
add_party_note_tableau_mesh = 1110 # (add_party_note_tableau_mesh,<party_id>,<tableau_material_id>),
add_quest_note_tableau_mesh = 1111 # (add_quest_note_tableau_mesh,<quest_id>,<tableau_material_id>),
add_info_page_note_tableau_mesh = 1090 # (add_info_page_note_tableau_mesh,<info_page_id>,<tableau_material_id>),
add_troop_note_from_dialog = 1114 # (add_troop_note_from_dialog,<troop_id>,<note_slot_no>, <value>), there are maximum of 8 slots. value = 1 -> shows when the note is added
add_faction_note_from_dialog = 1115 # (add_faction_note_from_dialog,<faction_id>,<note_slot_no>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_party_note_from_dialog = 1116 # (add_party_note_from_dialog,<party_id>,<note_slot_no>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_quest_note_from_dialog = 1112 # (add_quest_note_from_dialog,<quest_id>,<note_slot_no>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_info_page_note_from_dialog = 1091 # (add_info_page_note_from_dialog,<info_page_id>,<note_slot_no>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_troop_note_from_sreg = 1117 # (add_troop_note_from_sreg,<troop_id>,<note_slot_no>,<string_id>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_faction_note_from_sreg = 1118 # (add_faction_note_from_sreg,<faction_id>,<note_slot_no>,<string_id>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_party_note_from_sreg = 1119 # (add_party_note_from_sreg,<party_id>,<note_slot_no>,<string_id>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_quest_note_from_sreg = 1113 # (add_quest_note_from_sreg,<quest_id>,<note_slot_no>,<string_id>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
add_info_page_note_from_sreg = 1092 # (add_info_page_note_from_sreg,<info_page_id>,<note_slot_no>,<string_id>, <value>), there are maximum of 8 slots value = 1 -> shows when the note is added
tutorial_box = 1120 # (tutorial_box,<string_id>,<string_id>), deprecated use dialog_box instead.
dialog_box = 1120 # (dialog_box,<text_string_id>,<title_string_id>), same opcode as the deprecated tutorial_box
question_box = 1121 # (question_box,<string_id>, [<yes_string_id>], [<no_string_id>]),
tutorial_message = 1122 # (tutorial_message,<string_id>, <color>, <auto_close_time>), set string_id = -1 for hiding the message
tutorial_message_set_position = 1123 # (tutorial_message_set_position, <position_x>, <position_y>),
tutorial_message_set_size = 1124 # (tutorial_message_set_size, <size_x>, <size_y>),
tutorial_message_set_center_justify = 1125 # (tutorial_message_set_center_justify, <val>), set not 0 for center justify, 0 for not center justify
tutorial_message_set_background = 1126 # (tutorial_message_set_background, <value>), 1 = on, 0 = off, default is off
set_tooltip_text = 1130 # (set_tooltip_text, <string_id>),
reset_price_rates = 1170 # (reset_price_rates),
set_price_rate_for_item = 1171 # (set_price_rate_for_item,<item_id>,<value_percentage>),
set_price_rate_for_item_type = 1172 # (set_price_rate_for_item_type,<item_type_id>,<value_percentage>),
party_join = 1201 # (party_join),
party_join_as_prisoner = 1202 # (party_join_as_prisoner),
troop_join = 1203 # (troop_join,<troop_id>),
troop_join_as_prisoner = 1204 # (troop_join_as_prisoner,<troop_id>),
remove_member_from_party = 1210 # (remove_member_from_party,<troop_id>,[party_id]),
remove_regular_prisoners = 1211 # (remove_regular_prisoners,<party_id>),
remove_troops_from_companions = 1215 # (remove_troops_from_companions,<troop_id>,<value>),
remove_troops_from_prisoners = 1216 # (remove_troops_from_prisoners,<troop_id>,<value>),
heal_party = 1225 # (heal_party,<party_id>),
disable_party = 1230 # (disable_party,<party_id>),
enable_party = 1231 # (enable_party,<party_id>),
remove_party = 1232 # (remove_party,<party_id>),
add_companion_party = 1233 # (add_companion_party,<troop_id_hero>),
add_troop_to_site = 1250 # (add_troop_to_site,<troop_id>,<scene_id>,<entry_no>),
remove_troop_from_site = 1251 # (remove_troop_from_site,<troop_id>,<scene_id>),
modify_visitors_at_site = 1261 # (modify_visitors_at_site,<scene_id>),
reset_visitors = 1262 # (reset_visitors),
set_visitor = 1263 # (set_visitor,<entry_no>,<troop_id>,[<dna>]),
set_visitors = 1264 # (set_visitors,<entry_no>,<troop_id>,<number_of_troops>),
add_visitors_to_current_scene = 1265 # (add_visitors_to_current_scene,<entry_no>,<troop_id>,<number_of_troops>,<team_no>,<group_no>),
                                     # team no and group no are used in multiplayer mode only. default team in entry is used in single player mode
scene_set_day_time = 1266 # (scene_set_day_time, <value>), value in hours (0-23), must be called within ti_before_mission_start triggers
set_relation = 1270 # (set_relation,<faction_id>,<faction_id>,<value>),
faction_set_name = 1275 # (faction_set_name, <faction_id>, <string_id>),
faction_set_color = 1276 # (faction_set_color, <faction_id>, <value>),
faction_get_color = 1277 # (faction_get_color, <color>, <faction_id>)
start_quest = 1280 # (start_quest,<quest_id>),
complete_quest = 1281 # (complete_quest,<quest_id>),
succeed_quest = 1282 # (succeed_quest,<quest_id>), also concludes the quest
fail_quest = 1283 # (fail_quest,<quest_id>), also concludes the quest
cancel_quest = 1284 # (cancel_quest,<quest_id>),
set_quest_progression = 1285 # (set_quest_progression,<quest_id>,<value>),
conclude_quest = 1286 # (conclude_quest,<quest_id>),
setup_quest_text = 1290 # (setup_quest_text,<quest_id>),
setup_quest_giver = 1291 # (setup_quest_giver,<quest_id>, <string_id>),
start_encounter = 1300 # (start_encounter,<party_id>),
leave_encounter = 1301 # (leave_encounter),
encounter_attack = 1302 # (encounter_attack),
select_enemy = 1303 # (select_enemy,<value>),
set_passage_menu = 1304 # (set_passage_menu,<value>),
auto_set_meta_mission_at_end_commited = 1305 # (auto_set_meta_mission_at_end_commited),
end_current_battle = 1307 # (end_current_battle),
set_mercenary_source_party = 1320 # (set_mercenary_source_party,<party_id>), selects party from which to buy mercenaries
set_merchandise_modifier_quality = 1490 # (set_merchandise_modifier_quality,<value>),quality rate in percentage (average quality = 100)
set_merchandise_max_value = 1491 # (set_merchandise_max_value,<value>),
reset_item_probabilities = 1492 # (reset_item_probabilities),
set_item_probability_in_merchandise = 1493 # (set_item_probability_in_merchandise,<item_id>,<value>),
troop_set_name = 1501 # (troop_set_name, <troop_id>, <string_no>),
troop_set_plural_name = 1502 # (troop_set_plural_name, <troop_id>, <string_no>),
troop_set_face_key_from_current_profile = 1503 # (troop_set_face_key_from_current_profile, <troop_id>),
troop_set_type = 1505 # (troop_set_type,<troop_id>,<gender>),
troop_get_type = 1506 # (troop_get_type,<destination>,<troop_id>),
troop_is_hero = 1507 # (troop_is_hero,<troop_id>),
troop_is_wounded = 1508 # (troop_is_wounded,<troop_id>), only for heroes!
troop_set_auto_equip = 1509 # (troop_set_auto_equip,<troop_id>,<value>), disables or enables auto-equipping
troop_ensure_inventory_space = 1510 # (troop_ensure_inventory_space,<troop_id>,<value>),
troop_sort_inventory = 1511 # (troop_sort_inventory,<troop_id>),
troop_add_merchandise = 1512 # (troop_add_merchandise,<troop_id>,<item_type_id>,<value>),
troop_add_merchandise_with_faction = 1513 # (troop_add_merchandise_with_faction,<troop_id>,<faction_id>,<item_type_id>,<value>), faction_id is given to check if troop is eligible to produce that item
troop_get_xp = 1515 # (troop_get_xp, <destination>, <troop_id>),
troop_get_class = 1516 # (troop_get_class, <destination>, <troop_id>),
troop_set_class = 1517 # (troop_set_class, <troop_id>, <value>),
troop_raise_attribute = 1520 # (troop_raise_attribute,<troop_id>,<attribute_id>,<value>),
troop_raise_skill = 1521 # (troop_raise_skill,<troop_id>,<skill_id>,<value>),
troop_raise_proficiency = 1522 # (troop_raise_proficiency,<troop_id>,<proficiency_no>,<value>),
troop_raise_proficiency_linear = 1523 # (troop_raise_proficiency_linear,<troop_id>,<proficiency_no>,<value>),
                                      # raises weapon proficiencies linearly without being limited by weapon master skill
troop_add_proficiency_points = 1525 # (troop_add_proficiency_points,<troop_id>,<value>),
troop_add_gold = 1528 # (troop_add_gold,<troop_id>,<value>),
troop_remove_gold = 1529 # (troop_remove_gold,<troop_id>,<value>),
troop_add_item = 1530 # (troop_add_item,<troop_id>,<item_id>,[modifier]),
troop_remove_item = 1531 # (troop_remove_item,<troop_id>,<item_id>),
troop_clear_inventory = 1532 # (troop_clear_inventory,<troop_id>),
troop_equip_items = 1533 # (troop_equip_items,<troop_id>), equips the items in the inventory automatically
troop_inventory_slot_set_item_amount = 1534 # (troop_inventory_slot_set_item_amount,<troop_id>,<inventory_slot_no>,<value>),
troop_inventory_slot_get_item_amount = 1537 # (troop_inventory_slot_get_item_amount,<destination>,<troop_id>,<inventory_slot_no>),
troop_inventory_slot_get_item_max_amount = 1538 # (troop_inventory_slot_get_item_max_amount,<destination>,<troop_id>,<inventory_slot_no>),
troop_add_items = 1535 # (troop_add_items,<troop_id>,<item_id>,<number>),
troop_remove_items = 1536 # (troop_remove_items,<troop_id>,<item_id>,<number>), puts cost of items to reg0
troop_loot_troop = 1539 # (troop_loot_troop,<target_troop>,<source_troop_id>,<probability>),
troop_get_inventory_capacity = 1540 # (troop_get_inventory_capacity,<destination>,<troop_id>),
troop_get_inventory_slot = 1541 # (troop_get_inventory_slot,<destination>,<troop_id>,<inventory_slot_no>),
troop_get_inventory_slot_modifier = 1542 # (troop_get_inventory_slot_modifier,<destination>,<troop_id>,<inventory_slot_no>),
troop_set_inventory_slot = 1543 # (troop_set_inventory_slot,<troop_id>,<inventory_slot_no>,<value>),
troop_set_inventory_slot_modifier = 1544 # (troop_set_inventory_slot_modifier,<troop_id>,<inventory_slot_no>,<value>),
troop_set_faction = 1550 # (troop_set_faction,<troop_id>,<faction_id>),
troop_set_age = 1555 # (troop_set_age, <troop_id>, <age_slider_pos>), enter a value between 0..100
troop_set_health = 1560 # (troop_set_health,<troop_id>,<relative health (0-100)>),
troop_get_upgrade_troop = 1561 # (troop_get_upgrade_troop,<destination>,<troop_id>,<upgrade_path>), upgrade_path can be: 0 = get first node, 1 = get second node (returns -1 if not available)
item_get_type = 1570 # (item_get_type, <destination>, <item_id>), returned values are listed at header_items.py (values starting with itp_type_)
party_get_num_companions = 1601 # (party_get_num_companions,<destination>,<party_id>),
party_get_num_prisoners = 1602 # (party_get_num_prisoners,<destination>,<party_id>),
party_set_flags = 1603 # (party_set_flags, <party_id>, <flag>, <clear_or_set>), sets flags like pf_default_behavior. see header_parties.py for flags.
party_set_marshall = 1604 # (party_set_marshall, <party_id>, <value>)
party_set_extra_text = 1605 # (party_set_extra_text,<party_id>, <string>)
party_set_aggressiveness = 1606 # (party_set_aggressiveness, <party_id>, <number>),
party_set_courage = 1607 # (party_set_courage, <party_id>, <number>),
party_get_current_terrain = 1608 # (party_get_current_terrain,<destination>,<party_id>),
party_get_template_id = 1609 # (party_get_template_id,<destination>,<party_id>),
party_add_members = 1610 # (party_add_members,<party_id>,<troop_id>,<number>), returns number added in reg0
party_add_prisoners = 1611 # (party_add_prisoners,<party_id>,<troop_id>,<number>), returns number added in reg0
party_add_leader = 1612 # (party_add_leader,<party_id>,<troop_id>,[<number>]),
party_force_add_members = 1613 # (party_force_add_members,<party_id>,<troop_id>,<number>),
party_force_add_prisoners = 1614 # (party_force_add_prisoners,<party_id>,<troop_id>,<number>),
party_remove_members = 1615 # (party_remove_members,<party_id>,<troop_id>,<number>), stores number removed to reg0
party_remove_prisoners = 1616 # (party_remove_prisoners,<party_id>,<troop_id>,<number>), stores number removed to reg0
party_clear = 1617 # (party_clear,<party_id>),
party_wound_members = 1618 # (party_wound_members,<party_id>,<troop_id>,<number>),
party_remove_members_wounded_first = 1619 # (party_remove_members_wounded_first,<party_id>,<troop_id>,<number>), stores number removed to reg0
party_set_faction = 1620 # (party_set_faction,<party_id>,<faction_id>),
party_relocate_near_party = 1623 # (party_relocate_near_party,<party_id>,<target_party_id>,<value_spawn_radius>),
party_get_position = 1625 # (party_get_position,<position_no>,<party_id>),
party_set_position = 1626 # (party_set_position,<party_id>,<position_no>),
map_get_random_position_around_position = 1627 # (map_get_random_position_around_position,<dest_position_no>,<source_position_no>,<radius>),
map_get_land_position_around_position = 1628 # (map_get_land_position_around_position,<dest_position_no>,<source_position_no>,<radius>),
map_get_water_position_around_position = 1629 # (map_get_water_position_around_position,<dest_position_no>,<source_position_no>,<radius>),
party_count_members_of_type = 1630 # (party_count_members_of_type,<destination>,<party_id>,<troop_id>),
party_count_companions_of_type = 1631 # (party_count_companions_of_type,<destination>,<party_id>,<troop_id>),
party_count_prisoners_of_type = 1632 # (party_count_prisoners_of_type,<destination>,<party_id>,<troop_id>),
party_get_free_companions_capacity = 1633 # (party_get_free_companions_capacity,<destination>,<party_id>),
party_get_free_prisoners_capacity = 1634 # (party_get_free_prisoners_capacity,<destination>,<party_id>),
party_get_ai_initiative = 1638 # (party_get_ai_initiative,<destination>,<party_id>), result is between 0-100
party_set_ai_initiative = 1639 # (party_set_ai_initiative,<party_id>,<value>), value is between 0-100
party_set_ai_behavior = 1640 # (party_set_ai_behavior,<party_id>,<ai_bhvr>),
party_set_ai_object = 1641 # (party_set_ai_object,<party_id>,<party_id>),
party_set_ai_target_position = 1642 # (party_set_ai_target_position,<party_id>,<position_no>),
party_set_ai_patrol_radius = 1643 # (party_set_ai_patrol_radius,<party_id>,<radius_in_km>),
party_ignore_player = 1644 # (party_ignore_player, <party_id>,<duration_in_hours>), don't pursue player party for this duration
party_set_bandit_attraction = 1645 # (party_set_bandit_attraction, <party_id>,<attraction>), set how attractive a target the party is for bandits (0..100)
party_get_helpfulness = 1646 # (party_get_helpfulness,<destination>,<party_id>),
party_set_helpfulness = 1647 # (party_set_helpfulness, <party_id>, <number>), tendency to help friendly parties under attack. (0-10000, 100 default.)
party_get_num_companion_stacks = 1650 # (party_get_num_companion_stacks,<destination>,<party_id>),
party_get_num_prisoner_stacks = 1651 # (party_get_num_prisoner_stacks, <destination>,<party_id>),
party_stack_get_troop_id = 1652 # (party_stack_get_troop_id, <destination>,<party_id>,<stack_no>),
party_stack_get_size = 1653 # (party_stack_get_size, <destination>,<party_id>,<stack_no>),
party_stack_get_num_wounded = 1654 # (party_stack_get_num_wounded, <destination>,<party_id>,<stack_no>),
party_stack_get_troop_dna = 1655 # (party_stack_get_troop_dna, <destination>,<party_id>,<stack_no>),
party_prisoner_stack_get_troop_id = 1656 # (party_prisoner_stack_get_troop_id,<destination>,<party_id>,<stack_no>),
party_prisoner_stack_get_size = 1657 # (party_prisoner_stack_get_size, <destination>,<party_id>,<stack_no>),
party_prisoner_stack_get_troop_dna = 1658 # (party_prisoner_stack_get_troop_dna, <destination>,<party_id>,<stack_no>),
party_attach_to_party = 1660 # (party_attach_to_party, <party_id>, <party_id to attach to>),
party_detach = 1661 # (party_detach, <party_id>),
party_collect_attachments_to_party = 1662 # (party_collect_attachments_to_party, <party_id>, <destination party_id>),
party_quick_attach_to_current_battle = 1663 # (party_quick_attach_to_current_battle, <party_id>, <side (0:players side, 1:enemy side)>),
party_get_cur_town = 1665 # (party_get_cur_town, <destination>, <party_id>),
party_leave_cur_battle = 1666 # (party_leave_cur_battle, <party_id>),
party_set_next_battle_simulation_time = 1667 # (party_set_next_battle_simulation_time,<party_id>,<next_simulation_time_in_hours>),
party_set_name = 1669 # (party_set_name, <party_id>, <string_no>),
party_add_xp_to_stack = 1670 # (party_add_xp_to_stack, <party_id>, <stack_no>, <xp_amount>),
party_get_morale = 1671 # (party_get_morale, <destination>,<party_id>),
party_set_morale = 1672 # (party_set_morale, <party_id>, <value>), value is clamped to range [0...100].
party_upgrade_with_xp = 1673 # (party_upgrade_with_xp, <party_id>, <xp_amount>, <upgrade_path>), upgrade_path can be: 0 = random, 1 = first, 2 = second
party_add_xp = 1674 # (party_add_xp, <party_id>, <xp_amount>),
party_add_template = 1675 # (party_add_template, <party_id>, <party_template_id>, [reverse_prisoner_status]),
party_set_icon = 1676 # (party_set_icon, <party_id>, <map_icon_id>),
party_set_banner_icon = 1677 # (party_set_banner_icon, <party_id>, <map_icon_id>),
party_add_particle_system = 1678 # (party_add_particle_system, <party_id>, <particle_system_id>),
party_clear_particle_systems = 1679 # (party_clear_particle_systems, <party_id>),
party_get_battle_opponent = 1680 # (party_get_battle_opponent, <destination>, <party_id>)
party_get_icon = 1681 # (party_get_icon, <destination>, <party_id>),
party_set_extra_icon = 1682 # (party_set_extra_icon, <party_id>, <map_icon_id>, <up_down_distance_fixed_point>, <up_down_frequency_fixed_point>, <rotate_frequency_fixed_point>, <fade_in_out_frequency_fixed_point>), frequencies are in number of revolutions per second
party_get_skill_level = 1685 # (party_get_skill_level, <destination>, <party_id>, <skill_no>),
get_battle_advantage = 1690 # (get_battle_advantage, <destination>),
set_battle_advantage = 1691 # (set_battle_advantage, <value>),
party_get_attached_to = 1694 # (party_get_attached_to, <destination>, <party_id>),
party_get_num_attached_parties = 1695 # (party_get_num_attached_parties, <destination>, <party_id>),
party_get_attached_party_with_rank = 1696 # (party_get_attached_party_with_rank, <destination>, <party_id>, <attached_party_no>),
inflict_casualties_to_party_group = 1697 # (inflict_casualties_to_party, <parent_party_id>, <attack_rounds>, <party_id_to_add_causalties_to>),
distribute_party_among_party_group = 1698 # (distribute_party_among_party_group, <party_to_be_distributed>, <group_root_party>),
get_player_agent_no = 1700 # (get_player_agent_no,<destination>),
get_player_agent_kill_count = 1701 # (get_player_agent_kill_count,<destination>,[get_wounded]), set second value to non-zero to get wounded count. returns lifetime kill counts
agent_is_alive = 1702 # (agent_is_alive,<agent_id>),
agent_is_wounded = 1703 # (agent_is_wounded,<agent_id>),
agent_is_human = 1704 # (agent_is_human,<agent_id>),
get_player_agent_own_troop_kill_count = 1705 # (get_player_agent_own_troop_kill_count,<destination>,[get_wounded]), set second value to non-zero to get wounded count
agent_is_ally = 1706 # (agent_is_ally,<agent_id>),
agent_is_non_player = 1707 # (agent_is_non_player, <agent_id>),
agent_is_defender = 1708 # (agent_is_defender,<agent_id>),
agent_is_routed = 1699 # (agent_is_routed,<agent_id>),
agent_is_in_special_mode = 1693 # (agent_is_in_special_mode,<agent_id>),
agent_get_look_position = 1709 # (agent_get_look_position, <position_no>, <agent_id>),
agent_get_position = 1710 # (agent_get_position,<position_no>,<agent_id>),
agent_set_position = 1711 # (agent_set_position,<agent_id>,<position_no>),
agent_get_speed = 1689 # (agent_get_speed, <position_no>, <agent_id>), will return speed in x and y
agent_is_active = 1712 # (agent_is_active,<agent_id>),
agent_set_look_target_agent = 1713 # (agent_set_look_target_agent, <agent_id>, <agent_id>), second agent_id is the target
agent_get_horse = 1714 # (agent_get_horse,<destination>,<agent_id>),
agent_get_rider = 1715 # (agent_get_rider,<destination>,<agent_id>),
agent_get_party_id = 1716 # (agent_get_party_id,<destination>,<agent_id>),
agent_get_entry_no = 1717 # (agent_get_entry_no,<destination>,<agent_id>),
agent_get_troop_id = 1718 # (agent_get_troop_id,<destination>, <agent_id>),
agent_get_item_id = 1719 # (agent_get_item_id,<destination>, <agent_id>), (works only for horses, returns -1 otherwise)
store_agent_hit_points = 1720 # (store_agent_hit_points,<destination>,<agent_id>,[absolute]),
                              # set absolute to 1 to retrieve actual hps, otherwise will return relative hp in range [0..100]
agent_set_hit_points = 1721 # (agent_set_hit_points,<agent_id>,<value>,[absolute]),
                            # set absolute to 1 if value is absolute, otherwise value will be treated as relative number in range [0..100]
agent_deliver_damage_to_agent = 1722 # (agent_deliver_damage_to_agent, <agent_id_deliverer>, <agent_id>, <value>, [item_id]),
                                     # if value <= 0, then damage will be calculated using the weapon item. item_id is the item that the damage is delivered. can be ignored.
agent_get_kill_count = 1723 # (agent_get_kill_count,<destination>,<agent_id>,[get_wounded]), set second value to non-zero to get wounded count
agent_get_player_id = 1724 # (agent_get_player_id,<destination>,<agent_id>),
agent_set_invulnerable_shield = 1725 # (agent_set_invulnerable_shield, <agent_id>),
agent_get_wielded_item = 1726 # (agent_get_wielded_item,<destination>,<agent_id>,<hand_no>),
agent_get_ammo = 1727 # (agent_get_ammo,<destination>,<agent_id>, <value>), value = 1 gets ammo for wielded item, value = 0 gets ammo for all items
agent_get_ammo_for_slot = 1825 # (agent_get_ammo_for_slot, <destination>, <agent_id>, <slot_no>), slot no can be between 0-3 (weapon slots)
agent_refill_ammo = 1728 # (agent_refill_ammo,<agent_id>),
agent_refill_wielded_shield_hit_points = 1692 # (agent_refill_wielded_shield_hit_points, <agent_id>),
agent_has_item_equipped = 1729 # (agent_has_item_equipped,<agent_id>,<item_id>),
agent_set_scripted_destination = 1730 # (agent_set_scripted_destination,<agent_id>,<position_no>,<auto_set_z_to_ground_level>), auto_set_z_to_ground_level can be 0 (false) or 1 (true)
agent_get_scripted_destination = 1731 # (agent_get_scripted_destination,<position_no>,<agent_id>),
agent_force_rethink = 1732 # (agent_force_rethink, <agent_id>),
agent_set_no_death_knock_down_only = 1733 # (agent_set_no_death_knock_down_only, <agent_id>, <value>), 0 for disable, 1 for enable
agent_set_horse_speed_factor = 1734 # (agent_set_horse_speed_factor, <agent_id>, <speed_multiplier-in-1/100>),
agent_clear_scripted_mode = 1735 # (agent_clear_scripted_mode,<agent_id>),
agent_set_speed_limit = 1736 # (agent_set_speed_limit,<agent_id>,<speed_limit(kilometers/hour)>), affects ai only
agent_ai_set_always_attack_in_melee = 1737 # (agent_ai_set_always_attack_in_melee, <agent_id>,<value>), to be used in sieges so that agents don't wait on the ladder.
agent_get_simple_behavior = 1738 # (agent_get_simple_behavior, <destination>, <agent_id>), constants are written in header_mission_templates.py, starting with aisb_
agent_get_combat_state = 1739 # (agent_get_combat_state, <destination>, <agent_id>),
agent_set_animation = 1740 # (agent_set_animation, <agent_id>, <anim_id>, [channel_no]), channel_no default is 0. top body only animations should have channel_no value as 1.
agent_set_stand_animation = 1741 # (agent_set_stand_animation, <agent_id>, <anim_id>),
agent_set_walk_forward_animation = 1742 # (agent_set_walk_forward_animation, <agent_id>, <anim_id>),
agent_set_animation_progress = 1743 # (agent_set_animation_progress, <agent_id>, <value_fixed_point>), value should be between 0-1 (as fixed point)
agent_set_look_target_position = 1744 # (agent_set_look_target_position, <agent_id>, <position_no>),
agent_set_attack_action = 1745 # (agent_set_attack_action, <agent_id>, <value>, <value>), value: -2 = clear any attack action, 0 = thrust, 1 = slashright, 2 = slashleft, 3 = overswing - second value 0 = ready and release, 1 = ready and hold
agent_set_defend_action = 1746 # (agent_set_defend_action, <agent_id>, <value>, <duration-in-1/1000-seconds>), value: -2 = clear any defend action, 0 = defend_down, 1 = defend_right, 2 = defend_left, 3 = defend_up
agent_set_wielded_item = 1747 # (agent_set_wielded_item, <agent_id>, <item_id>),
agent_set_scripted_destination_no_attack = 1748 # (agent_set_scripted_destination_no_attack,<agent_id>,<position_no>,<auto_set_z_to_ground_level>), auto_set_z_to_ground_level can be 0 (false) or 1 (true)
agent_fade_out = 1749 # (agent_fade_out, <agent_id>),
agent_play_sound = 1750 # (agent_play_sound, <agent_id>, <sound_id>),
agent_start_running_away = 1751 # (agent_start_running_away, <agent_id>, [position_no]), if position_no is entered, agent will run away to that location; pos0 will be ignored.
agent_stop_running_away = 1752 # (agent_stop_running_away, <agent_id>),
agent_ai_set_aggressiveness = 1753 # (agent_ai_set_aggressiveness, <agent_id>, <value>), 100 is the default aggressiveness. higher the value, less likely to run back
agent_set_kick_allowed = 1754 # (agent_set_kick_allowed, <agent_id>, <value>), 0 for disable, 1 for allow
remove_agent = 1755 # (remove_agent, <agent_id>),
agent_get_attached_scene_prop = 1756 # (agent_get_attached_scene_prop, <destination>, <agent_id>)
agent_set_attached_scene_prop = 1757 # (agent_set_attached_scene_prop, <agent_id>, <scene_prop_id>)
agent_set_attached_scene_prop_x = 1758 # (agent_set_attached_scene_prop_x, <agent_id>, <value>)
agent_set_attached_scene_prop_y = 1809 # (agent_set_attached_scene_prop_y, <agent_id>, <value>)
agent_set_attached_scene_prop_z = 1759 # (agent_set_attached_scene_prop_z, <agent_id>, <value>)
agent_get_time_elapsed_since_removed = 1760 # (agent_get_time_elapsed_since_removed, <destination>, <agent_id>),
agent_get_number_of_enemies_following = 1761 # (agent_get_number_of_enemies_following, <destination>, <agent_id>),
agent_set_no_dynamics = 1762 # (agent_set_no_dynamics, <agent_id>, <value>), 0 = turn dynamics off, 1 = turn dynamics on (required for cut-scenes)
agent_get_attack_action = 1763 # (agent_get_attack_action, <destination>, <agent_id>), free = 0, readying_attack = 1, releasing_attack = 2, completing_attack_after_hit = 3, attack_parried = 4, reloading = 5, after_release = 6, cancelling_attack = 7
agent_get_defend_action = 1764 # (agent_get_defend_action, <destination>, <agent_id>), free = 0, parrying = 1, blocking = 2
agent_get_group = 1765 # (agent_get_group, <destination>, <agent_id>),
agent_set_group = 1766 # (agent_set_group, <agent_id>, <value>),
agent_get_action_dir = 1767 # (agent_get_action_dir, <destination>, <agent_id>), invalid = -1, down = 0, right = 1, left = 2, up = 3
agent_get_animation = 1768 # (agent_get_animation, <destination>, <agent_id>, <body_part), 0 = lower body part, 1 = upper body part
agent_is_in_parried_animation = 1769 # (agent_is_in_parried_animation, <agent_id>),
agent_get_team = 1770 # (agent_get_team ,<destination>, <agent_id>),
agent_set_team = 1771 # (agent_set_team , <agent_id>, <value>),
agent_get_class = 1772 # (agent_get_class ,<destination>, <agent_id>),
agent_get_division = 1773 # (agent_get_division ,<destination>, <agent_id>),
agent_unequip_item = 1774 # (agent_unequip_item, <agent_id>, <item_id>, [weapon_slot_no]),
# weapon_slot_no is optional, and can be between 1-4 (used only for weapons, not armor). in either case, item_id has to be set correctly.
class_is_listening_order = 1775 # (class_is_listening_order, <team_no>, <sub_class>),
agent_set_ammo = 1776 # (agent_set_ammo,<agent_id>,<item_id>,<value>), value = a number between 0 and maximum ammo
agent_add_offer_with_timeout = 1777 # (agent_add_offer_with_timeout, <agent_id>, <agent_id>, <duration-in-1/1000-seconds>),
# second agent_id is offerer, 0 value for duration is an infinite offer
agent_check_offer_from_agent = 1778 # (agent_check_offer_from_agent, <agent_id>, <agent_id>), second agent_id is offerer
agent_equip_item = 1779 # (agent_equip_item, <agent_id>, <item_id>, [weapon_slot_no]), for weapons, agent needs to have an empty weapon slot.
# weapon_slot_no is optional, and can be between 1-4 (used only for weapons, not armor).
entry_point_get_position = 1780 # (entry_point_get_position, <position_no>, <entry_no>),
entry_point_set_position = 1781 # (entry_point_set_position, <entry_no>, <position_no>),
entry_point_is_auto_generated = 1782 # (entry_point_is_auto_generated, <entry_no>),
agent_set_division = 1783 # (agent_set_division, <agent_id>, <value>),
team_get_hold_fire_order = 1784 # (team_get_hold_fire_order, <destination>, <team_no>, <sub_class>),
team_get_movement_order = 1785 # (team_get_movement_order, <destination>, <team_no>, <sub_class>),
team_get_riding_order = 1786 # (team_get_riding_order, <destination>, <team_no>, <sub_class>),
team_get_weapon_usage_order = 1787 # (team_get_weapon_usage_order, <destination>, <team_no>, <sub_class>),
teams_are_enemies = 1788 # (teams_are_enemies, <team_no>, <team_no_2>),
team_give_order = 1790 # (team_give_order, <team_no>, <sub_class>, <order_id>),
team_set_order_position = 1791 # (team_set_order_position, <team_no>, <sub_class>, <position_no>),
team_get_leader = 1792 # (team_get_leader, <destination>, <team_no>),
team_set_leader = 1793 # (team_set_leader, <team_no>, <new_leader_agent_id>),
team_get_order_position = 1794 # (team_get_order_position, <position_no>, <team_no>, <sub_class>),
team_set_order_listener = 1795 # (team_set_order_listener, <team_no>, <sub_class>, <merge_with_old_listeners>), clear listeners if sub_class is less than zero
team_set_relation = 1796 # (team_set_relation, <team_no>, <team_no_2>, <value>), -1 for enemy, 1 for friend, 0 for neutral
set_rain = 1797 # (set_rain,<rain-type>,<strength>), (rain_type: 1= rain, 2=snow ; strength: 0 - 100)
set_fog_distance = 1798 # (set_fog_distance, <distance_in_meters>, [fog_color]),
get_scene_boundaries = 1799 # (get_scene_boundaries, <position_min>, <position_max>),
scene_prop_enable_after_time = 1800 # (scene_prop_enable_after_time, <scene_prop_id>, <value>)
scene_prop_has_agent_on_it = 1801 # (scene_prop_has_agent_on_it, <scene_prop_id>, <agent_id>)
agent_clear_relations_with_agents = 1802 # (agent_clear_relations_with_agents, <agent_id>),
agent_add_relation_with_agent = 1803 # (agent_add_relation_with_agent, <agent_id>, <agent_id>, <value>), -1 = enemy, 0 = neutral (no friendly fire at all), 1 = ally
agent_get_item_slot = 1804 # (agent_get_item_slot, <destination>, <agent_id>, <equip_slot>), equip slots are defined in header_items starting with ek_
ai_mesh_face_group_show_hide = 1805 # (ai_mesh_face_group_show_hide, <group_no>, <value>), 1 for enable, 0 for disable
agent_is_alarmed = 1806 # (agent_is_alarmed, <agent_id>),
agent_set_is_alarmed = 1807 # (agent_set_is_alarmed, <agent_id>, <value>), 1 for enable, 0 for disable
agent_stop_sound = 1808 # (agent_stop_sound, <agent_id>),
scene_prop_get_num_instances = 1810 # (scene_prop_get_num_instances, <destination>, <scene_prop_id>),
scene_prop_get_instance = 1811 # (scene_prop_get_instance, <destination>, <scene_prop_id>, <instance_no>),
scene_prop_get_visibility = 1812 # (scene_prop_get_visibility, <destination>, <scene_prop_id>),
scene_prop_set_visibility = 1813 # (scene_prop_set_visibility, <scene_prop_id>, <value>),
scene_prop_set_hit_points = 1814 # (scene_prop_set_hit_points, <scene_prop_id>, <value>),
scene_prop_get_hit_points = 1815 # (scene_prop_get_hit_points, <destination>, <scene_prop_id>),
scene_prop_get_max_hit_points = 1816 # (scene_prop_get_max_hit_points, <destination>, <scene_prop_id>),
scene_prop_get_team = 1817 # (scene_prop_get_team, <value>, <scene_prop_id>),
scene_prop_set_team = 1818 # (scene_prop_set_team, <scene_prop_id>, <value>),
scene_prop_set_prune_time = 1819 # (scene_prop_set_prune_time, <scene_prop_id>, <value>),
# prune time can only be set to objects that are already on the prune queue. static objects are not affected by this operation.
scene_prop_set_cur_hit_points = 1820 # (scene_prop_set_cur_hit_points, <scene_prop_id>, <value>),
scene_prop_fade_out = 1822 # (scene_prop_fade_out, <scene_prop_id>, <fade_out_time>),
scene_prop_fade_in = 1823 # (scene_prop_fade_in, <scene_prop_id>, <fade_in_time>),
agent_is_in_line_of_sight = 1826 # (agent_is_in_line_of_sight, <agent_id>, <position_no>), rotation of the position register is not used.
agent_deliver_damage_to_agent_advanced = 1827 # (agent_deliver_damage_to_agent_advanced, <destination>, <agent_id_deliverer>, <agent_id>, <value>, [item_id]),
# if value <= 0, then damage will be calculated using the weapon item. item_id is the item that the damage is delivered. can be ignored.
# this advanced mode of agent_deliver_damage_to_agent has 2 differences. 1- the delivered damage is returned. 2- the damage delivery is done after checking the relationship between agents. this might cause no damage, or even damage to the shooter agent because of a friendly fire.
team_get_gap_distance = 1828 # (team_get_gap_distance, <destination>, <team_no>, <sub_class>),
add_missile = 1829 # (add_missile, <agent_id>, <starting_position>, <starting_speed_fixed_point>, <weapon_item_id>, <weapon_item_modifier>, <missile_item_id>, <missile_item_modifier>), starting position also contains the direction of the arrow
scene_item_get_num_instances = 1830 # (scene_item_get_num_instances, <destination>, <item_id>),
scene_item_get_instance = 1831 # (scene_item_get_instance, <destination>, <item_id>, <instance_no>),
scene_spawned_item_get_num_instances = 1832 # (scene_spawned_item_get_num_instances, <destination>, <item_id>),
scene_spawned_item_get_instance = 1833 # (scene_spawned_item_get_instance, <destination>, <item_id>, <instance_no>),
scene_allows_mounted_units = 1834 # (scene_allows_mounted_units),
class_set_name = 1837 # (class_set_name, <sub_class>, <string_id>),
prop_instance_is_valid = 1838 # (prop_instance_is_valid, <scene_prop_id>),
prop_instance_get_variation_id = 1840 # (prop_instance_get_variation_id, <destination>, <scene_prop_id>),
prop_instance_get_variation_id_2 = 1841 # (prop_instance_get_variation_id_2, <destination>, <scene_prop_id>),
prop_instance_get_position = 1850 # (prop_instance_get_position, <position_no>, <scene_prop_id>),
prop_instance_get_starting_position = 1851 # (prop_instance_get_starting_position, <position_no>, <scene_prop_id>),
prop_instance_get_scale = 1852 # (prop_instance_get_scale, <position_no>, <scene_prop_id>),
prop_instance_get_scene_prop_kind = 1853 # (prop_instance_get_scene_prop_type, <destination>, <scene_prop_id>)
prop_instance_set_scale = 1854 # (prop_instance_set_scale, <scene_prop_id>, <value_x_fixed_point>, <value_y_fixed_point>, <value_z_fixed_point>),
prop_instance_set_position = 1855 # (prop_instance_set_position, <scene_prop_id>, <position_no>, [dont_send_to_clients]),
# dont_send_to_clients default is 0 - if you are just doing some physics checks with scene props, then set to 1 and don't send to clients
prop_instance_animate_to_position = 1860 # (prop_instance_animate_to_position, <scene_prop_id>, position, <duration-in-1/100-seconds>),
prop_instance_stop_animating = 1861 # (prop_instance_stop_animating, <scene_prop_id>),
prop_instance_is_animating = 1862 # (prop_instance_is_animating, <destination>, <scene_prop_id>),
prop_instance_get_animation_target_position = 1863 # (prop_instance_get_animation_target_position, <pos>, <scene_prop_id>)
prop_instance_enable_physics = 1864 # (prop_instance_enable_physics, <scene_prop_id>, <value>) 0 for disable, 1 for enable
prop_instance_rotate_to_position = 1865 # (prop_instance_rotate_to_position, <scene_prop_id>, position, <duration-in-1/100-seconds>, <total_rotate_angle>),
prop_instance_initialize_rotation_angles = 1866 # (prop_instance_initialize_rotation_angles, <scene_prop_id>),
prop_instance_refill_hit_points = 1870 # (prop_instance_refill_hit_points, <scene_prop_id>),
prop_instance_dynamics_set_properties = 1871 # (prop_instance_dynamics_set_properties,<scene_prop_id>,mass_friction),
prop_instance_dynamics_set_velocity = 1872 # (prop_instance_dynamics_set_velocity,<scene_prop_id>,linear_velocity),
prop_instance_dynamics_set_omega = 1873 # (prop_instance_dynamics_set_omega,<scene_prop_id>,angular_velocity),
prop_instance_dynamics_apply_impulse = 1874 # (prop_instance_dynamics_apply_impulse,<scene_prop_id>,impulse_force),
prop_instance_receive_damage = 1877 # (prop_instance_receive_damage, <scene_prop_id>, <agent_id>, <damage_value>),
prop_instance_intersects_with_prop_instance = 1880 # (prop_instance_intersects_with_prop_instance, <scene_prop_id>, <scene_prop_id>), give second scene_prop_id as -1 to check all scene props.
# cannot check polygon-to-polygon physics models, but can check any other combinations between sphere, capsule and polygon physics models.
prop_instance_play_sound = 1881 # (prop_instance_play_sound, <scene_prop_id>, <sound_id>, [flags]), sound flags can be given
prop_instance_stop_sound = 1882 # (prop_instance_stop_sound, <scene_prop_id>),
prop_instance_clear_attached_missiles = 1885 # (prop_instance_clear_attached_missiles, <scene_prop_id>), works only with dynamic scene props (non-retrievable missiles)
prop_instance_add_particle_system = 1886 # (prop_instance_add_particle_system, <scene_prop_id>, <par_sys_id>, <position_no>), position is local, not global.
prop_instance_stop_all_particle_systems = 1887 # (prop_instance_stop_all_particle_systems, <scene_prop_id>),
replace_prop_instance = 1889 # (replace_prop_instance, <scene_prop_id>, <new_scene_prop_id>),
replace_scene_props = 1890 # (replace_scene_props, <old_scene_prop_id>,<new_scene_prop_id>),
replace_scene_items_with_scene_props = 1891 # (replace_scene_items_with_scene_props, <old_item_id>,<new_scene_prop_id>),
#-----------------------------------------------------------------------------
# MISSION CONSEQUENCES
#-----------------------------------------------------------------------------
set_mission_result = 1906 # (set_mission_result,<value>),
finish_mission = 1907 # (finish_mission, <delay_in_seconds>),
jump_to_scene = 1910 # (jump_to_scene,<scene_id>,<entry_no>),
set_jump_mission = 1911 # (set_jump_mission,<mission_template_id>),
set_jump_entry = 1912 # (set_jump_entry,<entry_no>),
start_mission_conversation = 1920 # (start_mission_conversation,<troop_id>),
add_reinforcements_to_entry = 1930 # (add_reinforcements_to_entry,<mission_template_entry_no>,<value>),
mission_enable_talk = 1935 # (mission_enable_talk), can talk with troops during battles
mission_disable_talk = 1936 # (mission_disable_talk), disables talk option for the mission
mission_tpl_entry_set_override_flags = 1940 # (mission_tpl_entry_set_override_flags, <mission_template_id>, <entry_no>, <value>),
mission_tpl_entry_clear_override_items = 1941 # (mission_tpl_entry_clear_override_items, <mission_template_id>, <entry_no>),
mission_tpl_entry_add_override_item = 1942 # (mission_tpl_entry_add_override_item, <mission_template_id>, <entry_no>, <item_kind_id>),
set_current_color = 1950 # (set_current_color,<red>,<green>,<blue>), a value of 255 means 100%
set_position_delta = 1955 # (set_position_delta,<x_value>,<y_value>,<z_value>),
add_point_light = 1960 # (add_point_light,[flicker_magnitude],[flicker_interval]), flicker_magnitude between 0 and 100, flicker_interval is in 1/100 seconds
add_point_light_to_entity = 1961 # (add_point_light_to_entity,[flicker_magnitude],[flicker_interval]), flicker_magnitude between 0 and 100, flicker_interval is in 1/100 seconds
particle_system_add_new = 1965 # (particle_system_add_new,<par_sys_id>,[position_no]),
particle_system_emit = 1968 # (particle_system_emit,<par_sys_id>,<value_num_particles>,<value_period>),
particle_system_burst = 1969 # (particle_system_burst,<par_sys_id>,<position_no>,[percentage_burst_strength]),
set_spawn_position = 1970 # (set_spawn_position, <position_no>)
spawn_item = 1971 # (spawn_item, <item_kind_id>, <item_modifier>, [seconds_before_pruning]) if seconds_before_pruning = 0 then item never gets pruned
spawn_agent = 1972 # (spawn_agent,<troop_id>), (stores agent_id in reg0)
spawn_horse = 1973 # (spawn_horse,<item_kind_id>, <item_modifier>) (stores agent_id in reg0)
spawn_scene_prop = 1974 # (spawn_scene_prop, <scene_prop_id>) (stores prop_instance_id in reg0) not yet.
particle_system_burst_no_sync = 1975 # (particle_system_burst_no_sync,<par_sys_id>,<position_no>,[percentage_burst_strength]),
spawn_item_without_refill = 1976 # (spawn_item_without_refill, <item_kind_id>, <item_modifier>, [seconds_before_pruning]), if seconds_before_pruning = 0 then item never gets pruned
agent_get_item_cur_ammo = 1977 # (agent_get_item_cur_ammo, <destination>, <agent_id>, <slot_no>),
cur_tableau_add_tableau_mesh = 1980 # (cur_tableau_add_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>), value is passed to tableau_material
cur_item_set_tableau_material = 1981 # (cur_item_set_tableau_material, <tableau_material_id>, <instance_code>), only call inside ti_on_init_item in module_items
cur_scene_prop_set_tableau_material = 1982 # (cur_scene_prop_set_tableau_material, <tableau_material_id>, <instance_code>), only call inside ti_on_init_scene_prop in module_scene_props
cur_map_icon_set_tableau_material = 1983 # (cur_map_icon_set_tableau_material, <tableau_material_id>, <instance_code>), only call inside ti_on_init_map_icon in module_scene_props
cur_tableau_render_as_alpha_mask = 1984 # (cur_tableau_render_as_alpha_mask)
cur_tableau_set_background_color = 1985 # (cur_tableau_set_background_color, <value>),
cur_agent_set_banner_tableau_material = 1986 # (cur_agent_set_banner_tableau_material, <tableau_material_id>)
cur_tableau_set_ambient_light = 1987 # (cur_tableau_set_ambient_light, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
cur_tableau_set_camera_position = 1988 # (cur_tableau_set_camera_position, <position_no>),
cur_tableau_set_camera_parameters = 1989 # (cur_tableau_set_camera_parameters,<is_perspective>,<camera_width*1000>, <camera_height*1000>, <camera_near*1000>, <camera_far*1000>),
cur_tableau_add_point_light = 1990 # (cur_tableau_add_point_light, <map_icon_id>, <position_no>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
cur_tableau_add_sun_light = 1991 # (cur_tableau_add_sun_light, <map_icon_id>, <position_no>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
cur_tableau_add_mesh = 1992 # (cur_tableau_add_mesh, <mesh_id>, <position_no>, <value_fixed_point>, <value_fixed_point>),
# first value fixed point is the scale factor, second value fixed point is alpha. use 0 for default values
cur_tableau_add_mesh_with_vertex_color = 1993 # (cur_tableau_add_mesh_with_vertex_color, <mesh_id>, <position_no>, <value_fixed_point>, <value_fixed_point>, <value>),
# first value fixed point is the scale factor, second value fixed point is alpha. value is vertex color. use 0 for default values. vertex_color has no default value.
cur_tableau_add_map_icon = 1994 # (cur_tableau_add_map_icon, <map_icon_id>, <position_no>, <value_fixed_point>), value fixed point is the scale factor
cur_tableau_add_troop = 1995 # (cur_tableau_add_troop, <troop_id>, <position_no>, <animation_id>, <instance_no>),
# if instance_no value is 0 or less, then the face is not generated randomly (important for heroes)
cur_tableau_add_horse = 1996 # (cur_tableau_add_horse, <item_id>, <position_no>, <animation_id>),
cur_tableau_set_override_flags = 1997 # (cur_tableau_set_override_flags, <value>),
cur_tableau_clear_override_items = 1998 # (cur_tableau_clear_override_items),
cur_tableau_add_override_item = 1999 # (cur_tableau_add_override_item, <item_kind_id>),
cur_tableau_add_mesh_with_scale_and_vertex_color = 2000 # (cur_tableau_add_mesh_with_scale_and_vertex_color, <mesh_id>, <position_no>, <position_no>, <value_fixed_point>, <value>),
# second position_no is x,y,z scale factors (with fixed point values). value fixed point is alpha. value is vertex color. use 0 for default values. scale and vertex_color has no default values.
mission_cam_set_mode = 2001 # (mission_cam_set_mode, <mission_cam_mode>, <duration-in-1/1000-seconds>, <value>)
# when leaving manual mode, duration defines the animation time from the initial position to the new position.
# set as 0 for instant camera position update. if value = 0, then camera velocity will be linear, else it will be non-linear
mission_get_time_speed = 2002 # (mission_get_time_speed, <destination_fixed_point>),
mission_set_time_speed = 2003 # (mission_set_time_speed, <value_fixed_point>) this works only when cheat mode is enabled
mission_time_speed_move_to_value = 2004 # (mission_time_speed_move_to_value, <value_fixed_point>, <duration-in-1/1000-seconds>) this works only when cheat mode is enabled
mission_set_duel_mode = 2006 # (mission_set_duel_mode, <value>), value: 0 = off, 1 = on
mission_cam_set_screen_color = 2008 # (mission_cam_set_screen_color, <value>), value is color together with alpha
mission_cam_animate_to_screen_color = 2009 # (mission_cam_animate_to_screen_color, <value>, <duration-in-1/1000-seconds>), value is color together with alpha
mission_cam_get_position = 2010 # (mission_cam_get_position, <position_register_no>)
mission_cam_set_position = 2011 # (mission_cam_set_position, <position_register_no>)
mission_cam_animate_to_position = 2012 # (mission_cam_animate_to_position, <position_register_no>, <duration-in-1/1000-seconds>, <value>)
# if value = 0, then camera velocity will be linear. else it will be non-linear
mission_cam_get_aperture = 2013 # (mission_cam_get_aperture, <destination>)
mission_cam_set_aperture = 2014 # (mission_cam_set_aperture, <value>)
mission_cam_animate_to_aperture = 2015 # (mission_cam_animate_to_aperture, <value>, <duration-in-1/1000-seconds>, <value>)
# if value = 0, then camera velocity will be linear. else it will be non-linear
mission_cam_animate_to_position_and_aperture = 2016 # (mission_cam_animate_to_position_and_aperture, <position_register_no>, <value>, <duration-in-1/1000-seconds>, <value>)
# if value = 0, then camera velocity will be linear. else it will be non-linear
mission_cam_set_target_agent = 2017 # (mission_cam_set_target_agent, <agent_id>, <value>) if value = 0 then do not use agent's rotation, else use agent's rotation
mission_cam_clear_target_agent = 2018 # (mission_cam_clear_target_agent)
mission_cam_set_animation = 2019 # (mission_cam_set_animation, <anim_id>),
talk_info_show = 2020 # (talk_info_show, <hide_or_show>) :0=hide 1=show
talk_info_set_relation_bar = 2021 # (talk_info_set_relation_bar, <value>) set relation bar to a value between -100 to 100, enter an invalid value to hide the bar.
talk_info_set_line = 2022 # (talk_info_set_line, <line_no>, <string_no>)
set_background_mesh = 2031 # (set_background_mesh, <mesh_id>),
set_game_menu_tableau_mesh = 2032 # (set_game_menu_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>), value is passed to tableau_material
# position contains the following information: x = x position of the mesh, y = y position of the mesh, z = scale of the mesh
change_screen_return = 2040 # (change_screen_return),
change_screen_loot = 2041 # (change_screen_loot, <troop_id>),
change_screen_trade = 2042 # (change_screen_trade, <troop_id>),
change_screen_exchange_members = 2043 # (change_screen_exchange_members, [0,1 = exchange_leader], [party_id]), if party id is not given, current party will be used
change_screen_trade_prisoners = 2044 # (change_screen_trade_prisoners),
change_screen_buy_mercenaries = 2045 # (change_screen_buy_mercenaries),
change_screen_view_character = 2046 # (change_screen_view_character),
change_screen_training = 2047 # (change_screen_training),
change_screen_mission = 2048 # (change_screen_mission),
change_screen_map_conversation = 2049 # (change_screen_map_conversation, <troop_id>),
change_screen_exchange_with_party = 2050 # (change_screen_exchange_with_party, <party_id>),
change_screen_equip_other = 2051 # (change_screen_equip_other, <troop_id>),
change_screen_map = 2052 # (change_screen_map),
change_screen_notes = 2053 # (change_screen_notes, <note_type>, <object_id>), Note type can be 1 = troops, 2 = factions, 3 = parties, 4 = quests, 5 = info_pages
change_screen_quit = 2055 # (change_screen_quit),
change_screen_give_members = 2056 # (change_screen_give_members, [party_id]), if party id is not given, current party will be used
change_screen_controls = 2057 # (change_screen_controls),
change_screen_options = 2058 # (change_screen_options),
jump_to_menu = 2060 # (jump_to_menu,<menu_id>),
disable_menu_option = 2061 # (disable_menu_option),
store_trigger_param = 2070 # (store_trigger_param, <destination>, <trigger_param_no>),
store_trigger_param_1 = 2071 # (store_trigger_param_1,<destination>),
store_trigger_param_2 = 2072 # (store_trigger_param_2,<destination>),
store_trigger_param_3 = 2073 # (store_trigger_param_3,<destination>),
set_trigger_result = 2075 # (set_trigger_result, <value>),
agent_ai_get_look_target = 2080 # (agent_ai_get_look_target, <destination>, <agent_id>),
agent_ai_get_move_target = 2081 # (agent_ai_get_move_target, <destination>, <agent_id>),
agent_ai_get_behavior_target = 2082 # (agent_ai_get_behavior_target, <destination>, <agent_id>),
agent_ai_set_can_crouch = 2083 # (agent_ai_set_can_crouch, <agent_id>, <value>), 0 for false, 1 for true.
agent_set_max_hit_points = 2090 # (agent_set_max_hit_points,<agent_id>,<value>,[absolute]), set absolute to 1 if value is absolute, otherwise value will be treated as relative number in range [0..100]
agent_set_damage_modifier = 2091 # (agent_set_damage_modifier, <agent_id>, <value>), value is in percentage, 100 is default
agent_set_accuracy_modifier = 2092 # (agent_set_accuracy_modifier, <agent_id>, <value>), value is in percentage, 100 is default, value can be between [0..1000]
agent_set_speed_modifier = 2093 # (agent_set_speed_modifier, <agent_id>, <value>), value is in percentage, 100 is default, value can be between [0..1000]
agent_set_reload_speed_modifier = 2094 # (agent_set_reload_speed_modifier, <agent_id>, <value>), value is in percentage, 100 is default, value can be between [0..1000]
agent_set_use_speed_modifier = 2095 # (agent_set_use_speed_modifier, <agent_id>, <value>), value is in percentage, 100 is default, value can be between [0..1000]
agent_set_visibility = 2096 # (agent_set_visibility, <agent_id>, <value>), 0 for invisible, 1 for visible.
agent_get_crouch_mode = 2097 # (agent_get_crouch_mode, <destination>, <agent_id>),
agent_set_crouch_mode = 2098 # (agent_set_crouch_mode, <agent_id>, <value>), 0 for false, 1 for true.
agent_set_ranged_damage_modifier = 2099 # (agent_set_ranged_damage_modifier, <agent_id>, <value>), value is in percentage, 100 is default
val_lshift = 2100 # (val_lshift, <destination>, <value>), shifts the bits of destination to left by value amount.
val_rshift = 2101 # (val_rshift, <destination>, <value>), shifts the bits of destination to right by value amount.
val_add = 2105 # (val_add,<destination>,<value>), destination = destination + value
val_sub = 2106 # (val_sub,<destination>,<value>), destination = destination - value
val_mul = 2107 # (val_mul,<destination>,<value>), destination = destination * value
val_div = 2108 # (val_div,<destination>,<value>), destination = destination / value
val_mod = 2109 # (val_mod,<destination>,<value>), destination = destination % value
val_min = 2110 # (val_min,<destination>,<value>), destination = min(destination, value)
val_max = 2111 # (val_max,<destination>,<value>), destination = max(destination, value)
val_clamp = 2112 # (val_clamp,<destination>,<lower_bound>, <upper_bound>), destination = max(min(destination,<upper_bound> - 1),<lower_bound>)
val_abs = 2113 # (val_abs,<destination>), destination = abs(value)
val_or = 2114 # (val_or,<destination>,<value>), destination = destination | value
val_and = 2115 # (val_and,<destination>,<value>), destination = destination & value
store_or = 2116 # (store_or,<destination>,<value_1>,<value_2>), destination = value_1 | value_2
store_and = 2117 # (store_and,<destination>,<value_1>,<value_2>), destination = value_1 & value_2
store_mod = 2119 # (store_mod,<destination>,<value_1>,<value_2>), destination = value_1 % value_2
store_add = 2120 # (store_add,<destination>,<value_1>,<value_2>), destination = value_1 + value_2
store_sub = 2121 # (store_sub,<destination>,<value_1>,<value_2>), destination = value_1 - value_2
store_mul = 2122 # (store_mul,<destination>,<value_1>,<value_2>), destination = value_1 * value_2
store_div = 2123 # (store_div,<destination>,<value_1>,<value_2>), destination = value_1 / value_2
set_fixed_point_multiplier = 2124 # (set_fixed_point_multiplier, <value>), sets precision of values named as value_fixed_point or destination_fixed_point, default is 100
store_sqrt = 2125 # (store_sqrt, <destination_fixed_point>, <value_fixed_point>), takes square root of the value
store_pow = 2126 # (store_pow, <destination_fixed_point>, <value_fixed_point>, <value_fixed_point>), raises the first value to the power of the second value
store_sin = 2127 # (store_sin, <destination_fixed_point>, <value_fixed_point>), takes sine of the value that is in degrees
store_cos = 2128 # (store_cos, <destination_fixed_point>, <value_fixed_point>), takes cosine of the value that is in degrees
store_tan = 2129 # (store_tan, <destination_fixed_point>, <value_fixed_point>), takes tangent of the value that is in degrees
convert_to_fixed_point = 2130 # (convert_to_fixed_point, <destination_fixed_point>), multiplies the value with the fixed point multiplier
convert_from_fixed_point = 2131 # (convert_from_fixed_point, <destination>), divides the value with the fixed point multiplier
assign = 2133 # (assign,<destination>,<value>), had to put this here so that it can be called from conditions.
shuffle_range = 2134 # (shuffle_range,<reg_no>,<reg_no>),
store_random = 2135 # (store_random,<destination>,<range_high>), deprecated: gets random number in range [0, range_high - 1]
store_random_in_range = 2136 # (store_random_in_range,<destination>,<range_low>,<range_high>), gets random number in range [range_low, range_high - 1]
store_asin = 2140 # (store_asin, <destination_fixed_point>, <value_fixed_point>),
store_acos = 2141 # (store_acos, <destination_fixed_point>, <value_fixed_point>),
store_atan = 2142 # (store_atan, <destination_fixed_point>, <value_fixed_point>),
store_atan2 = 2143 # (store_atan2, <destination_fixed_point>, <value_fixed_point>, <value_fixed_point>), first value is y, second is x
store_troop_gold = 2149 # (store_troop_gold,<destination>,<troop_id>),
store_num_free_stacks = 2154 # (store_num_free_stacks,<destination>,<party_id>),
store_num_free_prisoner_stacks = 2155 # (store_num_free_prisoner_stacks,<destination>,<party_id>),
store_party_size = 2156 # (store_party_size,<destination>,[party_id]),
store_party_size_wo_prisoners = 2157 # (store_party_size_wo_prisoners,<destination>,[party_id]),
store_troop_kind_count = 2158 # (store_troop_kind_count,<destination>,<troop_id>), deprecated: use party_count_members_of_type instead
store_num_regular_prisoners = 2159 # (store_num_regular_prisoners,<destination>,<party_id>),
store_troop_count_companions = 2160 # (store_troop_count_companions,<destination>,<troop_id>,[party_id]),
store_troop_count_prisoners = 2161 # (store_troop_count_prisoners,<destination>,<troop_id>,[party_id]),
store_item_kind_count = 2165 # (store_item_kind_count,<destination>,<item_id>,[troop_id]),
store_free_inventory_capacity = 2167 # (store_free_inventory_capacity,<destination>,[troop_id]),
store_skill_level = 2170 # (store_skill_level,<destination>,<skill_id>,[troop_id]),
store_character_level = 2171 # (store_character_level,<destination>,[troop_id]),
store_attribute_level = 2172 # (store_attribute_level,<destination>,<troop_id>,<attribute_id>),
store_troop_faction = 2173 # (store_troop_faction,<destination>,<troop_id>),
store_faction_of_troop = 2173 # (store_faction_of_troop,<destination>,<troop_id>), alias of store_troop_faction (same opcode)
store_troop_health = 2175 # (store_troop_health,<destination>,<troop_id>,[absolute]),
# set absolute to 1 to get actual health; otherwise this will return percentage health in range (0-100)
store_proficiency_level = 2176 # (store_proficiency_level,<destination>,<troop_id>,<attribute_id>),
store_relation = 2190 # (store_relation,<destination>,<faction_id_1>,<faction_id_2>),
set_conversation_speaker_troop = 2197 # (set_conversation_speaker_troop, <troop_id>),
set_conversation_speaker_agent = 2198 # (set_conversation_speaker_agent, <agent_id>),
store_conversation_agent = 2199 # (store_conversation_agent,<destination>),
store_conversation_troop = 2200 # (store_conversation_troop,<destination>),
store_partner_faction = 2201 # (store_partner_faction,<destination>),
store_encountered_party = 2202 # (store_encountered_party,<destination>),
store_encountered_party2 = 2203 # (store_encountered_party2,<destination>),
store_faction_of_party = 2204 # (store_faction_of_party, <destination>, <party_id>),
set_encountered_party = 2205 # (set_encountered_party,<destination>),
store_current_scene = 2211 # (store_current_scene,<destination>),
store_zoom_amount = 2220 # (store_zoom_amount, <destination_fixed_point>),
set_zoom_amount = 2221 # (set_zoom_amount, <value_fixed_point>),
is_zoom_disabled = 2222 # (is_zoom_disabled),
store_item_value = 2230 # (store_item_value,<destination>,<item_id>),
store_troop_value = 2231 # (store_troop_value,<destination>,<troop_id>),
store_partner_quest = 2240 # (store_partner_quest,<destination>),
store_random_quest_in_range = 2250 # (store_random_quest_in_range,<destination>,<lower_bound>,<upper_bound>),
store_random_troop_to_raise = 2251 # (store_random_troop_to_raise,<destination>,<lower_bound>,<upper_bound>),
store_random_troop_to_capture = 2252 # (store_random_troop_to_capture,<destination>,<lower_bound>,<upper_bound>),
store_random_party_in_range = 2254 # (store_random_party_in_range,<destination>,<lower_bound>,<upper_bound>),
store01_random_parties_in_range = 2255 # (store01_random_parties_in_range,<lower_bound>,<upper_bound>), stores two random, different parties in a range to reg0 and reg1.
store_random_horse = 2257 # (store_random_horse,<destination>)
store_random_equipment = 2258 # (store_random_equipment,<destination>)
store_random_armor = 2259 # (store_random_armor,<destination>)
store_quest_number = 2261 # (store_quest_number,<destination>,<quest_id>),
store_quest_item = 2262 # (store_quest_item,<destination>,<item_id>),
store_quest_troop = 2263 # (store_quest_troop,<destination>,<troop_id>),
store_current_hours = 2270 # (store_current_hours,<destination>),
store_time_of_day = 2271 # (store_time_of_day,<destination>),
store_current_day = 2272 # (store_current_day,<destination>),
is_currently_night = 2273 # (is_currently_night),
store_distance_to_party_from_party = 2281 # (store_distance_to_party_from_party,<destination>,<party_id>,<party_id>),
get_party_ai_behavior = 2290 # (get_party_ai_behavior,<destination>,<party_id>),
get_party_ai_object = 2291 # (get_party_ai_object,<destination>,<party_id>),
party_get_ai_target_position = 2292 # (party_get_ai_target_position,<position_no>,<party_id>),
get_party_ai_current_behavior = 2293 # (get_party_ai_current_behavior,<destination>,<party_id>),
get_party_ai_current_object = 2294 # (get_party_ai_current_object,<destination>,<party_id>),
store_num_parties_created = 2300 # (store_num_parties_created,<destination>,<party_template_id>),
store_num_parties_destroyed = 2301 # (store_num_parties_destroyed,<destination>,<party_template_id>),
store_num_parties_destroyed_by_player = 2302 # (store_num_parties_destroyed_by_player,<destination>,<party_template_id>),
store_num_parties_of_template = 2310 # (store_num_parties_of_template,<destination>,<party_template_id>),
store_random_party_of_template           = 2311  # (store_random_party_of_template,<destination>,<party_template_id>), fails if no party exists with template_id (expensive)
str_is_empty = 2318 # (str_is_empty, <string_register>),
str_clear = 2319 # (str_clear, <string_register>)
str_store_string = 2320 # (str_store_string,<string_register>,<string_id>),
str_store_string_reg                     = 2321  # (str_store_string_reg,<string_register>,<string_no>), copies one string register to another.
str_store_troop_name = 2322 # (str_store_troop_name,<string_register>,<troop_id>),
str_store_troop_name_plural = 2323 # (str_store_troop_name_plural,<string_register>,<troop_id>),
str_store_troop_name_by_count = 2324 # (str_store_troop_name_by_count,<string_register>,<troop_id>,<number>),
str_store_item_name = 2325 # (str_store_item_name,<string_register>,<item_id>),
str_store_item_name_plural = 2326 # (str_store_item_name_plural,<string_register>,<item_id>),
str_store_item_name_by_count = 2327 # (str_store_item_name_by_count,<string_register>,<item_id>),
str_store_party_name = 2330 # (str_store_party_name,<string_register>,<party_id>),
str_store_agent_name = 2332 # (str_store_agent_name,<string_register>,<agent_id>),
str_store_faction_name = 2335 # (str_store_faction_name,<string_register>,<faction_id>),
str_store_quest_name = 2336 # (str_store_quest_name,<string_register>,<quest_id>),
str_store_info_page_name = 2337 # (str_store_info_page_name,<string_register>,<info_page_id>),
str_store_date = 2340 # (str_store_date,<string_register>,<number_of_hours_to_add_to_the_current_date>),
str_store_troop_name_link = 2341 # (str_store_troop_name_link,<string_register>,<troop_id>),
str_store_party_name_link = 2342 # (str_store_party_name_link,<string_register>,<party_id>),
str_store_faction_name_link = 2343 # (str_store_faction_name_link,<string_register>,<faction_id>),
str_store_quest_name_link = 2344 # (str_store_quest_name_link,<string_register>,<quest_id>),
str_store_info_page_name_link = 2345 # (str_store_info_page_name_link,<string_register>,<info_page_id>),
str_store_class_name                     = 2346  # (str_store_class_name,<string_register>,<class_id>)
str_store_player_username = 2350 # (str_store_player_username,<string_register>,<player_id>), used in multiplayer mode only
str_store_server_password = 2351 # (str_store_server_password, <string_register>),
str_store_server_name = 2352 # (str_store_server_name, <string_register>),
str_store_welcome_message = 2353 # (str_store_welcome_message, <string_register>),
str_encode_url = 2355 # (str_encode_url, <string_register>),
store_remaining_team_no = 2360 # (store_remaining_team_no,<destination>),
store_mission_timer_a_msec = 2365 # (store_mission_timer_a_msec,<destination>),
store_mission_timer_b_msec = 2366 # (store_mission_timer_b_msec,<destination>),
store_mission_timer_c_msec = 2367 # (store_mission_timer_c_msec,<destination>),
store_mission_timer_a = 2370 # (store_mission_timer_a,<destination>),
store_mission_timer_b = 2371 # (store_mission_timer_b,<destination>),
store_mission_timer_c = 2372 # (store_mission_timer_c,<destination>),
reset_mission_timer_a = 2375 # (reset_mission_timer_a),
reset_mission_timer_b = 2376 # (reset_mission_timer_b),
reset_mission_timer_c = 2377 # (reset_mission_timer_c),
set_cheer_at_no_enemy = 2379 # (set_cheer_at_no_enemy, <value>), values: 0 = do not cheer (do as commander says), 1 = cheer
store_enemy_count = 2380 # (store_enemy_count,<destination>),
store_friend_count = 2381 # (store_friend_count,<destination>),
store_ally_count = 2382 # (store_ally_count,<destination>),
store_defender_count = 2383 # (store_defender_count,<destination>),
store_attacker_count = 2384 # (store_attacker_count,<destination>),
store_normalized_team_count = 2385 # (store_normalized_team_count,<destination>, <team_no>),
# counts the number of agents belonging to a team and normalizes the result regarding battle_size and advantage.
set_postfx = 2386 # (set_postfx,<value>),
set_river_shader_to_mud = 2387 # (set_river_shader_to_mud), changes river material for muddy env
show_troop_details = 2388 # (show_troop_details, <troop_id>, <position>, <troop_price>),
set_skybox = 2389 # (set_skybox, <non_hdr_skybox_index>, <hdr_skybox_index>), forces selected skybox for a scene, use -1 to disable
set_startup_sun_light = 2390 # (set_startup_sun_light, <r>, <g>, <b>), changes the sun light color
set_startup_ambient_light = 2391 # (set_startup_ambient_light, <r>, <g>, <b>), changes the ambient light color
set_startup_ground_ambient_light = 2392 # (set_startup_ground_ambient_light, <r>, <g>, <b>), changes the ground ambient light color
rebuild_shadow_map = 2393 # (rebuild_shadow_map),
set_shader_param_int = 2400 # (set_shader_param_int, <parameter_name>, <value>), sets the int shader parameter <parameter_name> to <value>
set_shader_param_float = 2401 # (set_shader_param_float, <parameter_name>, <value>), sets the float shader parameter <parameter_name> to <value>
set_shader_param_float4 = 2402 # (set_shader_param_float4, <parameter_name>, <value_x>, <value_y>, <value_z>, <value_w>),
# sets the float4 shader parameter <parameter_name> to <value_x/y/z/w>
set_shader_param_float4x4 = 2403 # (set_shader_param_float4x4, <parameter_name>, [0][0], [0][1], [0][2], [1][0], [1][1], [1][2], [2][0], [2][1], [2][2], [3][0], [3][1], [3][2]),
# sets the float4x4 shader parameter <parameter_name> to the given values. w components are 0001 by default.
# Reverse lookup table: opcode number -> symbolic operation name, harvested
# from every integer global defined in this module.
opcode_names = dict((opcode, name) for name, opcode in globals().iteritems() if isinstance(opcode, int))
def get_opcode_name(opcode):
    """Return the symbolic name of *opcode*, keeping any modifier flags.

    The this_or_next / neg modifier bits are stripped before the table
    lookup and re-emitted as textual "this_or_next|" / "neg|" prefixes.
    Unknown or non-integer opcodes fall back to their repr().
    """
    prefixes = []
    try:
        if opcode & this_or_next:
            prefixes.append("this_or_next|")
            opcode ^= this_or_next
        if opcode & neg:
            prefixes.append("neg|")
            opcode ^= neg
        opname = opcode_names[opcode]
    except (KeyError, TypeError):
        # KeyError: opcode not in the table; TypeError: opcode is not an int.
        opname = repr(opcode)
    return "".join(prefixes) + opname
def print_operations_block(block):
    # Pretty-print a list of module-system operations, one
    # "(name, arg, ...)," line each, indenting the bodies of try blocks.
    indent = 0
    for operation in block:
        if isinstance(operation, (tuple, list)):
            # Operation with arguments: first element is the opcode.
            opcode = operation[0]
            operation_list = [get_opcode_name(opcode)] + [repr(entry) for entry in operation[1:]]
        else:
            # Bare opcode with no arguments.
            opcode = operation
            operation_list = [get_opcode_name(opcode)]
        # else_try / try_end close the current level, so print them one level out.
        if opcode in (else_try, try_end) and indent > 0:
            indent -= 1
        print "{0}({1}),".format(" " * indent, ", ".join(operation_list))
        # Operations that open a block (and else_try, which re-opens one)
        # indent everything that follows.
        if opcode in try_begin_operations or opcode == else_try:
            indent += 1
lhs_operations = frozenset([
try_for_range,
try_for_range_backwards,
try_for_parties,
try_for_agents,
store_script_param_1,
store_script_param_2,
store_script_param,
store_repeat_object,
get_global_cloud_amount,
get_global_haze_amount,
options_get_damage_to_player,
options_get_damage_to_friends,
options_get_combat_ai,
options_get_campaign_ai,
options_get_combat_speed,
profile_get_banner_id,
get_achievement_stat,
get_max_players,
player_get_team_no,
player_get_troop_id,
player_get_agent_id,
player_get_gold,
multiplayer_get_my_team,
multiplayer_get_my_troop,
multiplayer_get_my_gold,
multiplayer_get_my_player,
player_get_score,
player_get_kill_count,
player_get_death_count,
player_get_ping,
player_get_is_muted,
player_get_unique_id,
player_get_gender,
player_get_item_id,
player_get_banner_id,
game_get_reduce_campaign_ai,
multiplayer_find_spawn_point,
team_get_bot_kill_count,
team_get_bot_death_count,
team_get_kill_count,
team_get_score,
team_get_faction,
player_get_value_of_original_items,
server_get_renaming_server_allowed,
server_get_changing_game_type_allowed,
server_get_friendly_fire,
server_get_control_block_dir,
server_get_combat_speed,
server_get_add_to_game_servers_list,
server_get_ghost_mode,
server_get_max_num_players,
server_get_melee_friendly_fire,
server_get_friendly_fire_damage_self_ratio,
server_get_friendly_fire_damage_friend_ratio,
server_get_anti_cheat,
troop_get_slot,
party_get_slot,
faction_get_slot,
scene_get_slot,
party_template_get_slot,
agent_get_slot,
quest_get_slot,
item_get_slot,
player_get_slot,
team_get_slot,
scene_prop_get_slot,
store_last_sound_channel,
get_angle_between_positions,
get_distance_between_positions,
get_distance_between_positions_in_meters,
get_sq_distance_between_positions,
get_sq_distance_between_positions_in_meters,
get_sq_distance_between_position_heights,
position_get_x,
position_get_y,
position_get_z,
position_get_scale_x,
position_get_scale_y,
position_get_scale_z,
position_get_rotation_around_z,
position_normalize_origin,
position_get_rotation_around_x,
position_get_rotation_around_y,
position_get_distance_to_terrain,
position_get_distance_to_ground_level,
create_text_overlay,
create_mesh_overlay,
create_button_overlay,
create_image_button_overlay,
create_slider_overlay,
create_progress_overlay,
create_combo_button_overlay,
create_text_box_overlay,
create_check_box_overlay,
create_simple_text_box_overlay,
create_image_button_overlay_with_tableau_material,
create_mesh_overlay_with_tableau_material,
create_game_button_overlay,
create_in_game_button_overlay,
create_number_box_overlay,
create_listbox_overlay,
create_mesh_overlay_with_item_id,
overlay_get_position,
create_combo_label_overlay,
get_average_game_difficulty,
get_level_boundary,
faction_get_color,
troop_get_type,
troop_get_xp,
troop_get_class,
troop_inventory_slot_get_item_amount,
troop_inventory_slot_get_item_max_amount,
troop_get_inventory_capacity,
troop_get_inventory_slot,
troop_get_inventory_slot_modifier,
troop_get_upgrade_troop,
item_get_type,
party_get_num_companions,
party_get_num_prisoners,
party_get_current_terrain,
party_get_template_id,
party_count_members_of_type,
party_count_companions_of_type,
party_count_prisoners_of_type,
party_get_free_companions_capacity,
party_get_free_prisoners_capacity,
party_get_helpfulness,
party_get_ai_initiative,
party_get_num_companion_stacks,
party_get_num_prisoner_stacks,
party_stack_get_troop_id,
party_stack_get_size,
party_stack_get_num_wounded,
party_stack_get_troop_dna,
party_prisoner_stack_get_troop_id,
party_prisoner_stack_get_size,
party_prisoner_stack_get_troop_dna,
party_get_cur_town,
party_get_morale,
party_get_battle_opponent,
party_get_icon,
party_get_skill_level,
get_battle_advantage,
party_get_attached_to,
party_get_num_attached_parties,
party_get_attached_party_with_rank,
get_player_agent_no,
get_player_agent_kill_count,
get_player_agent_own_troop_kill_count,
agent_get_horse,
agent_get_rider,
agent_get_party_id,
agent_get_entry_no,
agent_get_troop_id,
agent_get_item_id,
store_agent_hit_points,
agent_get_kill_count,
agent_get_player_id,
agent_get_wielded_item,
agent_get_ammo,
agent_get_simple_behavior,
agent_get_combat_state,
agent_get_attached_scene_prop,
agent_get_time_elapsed_since_removed,
agent_get_number_of_enemies_following,
agent_get_attack_action,
agent_get_defend_action,
agent_get_group,
agent_get_action_dir,
agent_get_animation,
agent_get_team,
agent_get_class,
agent_get_division,
team_get_hold_fire_order,
team_get_movement_order,
team_get_riding_order,
team_get_weapon_usage_order,
team_get_leader,
agent_get_item_slot,
scene_prop_get_num_instances,
scene_prop_get_instance,
scene_prop_get_visibility,
scene_prop_get_hit_points,
scene_prop_get_max_hit_points,
scene_prop_get_team,
agent_get_ammo_for_slot,
agent_deliver_damage_to_agent_advanced,
team_get_gap_distance,
scene_item_get_num_instances,
scene_item_get_instance,
scene_spawned_item_get_num_instances,
scene_spawned_item_get_instance,
prop_instance_get_variation_id,
prop_instance_get_variation_id_2,
prop_instance_get_position,
prop_instance_get_starting_position,
prop_instance_get_scale,
prop_instance_get_scene_prop_kind,
prop_instance_is_animating,
prop_instance_get_animation_target_position,
agent_get_item_cur_ammo,
mission_get_time_speed,
mission_cam_get_aperture,
store_trigger_param,
store_trigger_param_1,
store_trigger_param_2,
store_trigger_param_3,
agent_ai_get_look_target,
agent_ai_get_move_target,
agent_ai_get_behavior_target,
agent_get_crouch_mode,
store_or,
store_and,
store_mod,
store_add,
store_sub,
store_mul,
store_div,
store_sqrt,
store_pow,
store_sin,
store_cos,
store_tan,
assign,
store_random,
store_random_in_range,
store_asin,
store_acos,
store_atan,
store_atan2,
store_troop_gold,
store_num_free_stacks,
store_num_free_prisoner_stacks,
store_party_size,
store_party_size_wo_prisoners,
store_troop_kind_count,
store_num_regular_prisoners,
store_troop_count_companions,
store_troop_count_prisoners,
store_item_kind_count,
store_free_inventory_capacity,
store_skill_level,
store_character_level,
store_attribute_level,
store_troop_faction,
store_troop_health,
store_proficiency_level,
store_relation,
store_conversation_agent,
store_conversation_troop,
store_partner_faction,
store_encountered_party,
store_encountered_party2,
store_faction_of_party,
store_current_scene,
store_zoom_amount,
store_item_value,
store_troop_value,
store_partner_quest,
store_random_quest_in_range,
store_random_troop_to_raise,
store_random_troop_to_capture,
store_random_party_in_range,
store_random_horse,
store_random_equipment,
store_random_armor,
store_quest_number,
store_quest_item,
store_quest_troop,
store_current_hours,
store_time_of_day,
store_current_day,
store_distance_to_party_from_party,
get_party_ai_behavior,
get_party_ai_object,
get_party_ai_current_behavior,
get_party_ai_current_object,
store_num_parties_created,
store_num_parties_destroyed,
store_num_parties_destroyed_by_player,
store_num_parties_of_template,
store_random_party_of_template,
store_remaining_team_no,
store_mission_timer_a_msec,
store_mission_timer_b_msec,
store_mission_timer_c_msec,
store_mission_timer_a,
store_mission_timer_b,
store_mission_timer_c,
store_enemy_count,
store_friend_count,
store_ally_count,
store_defender_count,
store_attacker_count,
store_normalized_team_count,
])
# Operations whose first argument is a destination that may be a global
# variable: all of lhs_operations plus the in-place val_* arithmetic ops.
global_lhs_operations = frozenset([
    val_lshift,
    val_rshift,
    val_add,
    val_sub,
    val_mul,
    val_div,
    val_max,
    val_min,
    val_mod,
    ] + list(lhs_operations))
can_fail_operations = frozenset([
ge,
eq,
gt,
is_between,
entering_town,
map_free,
encountered_party_is_attacker,
conversation_screen_is_active,
troop_is_hero,
troop_is_wounded,
key_is_down,
key_clicked,
game_key_is_down,
game_key_clicked,
hero_can_join,
hero_can_join_as_prisoner,
party_can_join,
party_can_join_as_prisoner,
troops_can_join,
troops_can_join_as_prisoner,
party_can_join_party,
main_party_has_troop,
party_is_in_town,
party_is_in_any_town,
party_is_active,
player_has_item,
troop_has_item_equipped,
troop_is_mounted,
troop_is_guarantee_ranged,
troop_is_guarantee_horse,
player_is_active,
multiplayer_is_server,
multiplayer_is_dedicated_server,
game_in_multiplayer_mode,
player_is_admin,
player_is_busy_with_menus,
player_item_slot_is_picked_up,
check_quest_active,
check_quest_finished,
check_quest_succeeded,
check_quest_failed,
check_quest_concluded,
is_trial_version,
is_edit_mode_enabled,
troop_slot_eq,
party_slot_eq,
faction_slot_eq,
scene_slot_eq,
party_template_slot_eq,
agent_slot_eq,
quest_slot_eq,
item_slot_eq,
player_slot_eq,
team_slot_eq,
scene_prop_slot_eq,
troop_slot_ge,
party_slot_ge,
faction_slot_ge,
scene_slot_ge,
party_template_slot_ge,
agent_slot_ge,
quest_slot_ge,
item_slot_ge,
player_slot_ge,
team_slot_ge,
scene_prop_slot_ge,
position_has_line_of_sight_to_position,
position_is_behind_position,
is_presentation_active,
all_enemies_defeated,
race_completed_by_player,
num_active_teams_le,
main_hero_fallen,
lt,
neq,
le,
teams_are_enemies,
agent_is_alive,
agent_is_wounded,
agent_is_human,
agent_is_ally,
agent_is_non_player,
agent_is_defender,
agent_is_active,
agent_is_routed,
agent_is_in_special_mode,
agent_is_in_parried_animation,
class_is_listening_order,
agent_check_offer_from_agent,
entry_point_is_auto_generated,
scene_prop_has_agent_on_it,
agent_is_alarmed,
agent_is_in_line_of_sight,
scene_prop_get_instance,
scene_item_get_instance,
scene_spawned_item_get_instance,
scene_allows_mounted_units,
prop_instance_is_valid,
prop_instance_intersects_with_prop_instance,
agent_has_item_equipped,
map_get_land_position_around_position,
map_get_water_position_around_position,
is_zoom_disabled,
is_currently_night,
store_random_party_of_template,
str_is_empty,
])
# Operations that open a new try block; print_operations_block increases the
# indentation level after each of these.
try_begin_operations = frozenset([
    try_begin,
    try_for_range,
    try_for_range_backwards,
    try_for_parties,
    try_for_agents,
    ])
|
{
"content_hash": "4aad6c70fbbbf22484fb2f7816c9bec6",
"timestamp": "",
"source": "github",
"line_count": 1667,
"max_line_length": 337,
"avg_line_length": 84.36772645470906,
"alnum_prop": 0.5673452264986739,
"repo_name": "qt911025/pw_module_system",
"id": "7bf412e0f1bcd275470bbce5d9a9da5a6facd751",
"size": "141015",
"binary": false,
"copies": "2",
"ref": "refs/heads/pw",
"path": "header_operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import cPickle
import gzip
import shutil
import pprint
from os import listdir
from os.path import isfile, join
from PIL import Image
from resizeimage import resizeimage
import numpy as np
def readFilesName(path):
    """Return the names of all regular files directly inside *path*.

    Sub-directories and other non-file entries are skipped; order follows
    os.listdir.
    """
    names = []
    for entry in listdir(path):
        if isfile(join(path, entry)):
            names.append(entry)
    return names
def arrayOfPngFile(file, basePath="../data/cvl.str/"):
    """Load a PNG, scale it to 28 px height and return normalised ink values.

    :param file: file name of the PNG inside *basePath*
    :param basePath: directory holding the images (default keeps the
        original hard-coded location, so existing callers are unaffected)
    :return: tuple ``(pixels, size)`` where *pixels* is a list of floats in
        [0, 1] (1.0 = fully dark pixel) and *size* is the resized
        ``(width, height)``.
    """
    im = Image.open(join(basePath, file), 'r')
    # Normalise every sample to 28 px height, preserving aspect ratio.
    im = resizeimage.resize_height(im, 28)
    pixel_values = np.array(np.asarray(list(im.getdata())), dtype=np.uint8)
    # Average the channels, then invert so that ink (dark) approaches 1.0.
    return [1 - (np.average(pixel) / 255.0) for pixel in pixel_values], im.size
def convert(pklSize):
    """Pickle the first *pklSize* CVL samples and write a gzipped copy.

    Builds a 2-tuple of (list of (pixels, size) image arrays, list of label
    prefixes taken from each file name before the first '-') and writes it
    to ../data/cvl<pklSize>.str.pkl plus a .gz copy alongside it.
    """
    fileList = readFilesName("../data/cvl.str")
    cvlstr = ([arrayOfPngFile(file) for file in fileList[0:pklSize]],
              [file.split("-", 1)[0] for file in fileList[0:pklSize]])
    pklPath = '../data/cvl' + str(pklSize) + '.str.pkl'
    # Binary mode keeps the pickle byte stream intact on every platform;
    # the original text-mode 'w' corrupts it on Windows.
    with open(pklPath, 'wb') as output:
        # Pickle using protocol 0 (cPickle default).
        cPickle.dump(cvlstr, output)
    # with-blocks guarantee both handles are closed even on error.
    with open(pklPath, 'rb') as f_in, gzip.open(pklPath + '.gz', 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
def readAndPrint(path='../data/data.pkl'):
    """Load and pretty-print the pickled objects stored in *path*.

    The file is expected to contain two consecutive pickle records: first a
    3-tuple, then one more object.  The default *path* preserves the
    original hard-coded behaviour.
    """
    # 'rb' + with: binary-safe on all platforms and always closes the file
    # (the original leaked the handle and used text mode).
    with open(path, 'rb') as pkl_file:
        data1, data2, data3 = cPickle.load(pkl_file)
        pprint.pprint(data1)
        pprint.pprint(data2)
        pprint.pprint(data3)
        # The file holds a second, independent pickle record after the tuple.
        data2 = cPickle.load(pkl_file)
        pprint.pprint(data2)
def readMnistAndPrint(path='../data/mnist.pkl.gz'):
    """Load the gzipped MNIST pickle and pretty-print its three splits.

    The default *path* preserves the original hard-coded location.
    """
    # with-block closes the gzip handle (the original leaked it).
    with gzip.open(path, 'r') as f:
        training_data, validation_data, test_data = cPickle.load(f)
    pprint.pprint(training_data)
    pprint.pprint(validation_data)
    pprint.pprint(test_data)
convert(5)
|
{
"content_hash": "2dae490fc2579158365b9b1444d900ec",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 151,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.6497584541062802,
"repo_name": "avicorp/firstLook",
"id": "403cf9a5ed113a95bdbc4f492f444205b5afa19d",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cvl_to_gzip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88319"
}
],
"symlink_target": ""
}
|
"""Config flow for Netatmo."""
import logging
import uuid
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_SHOW_ON_MAP
from homeassistant.core import callback
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from .const import (
CONF_AREA_NAME,
CONF_LAT_NE,
CONF_LAT_SW,
CONF_LON_NE,
CONF_LON_SW,
CONF_NEW_AREA,
CONF_PUBLIC_MODE,
CONF_UUID,
CONF_WEATHER_AREAS,
DOMAIN,
)
class NetatmoFlowHandler(
    config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
    """Config flow to handle Netatmo OAuth2 authentication."""
    DOMAIN = DOMAIN
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return NetatmoOptionsFlowHandler(config_entry)
    @property
    def logger(self) -> logging.Logger:
        """Return logger."""
        return logging.getLogger(__name__)
    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        # Scopes required by every OAuth2 implementation.
        scopes = {
            "read_camera",
            "read_homecoach",
            "read_presence",
            "read_smokedetector",
            "read_station",
            "read_thermostat",
            "write_camera",
            "write_presence",
            "write_thermostat",
        }
        # The Home Assistant Cloud implementation does not support the
        # camera/presence access scopes.
        if self.flow_impl.name != "Home Assistant Cloud":
            scopes.update(("access_camera", "access_presence"))
        # Sorted so the scope string is deterministic.
        return {"scope": " ".join(sorted(scopes))}
    async def async_step_user(self, user_input=None):
        """Handle a flow start."""
        await self.async_set_unique_id(DOMAIN)
        # Netatmo supports only a single config entry.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        return await super().async_step_user(user_input)
class NetatmoOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle Netatmo options."""
    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize Netatmo options flow."""
        self.config_entry = config_entry
        # Mutable working copy; written back to the entry only when the
        # user finishes the flow (see _create_options_entry).
        self.options = dict(config_entry.options)
        self.options.setdefault(CONF_WEATHER_AREAS, {})
    async def async_step_init(self, user_input=None):
        """Manage the Netatmo options."""
        return await self.async_step_public_weather_areas()
    async def async_step_public_weather_areas(self, user_input=None):
        """Manage configuration of Netatmo public weather areas."""
        errors = {}
        if user_input is not None:
            # CONF_NEW_AREA carries the name of an area the user wants to add.
            new_client = user_input.pop(CONF_NEW_AREA, None)
            # Keep only the areas the user left selected in the multi-select;
            # deselected areas are dropped from the options.
            areas = user_input.pop(CONF_WEATHER_AREAS, None)
            user_input[CONF_WEATHER_AREAS] = {
                area: self.options[CONF_WEATHER_AREAS][area] for area in areas
            }
            self.options.update(user_input)
            if new_client:
                # Open the per-area form to configure the new area.
                return await self.async_step_public_weather(
                    user_input={CONF_NEW_AREA: new_client}
                )
            return self._create_options_entry()
        weather_areas = list(self.options[CONF_WEATHER_AREAS])
        data_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_WEATHER_AREAS,
                    default=weather_areas,
                ): cv.multi_select(weather_areas),
                vol.Optional(CONF_NEW_AREA): str,
            }
        )
        return self.async_show_form(
            step_id="public_weather_areas",
            data_schema=data_schema,
            errors=errors,
        )
    async def async_step_public_weather(self, user_input=None):
        """Manage configuration of Netatmo public weather sensors."""
        # A real form submission has no CONF_NEW_AREA key and is stored;
        # a call that carries CONF_NEW_AREA only pre-fills the form below.
        if user_input is not None and CONF_NEW_AREA not in user_input:
            self.options[CONF_WEATHER_AREAS][
                user_input[CONF_AREA_NAME]
            ] = fix_coordinates(user_input)
            # Tag the area with a stable unique id.
            self.options[CONF_WEATHER_AREAS][user_input[CONF_AREA_NAME]][
                CONF_UUID
            ] = str(uuid.uuid4())
            return await self.async_step_public_weather_areas()
        # Previously saved values for this area, if any, seed the defaults.
        orig_options = self.config_entry.options.get(CONF_WEATHER_AREAS, {}).get(
            user_input[CONF_NEW_AREA], {}
        )
        # Fallback defaults: a box around the Home Assistant home location.
        default_longitude = self.hass.config.longitude
        default_latitude = self.hass.config.latitude
        default_size = 0.04
        data_schema = vol.Schema(
            {
                vol.Optional(CONF_AREA_NAME, default=user_input[CONF_NEW_AREA]): str,
                vol.Optional(
                    CONF_LAT_NE,
                    default=orig_options.get(
                        CONF_LAT_NE, default_latitude + default_size
                    ),
                ): cv.latitude,
                vol.Optional(
                    CONF_LON_NE,
                    default=orig_options.get(
                        CONF_LON_NE, default_longitude + default_size
                    ),
                ): cv.longitude,
                vol.Optional(
                    CONF_LAT_SW,
                    default=orig_options.get(
                        CONF_LAT_SW, default_latitude - default_size
                    ),
                ): cv.latitude,
                vol.Optional(
                    CONF_LON_SW,
                    default=orig_options.get(
                        CONF_LON_SW, default_longitude - default_size
                    ),
                ): cv.longitude,
                vol.Required(
                    CONF_PUBLIC_MODE,
                    default=orig_options.get(CONF_PUBLIC_MODE, "avg"),
                ): vol.In(["avg", "max"]),
                vol.Required(
                    CONF_SHOW_ON_MAP,
                    default=orig_options.get(CONF_SHOW_ON_MAP, False),
                ): bool,
            }
        )
        return self.async_show_form(step_id="public_weather", data_schema=data_schema)
    def _create_options_entry(self):
        """Update config entry options."""
        return self.async_create_entry(
            title="Netatmo Public Weather", data=self.options
        )
def fix_coordinates(user_input):
    """Fix coordinates if they don't comply with the Netatmo API.

    Pads every coordinate to at least seven decimal places (short values
    are rejected by the API) and swaps NE/SW values that were entered in
    the wrong order so NE is always the upper-right corner.  Mutates and
    returns *user_input*.
    """
    # Ensure coordinates have acceptable length for the Netatmo API
    for coordinate in [CONF_LAT_NE, CONF_LAT_SW, CONF_LON_NE, CONF_LON_SW]:
        # partition() is safe for values with no "." in their string form
        # (e.g. ints or floats rendered as "1e-07"), where the original
        # split(".")[1] raised IndexError.
        fraction = str(user_input[coordinate]).partition(".")[2]
        if len(fraction) < 7:
            user_input[coordinate] = user_input[coordinate] + 0.0000001
    # Swap coordinates if entered in wrong order
    if user_input[CONF_LAT_NE] < user_input[CONF_LAT_SW]:
        user_input[CONF_LAT_NE], user_input[CONF_LAT_SW] = (
            user_input[CONF_LAT_SW],
            user_input[CONF_LAT_NE],
        )
    if user_input[CONF_LON_NE] < user_input[CONF_LON_SW]:
        user_input[CONF_LON_NE], user_input[CONF_LON_SW] = (
            user_input[CONF_LON_SW],
            user_input[CONF_LON_NE],
        )
    return user_input
|
{
"content_hash": "01357c3f2024246e25829d5daca76f56",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 86,
"avg_line_length": 33.76303317535545,
"alnum_prop": 0.5606400898371702,
"repo_name": "tboyce021/home-assistant",
"id": "6217ef1a0e641e0147b47d89563eadb5f93f749e",
"size": "7124",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "28861968"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
}
|
from faker import Factory
from homes_to_let.models import LettingFavourite
from homes_to_let.factories.letting_factory import LettingFactory
from homes.factories.user_factory import UserFactory
import factory
fake = Factory.create('en_GB')
class LettingFavouriteFactory(factory.DjangoModelFactory):
    """Factory producing LettingFavourite test instances."""
    class Meta:
        model = LettingFavourite
    # Each favourite links a freshly generated letting to a generated user.
    property = factory.SubFactory(LettingFactory)
    user = factory.SubFactory(UserFactory)
|
{
"content_hash": "19650cf3bc38cdba61d0c321e284cfba",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7924107142857143,
"repo_name": "bertnotbob/django-property",
"id": "dba0bc1a0a63f65a92ccab3aa5b555e5ea9c8101",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homes_to_let/factories/letting_favourite_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4936"
},
{
"name": "HTML",
"bytes": "51895"
},
{
"name": "JavaScript",
"bytes": "44306"
},
{
"name": "Python",
"bytes": "158696"
}
],
"symlink_target": ""
}
|
import socket
owner = "Krazy6446"  # nick of the bot owner, reported in the USER line
server = "irc.swiftirc.net"
botnick = "ItchyBot"
# Connect to the IRC server on the standard plaintext port and register the
# bot (USER + NICK) as a side effect of importing this module.
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667))
ircsock.send("USER " + botnick + " " + botnick + " " + botnick + " :TwitchyBot by " + owner + "\n")
ircsock.send("NICK " + botnick + "\n")
def ping():
    # Answer a server PING to keep the connection alive.
    ircsock.send("PONG :Pong\n")
def sendmsg(rec, msg):
output = "PRIVMSG " + rec + " :" + msg + "\n"
print output
ircsock.send(output)
def join(chan):
    """Join the IRC channel named #<chan>."""
    ircsock.send("JOIN #%s\n" % chan)
def getfeed():
    """Receive up to 2048 bytes from the server, stripped of CR/LF."""
    raw = ircsock.recv(2048)
    return raw.strip('\n\r')
#simply joining a test chan
join("pitchbot")
|
{
"content_hash": "6f42b51e09563debc2fd33368b892789",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 99,
"avg_line_length": 21.78787878787879,
"alnum_prop": 0.5938803894297635,
"repo_name": "krazeh/Twitchy-Bot",
"id": "8a70241122f689920cc037a46a3d6f4aac8d98bc",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/communicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3315"
}
],
"symlink_target": ""
}
|
import logging
# don't output warnings from scapy, kthx
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import IP, TCP, send
from random import randint
import argparse
import time
import os
# globals
global maxPort
global lastPosition
global fileSize
maxPort = 65535
asciiMax = 127
lastPosition = 0
fileSize = 0
# createPacketTwo - takes in two ASCII characters
# Turns both characters into binary strings, concatenates the strings
# and turns the result into an integer value. It then creates a TCP packet
# and sets the source port as the difference between 65535 and the integer.
# Returns a TCP packet created by scapy.
def createPacketTwo(char1, char2):
    """Encode two ASCII characters into the source port of one TCP packet.

    The two 8-bit values are concatenated into a 16-bit number; the
    packet's source port is set to 65535 minus that number.  Destination
    is taken from the module-level args.destIp.  Returns a scapy packet.
    """
    # get the binary values of both chars without the binary string indicator
    binChar1 = bin(ord(char1))[2:].zfill(8)
    binChar2 = bin(ord(char2))[2:].zfill(8)
    print binChar1 + binChar2
    # get the integer value of the concatenated binary values
    intPortVal = int(binChar1 + binChar2, 2)
    print "bin value " + str((bin(intPortVal)))
    # craft the packet
    packet = IP(dst=args.destIp)/TCP(dport=80, sport=maxPort - intPortVal)
    return packet
# create a packet when we only have 1 character remaining in the file
# works exactly the same as createPacketTwo except we only have one character
# returns a TCP packet created by scapy.
def createPacketOne(char):
    """Encode a single ASCII character into a TCP packet's source port.

    Mirror of createPacketTwo for the final odd byte of the file: the
    source port is 65535 minus the character's 8-bit value.
    """
    # get the binary value of the character
    binChar = bin(ord(char))[2:].zfill(8)
    print binChar
    #get the integer value of that binary value
    intPortVal = int(binChar, 2)
    # craft the packet
    packet = IP(dst=args.destIp)/TCP(dport=80, sport=maxPort -intPortVal)
    return packet
# Read the next byte of the input file, tracking position in a module global.
def readOneByte(fileDescriptor):
    """Return one byte from *fileDescriptor* at the saved offset.

    Seeks to the module-level lastPosition, reads a single byte, and
    records the new offset back into lastPosition for the next call.
    """
    global lastPosition
    fileDescriptor.seek(lastPosition)
    data = fileDescriptor.read(1)
    lastPosition = fileDescriptor.tell()
    return data
# sendPackets - loops through the file specified as a command line argument.
# Reads each byte from the file, calls the appropriate packet creation function
# and sends each packet. Between each send there is a sleep for a randomized amount
# of time within a range, also set as a command line argument.
def sendPackets():
global fileSize
global lastPosition
fileDescriptor = open(args.path, 'r')
while lastPosition < fileSize:
if lastPosition == fileSize - 1:
# the next byte we read contains the last character in the file
char = readOneByte(fileDescriptor)
packet = createPacketOne(char)
else:
# there is at least 2 characters left in the file
char1 = readOneByte(fileDescriptor)
char2 = readOneByte(fileDescriptor)
packet = createPacketTwo(char1, char2)
# scapy send
send(packet)
print "sport: " + str(packet.sport)
time.sleep(randint(1,int(args.sendInterval)))
# start of execution: build the CLI, size the input file, then start sending
parser = argparse.ArgumentParser(description='Covert Channel Client')
parser.add_argument('-p'
                    , '--path'
                    , dest='path'
                    , help='absolute path to file to watch.'
                    , required=True)
parser.add_argument('-d'
                    , '--destination'
                    , dest='destIp'
                    , help='IP address to covertly send data too.'
                    , required=True)
parser.add_argument('-i'
                    , '--interval'
                    , dest='sendInterval'
                    , help='Max interval to wait between sends, in seconds.'
                    , required=True)
args = parser.parse_args()
# total number of bytes to exfiltrate; sendPackets() reads until this offset
fileSize = os.path.getsize(args.path)
sendPackets()
|
{
"content_hash": "a86beda88e938f080d6f6d6a3944d669",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 84,
"avg_line_length": 36.66019417475728,
"alnum_prop": 0.698093220338983,
"repo_name": "cstyan/8505A1",
"id": "692449f82992142d11d0a144809cd3997ce322bc",
"size": "3776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5878"
}
],
"symlink_target": ""
}
|
import gc
import os
import platform
import pwd
import sys
def get_sysinfo():
    """Collect a diagnostics snapshot: pid, memory usage, biggest GC objects."""
    info = {}
    info['pid'] = get_pid()
    info['mem'] = get_memory()
    info['gcobj'] = get_biggest_gc_objects()
    return info
def get_pid():
    """Return the current process id."""
    return os.getpid()
def get_memory(pid=None):
    """Return (VmSize, VmRSS) in kB for *pid* (default: current process).

    Implemented by parsing /proc/<pid>/status, so it returns (0, 0) on
    any platform other than Linux, or when neither field is present.

    Fix: the status file is now opened with a context manager so it is
    closed even if parsing raises (the original leaked it on error).
    """
    vsz, rss = 0, 0
    if platform.system() == 'Linux':
        # NOTE(review): `pid or os.getpid()` would mishandle pid=0, but pid 0
        # is not a valid target process, so the idiom is kept.
        with open('/proc/%d/status' % (pid or os.getpid())) as f:
            for line in f:
                if line.startswith('VmSize:'):
                    vsz = int(line.split()[1])
                elif line.startswith('VmRSS:'):
                    rss = int(line.split()[1])
    return vsz, rss
def get_login(pid=None):
    '''Best-effort login name when USER/LOGNAME are absent from os.environ.

    Tries os.getlogin() first; on Linux falls back to resolving the Uid
    line of /proc/<pid>/status through the passwd database. Returns ''
    when the name cannot be determined.

    Fix: the two bare `except:` clauses (which swallowed even
    KeyboardInterrupt) are narrowed to the exceptions these calls
    actually raise, and the /proc file is closed via a context manager.
    '''
    try:
        login_name = os.getlogin()
    except OSError:
        # no controlling terminal (daemons, some containers)
        login_name = ''
    if login_name:
        return login_name
    if platform.system() == 'Linux':
        with open('/proc/%d/status' % (pid or os.getpid())) as f:
            uid_lines = [s for s in f if s.startswith('Uid:')]
        if uid_lines:
            try:
                uid = int(uid_lines[0].split()[1])
                login_name = pwd.getpwuid(uid)[0]
            except (IndexError, ValueError, KeyError):
                # malformed Uid line, or uid missing from the passwd db
                pass
    return login_name
def get_biggest_gc_objects(count=20):
    """Return the *count* largest GC-tracked lists and dicts, by length.

    Returns a pair (lists, dicts), each sorted longest-first.

    Fix: the Python-2-only ``cmp=`` sort argument (removed in Python 3)
    is replaced by the equivalent, portable, and faster ``key=len``.
    """
    lists = []
    dicts = []
    for obj in gc.get_objects():
        if isinstance(obj, list):
            lists.append(obj)
        elif isinstance(obj, dict):
            dicts.append(obj)
    lists.sort(key=len, reverse=True)
    dicts.sort(key=len, reverse=True)
    return lists[:count], dicts[:count]
|
{
"content_hash": "bc16fbe6b3d55b0faf514fc45699116e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 73,
"avg_line_length": 26.79032258064516,
"alnum_prop": 0.5219747140276941,
"repo_name": "douban/douban-utils",
"id": "ec2324041babcf0fc2044d47de6a67f6a9536ed6",
"size": "1702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "douban/utils/sysinfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62678"
}
],
"symlink_target": ""
}
|
import sys
import os
from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption
from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError
from rpython.config.config import ConfigError
from rpython.config.support import detect_number_of_processors
from rpython.translator.platform import platform as compiler
# Default thresholds driving the inliner; tuned empirically (see comments).
DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int()
# and just small enough to prevend inlining of some rlist functions.
DEFL_PROF_BASED_INLINE_THRESHOLD = 32.4
DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4
DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0
# Default GC and root-finder used when the JIT is enabled.
DEFL_GC = "incminimark" # XXX
DEFL_ROOTFINDER_WITHJIT = "shadowstack"
## if sys.platform.startswith("linux"):
##     _mach = os.popen('uname -m', 'r').read().strip()
##     if _mach.startswith('x86') or _mach in ['i386', 'i486', 'i586', 'i686']:
##         DEFL_ROOTFINDER_WITHJIT = "asmgcc" # only for Linux on x86 / x86-64
IS_64_BITS = sys.maxint > 2147483647
SUPPORT__THREAD = (    # whether the particular C compiler supports __thread
    sys.platform.startswith("linux") or # Linux works
    #sys.platform.startswith("darwin") or # OS/X >= 10.7 works (*)
    False)
    # Windows doesn't work. Please
    # add other platforms here if it works on them.
    # (*) NOTE: __thread on OS/X does not work together with
    # pthread_key_create(): when the destructor is called, the __thread is
    # already freed!
# Directory layout: the cache lives next to the rpython package.
MAINDIR = os.path.dirname(os.path.dirname(__file__))
CACHE_DIR = os.path.realpath(os.path.join(MAINDIR, '_cache'))
# Extra target platforms selectable via --platform (plus 'host').
PLATFORMS = [
    'maemo',
    'host',
    'distutils',
    'arm',
]
translation_optiondescription = OptionDescription(
"translation", "Translation Options", [
BoolOption("continuation", "enable single-shot continuations",
default=False, cmdline="--continuation",
requires=[("translation.type_system", "lltype")]),
ChoiceOption("type_system", "Type system to use when RTyping",
["lltype"], cmdline=None, default="lltype"),
ChoiceOption("backend", "Backend to use for code generation",
["c"], default="c",
requires={
"c": [("translation.type_system", "lltype")],
},
cmdline="-b --backend"),
BoolOption("shared", "Build as a shared library",
default=False, cmdline="--shared"),
BoolOption("log", "Include debug prints in the translation (PYPYLOG=...)",
default=True, cmdline="--log"),
# gc
ChoiceOption("gc", "Garbage Collection Strategy",
["boehm", "ref", "semispace", "statistics",
"generation", "hybrid", "minimark",'incminimark', "none"],
"ref", requires={
"ref": [("translation.rweakref", False), # XXX
("translation.gctransformer", "ref")],
"none": [("translation.rweakref", False), # XXX
("translation.gctransformer", "none")],
"semispace": [("translation.gctransformer", "framework")],
"statistics": [("translation.gctransformer", "framework")],
"generation": [("translation.gctransformer", "framework")],
"hybrid": [("translation.gctransformer", "framework")],
"boehm": [("translation.continuation", False), # breaks
("translation.gctransformer", "boehm")],
"minimark": [("translation.gctransformer", "framework")],
"incminimark": [("translation.gctransformer", "framework")],
},
cmdline="--gc"),
ChoiceOption("gctransformer", "GC transformer that is used - internal",
["boehm", "ref", "framework", "none"],
default="ref", cmdline=None,
requires={
"boehm": [("translation.gcrootfinder", "n/a"),
("translation.gcremovetypeptr", False)],
"ref": [("translation.gcrootfinder", "n/a"),
("translation.gcremovetypeptr", False)],
"none": [("translation.gcrootfinder", "n/a"),
("translation.gcremovetypeptr", False)],
}),
BoolOption("gcremovetypeptr", "Remove the typeptr from every object",
default=IS_64_BITS, cmdline="--gcremovetypeptr"),
ChoiceOption("gcrootfinder",
"Strategy for finding GC Roots (framework GCs only)",
["n/a", "shadowstack", "asmgcc"],
"shadowstack",
cmdline="--gcrootfinder",
requires={
"shadowstack": [("translation.gctransformer", "framework")],
"asmgcc": [("translation.gctransformer", "framework"),
("translation.backend", "c")],
}),
# other noticeable options
BoolOption("thread", "enable use of threading primitives",
default=False, cmdline="--thread"),
BoolOption("sandbox", "Produce a fully-sandboxed executable",
default=False, cmdline="--sandbox",
requires=[("translation.thread", False)],
suggests=[("translation.gc", "generation"),
("translation.gcrootfinder", "shadowstack")]),
BoolOption("rweakref", "The backend supports RPython-level weakrefs",
default=True),
# JIT generation: use -Ojit to enable it
BoolOption("jit", "generate a JIT",
default=False,
suggests=[("translation.gc", DEFL_GC),
("translation.gcrootfinder", DEFL_ROOTFINDER_WITHJIT),
("translation.list_comprehension_operations", True)]),
ChoiceOption("jit_backend", "choose the backend for the JIT",
["auto", "x86", "x86-without-sse2", 'arm'],
default="auto", cmdline="--jit-backend"),
ChoiceOption("jit_profiler", "integrate profiler support into the JIT",
["off", "oprofile"],
default="off"),
ChoiceOption("jit_opencoder_model", "the model limits the maximal length"
" of traces. Use big if you want to go bigger than "
"the default", ["big", "normal"], default="normal"),
BoolOption("check_str_without_nul",
"Forbid NUL chars in strings in some external function calls",
default=False, cmdline=None),
# misc
BoolOption("verbose", "Print extra information", default=False,
cmdline="--verbose"),
StrOption("cc", "Specify compiler to use for compiling generated C", cmdline="--cc"),
BoolOption("profopt", "Enable profile guided optimization. Defaults to enabling this for PyPy. For other training workloads, please specify them in profoptargs",
cmdline="--profopt", default=False),
StrOption("profoptargs", "Absolute path to the profile guided optimization training script + the necessary arguments of the script", cmdline="--profoptargs", default=None),
BoolOption("instrument", "internal: turn instrumentation on",
default=False, cmdline=None),
BoolOption("countmallocs", "Count mallocs and frees", default=False,
cmdline=None),
ChoiceOption("fork_before",
"(UNIX) Create restartable checkpoint before step",
["annotate", "rtype", "backendopt", "database", "source",
"pyjitpl"],
default=None, cmdline="--fork-before"),
BoolOption("dont_write_c_files",
"Make the C backend write everyting to /dev/null. " +
"Useful for benchmarking, so you don't actually involve the disk",
default=False, cmdline="--dont-write-c-files"),
ArbitraryOption("instrumentctl", "internal",
default=None),
StrOption("output", "Output file name", cmdline="--output"),
StrOption("secondaryentrypoints",
"Comma separated list of keys choosing secondary entrypoints",
cmdline="--entrypoints", default="main"),
BoolOption("dump_static_data_info", "Dump static data info",
cmdline="--dump_static_data_info",
default=False, requires=[("translation.backend", "c")]),
# portability options
BoolOption("no__thread",
"don't use __thread for implementing TLS",
default=not SUPPORT__THREAD, cmdline="--no__thread",
negation=False),
IntOption("make_jobs", "Specify -j argument to make for compilation"
" (C backend only)",
cmdline="--make-jobs", default=detect_number_of_processors()),
# Flags of the TranslationContext:
BoolOption("list_comprehension_operations",
"When true, look for and special-case the sequence of "
"operations that results from a list comprehension and "
"attempt to pre-allocate the list",
default=False,
cmdline='--listcompr'),
IntOption("withsmallfuncsets",
"Represent groups of less funtions than this as indices into an array",
default=0),
BoolOption("taggedpointers",
"When true, enable the use of tagged pointers. "
"If false, use normal boxing",
default=False),
BoolOption("keepgoing",
"Continue annotating when errors are encountered, and report "
"them all at the end of the annotation phase",
default=False, cmdline="--keepgoing"),
BoolOption("lldebug",
"If true, makes an lldebug build", default=False,
cmdline="--lldebug"),
BoolOption("lldebug0",
"If true, makes an lldebug0 build", default=False,
cmdline="--lldebug0"),
BoolOption("lto", "enable link time optimization",
default=False, cmdline="--lto",
requires=[("translation.gcrootfinder", "shadowstack")]),
StrOption("icon", "Path to the (Windows) icon to use for the executable"),
StrOption("libname",
"Windows: name and possibly location of the lib file to create"),
OptionDescription("backendopt", "Backend Optimization Options", [
# control inlining
BoolOption("inline", "Do basic inlining and malloc removal",
default=True),
FloatOption("inline_threshold", "Threshold when to inline functions",
default=DEFL_INLINE_THRESHOLD, cmdline="--inline-threshold"),
StrOption("inline_heuristic", "Dotted name of an heuristic function "
"for inlining",
default="rpython.translator.backendopt.inline.inlining_heuristic",
cmdline="--inline-heuristic"),
BoolOption("print_statistics", "Print statistics while optimizing",
default=False),
BoolOption("merge_if_blocks", "Merge if ... elif chains",
cmdline="--if-block-merge", default=True),
BoolOption("mallocs", "Remove mallocs", default=True),
BoolOption("constfold", "Constant propagation",
default=True),
# control profile based inlining
StrOption("profile_based_inline",
"Use call count profiling to drive inlining"
", specify arguments",
default=None), # cmdline="--prof-based-inline" fix me
FloatOption("profile_based_inline_threshold",
"Threshold when to inline functions "
"for profile based inlining",
default=DEFL_PROF_BASED_INLINE_THRESHOLD,
), # cmdline="--prof-based-inline-threshold" fix me
StrOption("profile_based_inline_heuristic",
"Dotted name of an heuristic function "
"for profile based inlining",
default="rpython.translator.backendopt.inline.inlining_heuristic",
), # cmdline="--prof-based-inline-heuristic" fix me
# control clever malloc removal
BoolOption("clever_malloc_removal",
"Drives inlining to remove mallocs in a clever way",
default=False,
cmdline="--clever-malloc-removal"),
FloatOption("clever_malloc_removal_threshold",
"Threshold when to inline functions in "
"clever malloc removal",
default=DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD,
cmdline="--clever-malloc-removal-threshold"),
StrOption("clever_malloc_removal_heuristic",
"Dotted name of an heuristic function "
"for inlining in clever malloc removal",
default="rpython.translator.backendopt.inline.inlining_heuristic",
cmdline="--clever-malloc-removal-heuristic"),
BoolOption("remove_asserts",
"Remove operations that look like 'raise AssertionError', "
"which lets the C optimizer remove the asserts",
default=False),
BoolOption("really_remove_asserts",
"Really remove operations that look like 'raise AssertionError', "
"without relying on the C compiler",
default=False),
BoolOption("stack_optimization",
"Tranform graphs in SSI form into graphs tailored for "
"stack based virtual machines (only for backends that support it)",
default=True),
BoolOption("storesink", "Perform store sinking", default=True),
BoolOption("replace_we_are_jitted",
"Replace we_are_jitted() calls by False",
default=False, cmdline=None),
BoolOption("none",
"Do not run any backend optimizations",
requires=[('translation.backendopt.inline', False),
('translation.backendopt.inline_threshold', 0),
('translation.backendopt.merge_if_blocks', False),
('translation.backendopt.mallocs', False),
('translation.backendopt.constfold', False)])
]),
ChoiceOption("platform",
"target platform", ['host'] + PLATFORMS, default='host',
cmdline='--platform',
suggests={"arm": [("translation.gcrootfinder", "shadowstack"),
("translation.jit_backend", "arm")]}),
])
def get_combined_translation_config(other_optdescr=None,
                                    existing_config=None,
                                    overrides=None,
                                    translating=False):
    """Build a fresh top-level 'pypy' Config.

    Combines the translation option description with *other_optdescr*
    (if given). When *existing_config* is supplied, its children (except
    the one being replaced by *other_optdescr*) and their current values
    are carried over into the new config.
    """
    if overrides is None:
        overrides = {}
    translating_opt = BoolOption("translating",
                                 "indicates whether we are translating currently",
                                 default=False, cmdline=None)
    if other_optdescr is not None:
        children = [other_optdescr]
        newname = other_optdescr._name
    else:
        children = []
        newname = ""
    if existing_config is None:
        children.append(translating_opt)
        children.append(translation_optiondescription)
    else:
        children.extend(child
                        for child in existing_config._cfgimpl_descr._children
                        if child._name != newname)
    descr = OptionDescription("pypy", "all options", children)
    config = Config(descr, **overrides)
    if translating:
        config.translating = True
    if existing_config is not None:
        # Copy current values across, skipping the replaced child.
        for child in existing_config._cfgimpl_descr._children:
            if child._name == newname:
                continue
            config._cfgimpl_values[child._name] = getattr(existing_config,
                                                          child._name)
    return config
# ____________________________________________________________
# Optimization-level presets.  Each level maps to a GC name followed by
# backend-optimization keywords; set_opt_level() below interprets them.
OPT_LEVELS = ['0', '1', 'size', 'mem', '2', '3', 'jit']
DEFAULT_OPT_LEVEL = '2'
OPT_TABLE_DOC = {
    '0': 'No optimization. Uses the Boehm GC.',
    '1': 'Enable a default set of optimizations. Uses the Boehm GC.',
    'size': 'Optimize for the size of the executable. Uses the Boehm GC.',
    'mem': 'Optimize for run-time memory usage and use a memory-saving GC.',
    '2': 'Enable most optimizations and use a high-performance GC.',
    '3': 'Enable all optimizations and use a high-performance GC.',
    'jit': 'Enable the JIT.',
}
OPT_TABLE = {
    #level: gc backend optimizations...
    '0': 'boehm nobackendopt',
    '1': 'boehm lowinline',
    'size': 'boehm lowinline remove_asserts',
    'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr',
    '2': DEFL_GC + ' extraopts',
    '3': DEFL_GC + ' extraopts remove_asserts',
    'jit': DEFL_GC + ' extraopts jit',
}
def set_opt_level(config, level):
    """Apply optimization suggestions on the 'config'.

    The optimizations depend on the selected level and possibly on the
    backend.  Raises ConfigError for an unknown level or for settings
    that are inconsistent with the chosen platform.
    """
    if level not in OPT_TABLE:
        raise ConfigError("no such optimization level: %r" % (level,))
    words = OPT_TABLE[level].split()
    gc = words.pop(0)
    # The first word names the GC (lltype only); honour an earlier
    # suggestion if one was already made.
    if config.translation._cfgimpl_value_owners['gc'] != 'suggested':
        config.translation.suggest(gc=gc)
    # Remaining words toggle individual backend optimizations.
    for word in words:
        if word == 'nobackendopt':
            config.translation.backendopt.suggest(none=True)
        elif word == 'lowinline':
            config.translation.backendopt.suggest(
                inline_threshold=DEFL_LOW_INLINE_THRESHOLD)
        elif word == 'remove_asserts':
            config.translation.backendopt.suggest(remove_asserts=True)
        elif word == 'extraopts':
            config.translation.suggest(withsmallfuncsets=5)
        elif word == 'jit':
            config.translation.suggest(jit=True)
        elif word == 'removetypeptr':
            config.translation.suggest(gcremovetypeptr=True)
        else:
            raise ValueError(word)
    # make_sure_not_resized often relies on list_comprehension_operations,
    # so it is always enabled for translation.
    config.translation.suggest(list_comprehension_operations=True)
    # Make the GC choice definitive; inconsistent settings fail here.
    config.translation.gc = config.translation.gc
    # asmgcc is not supported on OS/X or Win32.
    if (config.translation.gcrootfinder == "asmgcc" and
            (sys.platform == "darwin" or sys.platform == "win32")):
        raise ConfigError("'asmgcc' not supported on this platform")
# ----------------------------------------------------------------
def set_platform(config):
    """Install the platform selected by *config* as the global build platform."""
    from rpython.translator.platform import set_platform as _set_platform
    _set_platform(config.translation.platform, config.translation.cc)
def get_platform(config):
    """Return the platform object matching *config* without installing it."""
    from rpython.translator.platform import pick_platform
    return pick_platform(config.translation.platform, config.translation.cc)
# when running a translation, this is patched
# XXX evil global variable
_GLOBAL_TRANSLATIONCONFIG = None
def get_translation_config():
    """Return the translation config when translating; None when running
    un-translated."""
    return _GLOBAL_TRANSLATIONCONFIG
|
{
"content_hash": "149c9188ef99843d3865d7fcbd8faa3d",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 176,
"avg_line_length": 46.74231678486998,
"alnum_prop": 0.5829455796075258,
"repo_name": "oblique-labs/pyVM",
"id": "b4e86424a6884efaeb6ae605ca07f54e11059d66",
"size": "19772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/config/translationoption.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
"""Unit Tests for volume transfers."""
import datetime
import mock
from cinder import context
from cinder import exception
from cinder import objects
from cinder import quota
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
from cinder.transfer import api as transfer_api
QUOTAS = quota.QUOTAS
class VolumeTransferTestCase(test.TestCase):
"""Test cases for volume transfer code."""
    def setUp(self):
        """Create a request context for a fixed user/project pair."""
        super(VolumeTransferTestCase, self).setUp()
        self.ctxt = context.RequestContext(user_id=fake.USER_ID,
                                           project_id=fake.PROJECT_ID)
        # fixed timestamp reused by every created volume
        self.updated_at = datetime.datetime(1, 1, 1, 1, 1, 1)
    @mock.patch('cinder.volume.utils.notify_about_volume_usage')
    def test_transfer_volume_create_delete(self, mock_notify):
        """Creating a transfer sets 'awaiting-transfer' and notifies twice;
        deleting it restores 'available' and notifies twice more."""
        tx_api = transfer_api.API()
        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
        response = tx_api.create(self.ctxt, volume.id, 'Description')
        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)
        tx_api.delete(self.ctxt, response['id'])
        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
        self.assertEqual('available', volume['status'], 'Unexpected state')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.delete.end")]
        mock_notify.assert_has_calls(calls)
        # 2 notifications from create() plus 2 from delete()
        self.assertEqual(4, mock_notify.call_count)
    def test_transfer_invalid_volume(self):
        """An in-use volume cannot be transferred; its status is unchanged."""
        tx_api = transfer_api.API()
        volume = utils.create_volume(self.ctxt, status='in-use',
                                     updated_at=self.updated_at)
        self.assertRaises(exception.InvalidVolume,
                          tx_api.create,
                          self.ctxt, volume.id, 'Description')
        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
        self.assertEqual('in-use', volume['status'], 'Unexpected state')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept_invalid_authkey(self, mock_notify):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('awaiting-transfer', volume['status'],
'Unexpected state')
self.assertRaises(exception.TransferNotFound,
tx_api.accept,
self.ctxt, '2', transfer['auth_key'])
self.assertRaises(exception.InvalidAuthKey,
tx_api.accept,
self.ctxt, transfer['id'], 'wrong')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept_invalid_volume(self, mock_notify):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('awaiting-transfer', volume['status'],
'Unexpected state')
calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
mock_notify.assert_has_calls(calls)
self.assertEqual(2, mock_notify.call_count)
volume.status = 'wrong'
volume.save()
self.assertRaises(exception.InvalidVolume,
tx_api.accept,
self.ctxt, transfer['id'], transfer['auth_key'])
volume.status = 'awaiting-transfer'
volume.save()
# Because the InvalidVolume exception is raised in tx_api, so there is
# only transfer.accept.start called and missing transfer.accept.end.
calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
mock_notify.assert_has_calls(calls)
self.assertEqual(3, mock_notify.call_count)
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept_volume_in_consistencygroup(self, mock_notify):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume = utils.create_volume(self.ctxt,
updated_at=self.updated_at,
consistencygroup_id=
consistencygroup.id)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
self.assertRaises(exception.InvalidVolume,
tx_api.accept,
self.ctxt, transfer['id'], transfer['auth_key'])
@mock.patch.object(QUOTAS, "limit_check")
@mock.patch.object(QUOTAS, "reserve")
@mock.patch.object(QUOTAS, "add_volume_type_opts")
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept(self, mock_notify, mock_quota_voltype,
mock_quota_reserve, mock_quota_limit):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt,
volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
self.ctxt.user_id = fake.USER2_ID
self.ctxt.project_id = fake.PROJECT2_ID
response = tx_api.accept(self.ctxt,
transfer['id'],
transfer['auth_key'])
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual(fake.PROJECT2_ID, volume.project_id)
self.assertEqual(fake.USER2_ID, volume.user_id)
self.assertEqual(response['volume_id'], volume.id,
'Unexpected volume id in response.')
self.assertEqual(response['id'], transfer['id'],
'Unexpected transfer id in response.')
calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
mock.call(self.ctxt, mock.ANY, "transfer.accept.end")]
mock_notify.assert_has_calls(calls)
# The notify_about_volume_usage is called twice at create(),
# and twice at accept().
self.assertEqual(4, mock_notify.call_count)
# Check QUOTAS reservation calls
# QUOTAS.add_volume_type_opts
reserve_opt = {'volumes': 1, 'gigabytes': 1}
release_opt = {'volumes': -1, 'gigabytes': -1}
calls = [mock.call(self.ctxt, reserve_opt, fake.VOLUME_TYPE_ID),
mock.call(self.ctxt, release_opt, fake.VOLUME_TYPE_ID)]
mock_quota_voltype.assert_has_calls(calls)
# QUOTAS.reserve
calls = [mock.call(mock.ANY, **reserve_opt),
mock.call(mock.ANY, project_id=fake.PROJECT_ID,
**release_opt)]
mock_quota_reserve.assert_has_calls(calls)
# QUOTAS.limit_check
values = {'per_volume_gigabytes': 1}
mock_quota_limit.assert_called_once_with(self.ctxt,
project_id=fake.PROJECT2_ID,
**values)
@mock.patch.object(QUOTAS, "reserve")
@mock.patch.object(QUOTAS, "add_volume_type_opts")
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept_over_quota(self, mock_notify, mock_quota_voltype,
mock_quota_reserve):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt,
volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
fake_overs = ['volumes_lvmdriver-3']
fake_quotas = {'gigabytes_lvmdriver-3': 1,
'volumes_lvmdriver-3': 10}
fake_usages = {'gigabytes_lvmdriver-3': {'reserved': 0, 'in_use': 1},
'volumes_lvmdriver-3': {'reserved': 0, 'in_use': 1}}
mock_quota_reserve.side_effect = exception.OverQuota(
overs=fake_overs,
quotas=fake_quotas,
usages=fake_usages)
self.ctxt.user_id = fake.USER2_ID
self.ctxt.project_id = fake.PROJECT2_ID
self.assertRaises(exception.VolumeLimitExceeded,
tx_api.accept,
self.ctxt,
transfer['id'],
transfer['auth_key'])
# notification of transfer.accept is sent only after quota check
# passes
self.assertEqual(2, mock_notify.call_count)
@mock.patch.object(QUOTAS, "limit_check")
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept_over_quota_check_limit(self, mock_notify,
mock_quota_limit):
svc = self.start_service('volume', host='test_host')
self.addCleanup(svc.stop)
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt,
volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
fake_overs = ['per_volume_gigabytes']
fake_quotas = {'per_volume_gigabytes': 1}
fake_usages = {}
mock_quota_limit.side_effect = exception.OverQuota(
overs=fake_overs,
quotas=fake_quotas,
usages=fake_usages)
self.ctxt.user_id = fake.USER2_ID
self.ctxt.project_id = fake.PROJECT2_ID
self.assertRaises(exception.VolumeSizeExceedsLimit,
tx_api.accept,
self.ctxt,
transfer['id'],
transfer['auth_key'])
# notification of transfer.accept is sent only after quota check
# passes
self.assertEqual(2, mock_notify.call_count)
    def test_transfer_get(self):
        """Transfers are visible only within the owning project."""
        tx_api = transfer_api.API()
        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
        t = tx_api.get(self.ctxt, transfer['id'])
        self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
        ts = tx_api.get_all(self.ctxt)
        self.assertEqual(1, len(ts), 'Unexpected number of transfers.')
        # A different project must neither fetch nor list this transfer.
        nctxt = context.RequestContext(user_id=fake.USER2_ID,
                                       project_id=fake.PROJECT2_ID)
        utils.create_volume(nctxt, updated_at=self.updated_at)
        self.assertRaises(exception.TransferNotFound,
                          tx_api.get,
                          nctxt,
                          transfer['id'])
        ts = tx_api.get_all(nctxt)
        self.assertEqual(0, len(ts), 'Unexpected transfers listed.')
    @mock.patch('cinder.volume.utils.notify_about_volume_usage')
    def test_delete_transfer_with_deleted_volume(self, mock_notify):
        """Destroying the volume also removes its pending transfer."""
        # create a volume
        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
        # create a transfer
        tx_api = transfer_api.API()
        transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
        t = tx_api.get(self.ctxt, transfer['id'])
        self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)
        # force delete volume
        volume.destroy()
        # Make sure transfer has been deleted.
        self.assertRaises(exception.TransferNotFound,
                          tx_api.get,
                          self.ctxt,
                          transfer['id'])
|
{
"content_hash": "ae20b0dd4a7fb460a2bfe889f9dabff8",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 78,
"avg_line_length": 45.92982456140351,
"alnum_prop": 0.5814362108479756,
"repo_name": "cloudbase/cinder",
"id": "ccc7bfe7cfc70381c91758f0fd0fba4ae9de5299",
"size": "13704",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_volume_transfer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
import json
import pycurl
from io import BytesIO
import getopt
import errno
import re
import sys
import socket
import MySQLdb
class Job:
    # Record for a single Hadoop job; fields are filled in from the REST
    # responses elsewhere in this script (presumably MapReduce and YARN
    # views -- confirm against the callers outside this chunk).
    name = None
    status = None
    user = None
    queue = None
    start = None
    finish = None
    mapred = None   # MapReduce-side record, per base_mapred_url usage
    yarn = None     # YARN-side record, per base_yarn_url usage
class JobNode:
    # Per-node counters for a job.
    elapsed = 0
    map = 0     # NOTE: shadows the builtin map() inside the class namespace
    reduce = 0  # NOTE: shadows the builtin reduce() (Python 2) likewise
class Node:
    # Minimal record for a cluster node.
    host = None
class Counter:
    # A named counter within a counter group.
    group = None
    counter = None
# ---- module configuration and shared state (set by the CLI below) ----
base_mapred_url = ''
base_yarn_url = ''
base_yarn_urls = list()
db = None               # MySQLdb connection, when --db is enabled
dbhost = 'localhost'
dbname = 'bookkeeping'
dbuser = 'bookkeeping'
dbpassword = ''
debug = 0               # 2=progress, 3=trace, 4=json dumps (see --help text)
gss = None
https = 0               # non-zero selects https:// and the SSL ports
query = ''
host = socket.getfqdn() # default REST host when none is given
id = None               # NOTE: shadows the builtin id()
node_hosts = dict()
node_ids = dict()
counter_list = dict()
jobs = dict()
curl = pycurl.Curl()    # single curl handle reused by every get_rest() call
def get_rest(base_url, url):
if debug >= 3:
print '# %s%s' % (base_url, url)
b = BytesIO()
curl.setopt(pycurl.URL, str(base_url + url))
# curl.setopt(pycurl.WRITEDATA, b)
curl.setopt(pycurl.WRITEFUNCTION, b.write)
curl.perform()
s = b.getvalue().decode('utf-8')
if curl.getinfo(curl.RESPONSE_CODE) != 200:
print s
print 'Status: %d' % curl.getinfo(curl.RESPONSE_CODE)
curl.close()
b.close()
raise Exception()
j = json.loads(s)
if debug >= 4:
print json.dumps(j, indent=4)
return j
def get_cluster_status(base_url):
try:
j = get_rest(base_url, '/ws/v1/cluster/info')
except pycurl.error:
if curl.getinfo(pycurl.OS_ERRNO) == errno.ECONNREFUSED:
j = json.loads('{"clusterInfo":{"state":"NO CONNETION"}}')
else:
raise
if not j['clusterInfo']:
if debug >= 3:
print 'Error with YARN RM'
return None
ci = j['clusterInfo']
if 'haState' not in ci.keys():
ci['haState'] = 'NONE'
if debug >= 3:
print '[YARN] state=%s, haState=%s' % (ci['state'], ci['haState'])
if ci['state'] != 'STARTED':
return None
if ci['haState'] != 'ACTIVE':
return None
return j
def gen_url(base_url, port_nossl, port_ssl):
    """Normalize a server URL: default hostname, schema and port if missing.

    Schema and port are picked from the global `https` flag; an empty
    base_url falls back to the global default `host`.
    """
    if https:
        schema, port = 'https://', port_ssl
    else:
        schema, port = 'http://', port_nossl
    url = base_url or host
    if '://' not in url:
        url = schema + url
    if not re.match(r'.*:\d+$', url):
        url += ':%d' % port
    return url
try:
opts, args = getopt.getopt(sys.argv[1:], 'hb:c:d:g:j:m:y:sq:', ['help', 'base=', 'config=', 'db', 'dbhost=', 'dbname=', 'dbuser=', 'dbpassword=', 'debug=', 'gss=', 'jobid=', 'mapred=', 'yarn=', 'ssl', 'query='])
except getopt.GetoptError:
print 'Args error'
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print "jobs.py [OPTIONS]\n\
OPTIONS are:\n\
-h, --help ........ help message\n\
-b, --base ........ default hostname for YARN ans MapReduce\n\
-c, --config ...... config file\n\
-d, --debug LEVEL . debug output (2=progress, 3=trace, 4=json dumps)\n\
--db .............. enable database\n\
--dbhost\n\
--dbname\n\
--dbuser\n\
--dbpassword\n\
-g, --gss=0/1 ..... enable SPNEGO (default: according to ssl)\n\
-j, --jobid ....... single job query istead of list all\n\
-m, --mapred URL .. MapReduce Job History server\n\
-y, --yarn URL .... YARN Resource Manager\n\
-s, --ssl ......... enable default HTTPS schema and ports\n\
-q, --query ....... initial query parameter (only if -j is not used)"
sys.exit(0)
elif opt in ('-b', '--base'):
host = arg
elif opt in ('-c', '--config'):
f = open(arg, 'r')
for line in f:
cfg = line.rstrip().split('=')
if cfg[0] == 'base':
host = cfg[1]
elif cfg[0] == 'dbhost':
dbhost = cfg[1]
elif cfg[0] == 'db':
db = 1
elif cfg[0] == 'dbname':
dbname = cfg[1]
elif cfg[0] == 'dbuser':
dbuser = cfg[1]
elif cfg[0] == 'dbpassword':
dbpassword = cfg[1]
elif cfg[0] == 'debug':
debug = int(cfg[1])
elif cfg[0] == 'gss':
gss = int(cfg[1])
elif cfg[0] == 'mapred':
base_mapred_url = cfg[1]
elif cfg[0] == 'yarn':
base_yarn_urls.append(cfg[1])
elif cfg[0] == 'ssl':
https = int(cfg[1])
elif opt in ('-d', '--debug'):
debug = int(arg)
elif opt in ('--db'):
db = 1
elif opt in ('--dbhost'):
dbhost = arg
elif opt in ('--dbname'):
dbname = arg
elif opt in ('--dbuser'):
dbuser = arg
elif opt in ('--dbpassword'):
dbpassword = arg
elif opt in ('-g', '--gss'):
gss = int(arg)
elif opt in ('-j', '--jobid'):
id = arg
elif opt in ('-m', '--mapred'):
base_mapred_url = arg
elif opt in ('-y', '--yarn'):
base_yarn_urls.append(arg)
elif opt in ('-s', '--ssl'):
https = 1
elif opt in ('-q', '--query'):
query = '?%s' % arg
else:
print 'Args error'
sys.exit(2)
# SPNEGO defaults to whatever the HTTPS flag says unless set explicitly.
if gss is None:
    gss = https
if gss:
    if debug >= 2:
        print '[CURL] SPNEGO enabled'
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)
    curl.setopt(pycurl.USERPWD, ":")
# Normalize all candidate YARN RM URLs (default ports 8088/8090).
for i in range(len(base_yarn_urls)):
    base_yarn_urls[i] = gen_url(base_yarn_urls[i], 8088, 8090)
    if debug >= 2:
        print '[YARN] URL %d: %s' % (i, base_yarn_urls[i])
# Pick the first RM that is STARTED and ACTIVE (HA setups list several).
for i in range(len(base_yarn_urls)):
    j = get_cluster_status(base_yarn_urls[i])
    if j:
        base_yarn_url = base_yarn_urls[i]
        break
if not base_yarn_url:
    print '[YARN] probem with RM'
    sys.exit(2)
# MapReduce history server (default ports 19888/19890).
base_mapred_url = gen_url(base_mapred_url, 19888, 19890)
if debug >= 2:
    print '[MR] URL: ' + base_mapred_url
    print '[YARN] URL: ' + base_yarn_url
# Prefixes stripped from job/application/attempt identifiers.
regJob = re.compile('^job_')
regApp = re.compile('^application_')
regAtt = re.compile('^attempt_')
# Fetch job data: a single job when -j was given, otherwise the full list.
# Results from both services are merged into the `jobs` dict keyed by the
# bare numeric id (prefix stripped).
if id:
    # Accept ids with or without the job_/application_ prefix.
    if regJob.match(id):
        id = regJob.sub('', id)
    if regApp.match(id):
        id = regApp.sub('', id)
    mapred_url = base_mapred_url + '/ws/v1/history/mapreduce/jobs/job_%s' % id
    yarn_url = base_yarn_url + '/ws/v1/cluster/apps/application_%s' % id
    # Either service may not know the job; treat failures as "no data".
    try:
        j1 = get_rest(mapred_url, '')
    except Exception:
        j1 = None
    try:
        j2 = get_rest(yarn_url, '')
    except Exception:
        j2 = None
    jcounter = 0
    if j1 and j1['job']:
        if id not in jobs:
            job = Job()
            jobs[id] = job
        else:
            job = jobs[id]
        job.mapred = j1['job']
        jcounter += 1
    if debug >= 2:
        print '[MR] %d jobs' % jcounter
    jcounter = 0
    if j2 and j2['app']:
        if id not in jobs:
            job = Job()
            jobs[id] = job
        else:
            job = jobs[id]
        job.yarn = j2['app']
        jcounter += 1
    if debug >= 2:
        print '[YARN] %d jobs' % jcounter
else:
    # List mode: optional user-supplied query string narrows both lists.
    mapred_url = base_mapred_url + '/ws/v1/history/mapreduce/jobs' + query
    yarn_url = base_yarn_url + '/ws/v1/cluster/apps' + query
    j1 = get_rest(mapred_url, '')
    j2 = get_rest(yarn_url, '')
    jcounter = 0
    if j1["jobs"]:
        for j in j1["jobs"]["job"]:
            id = regJob.sub('', j['id'])
            if id not in jobs:
                job = Job()
                jobs[id] = job
            else:
                job = jobs[id]
            job.mapred = j
            jcounter += 1
    if debug >= 2:
        print '[MR] %d jobs' % jcounter
    jcounter = 0
    if j2["apps"]:
        for j in j2["apps"]["app"]:
            id = regApp.sub('', j['id'])
            if id not in jobs:
                job = Job()
                jobs[id] = job
            else:
                job = jobs[id]
            job.yarn = j
            jcounter += 1
    if debug >= 2:
        print '[YARN] %d jobs' % jcounter
# Open the bookkeeping DB (the flag variable `db` is replaced by the actual
# connection object) and preload the node and counter lookup caches.
if db:
    db = MySQLdb.connect(dbhost, dbuser, dbpassword, dbname)
    st = db.cursor()
    data = st.execute('SELECT id, host FROM nodes')
    while 1:
        data = st.fetchone()
        if data:
            node = Node()
            node.id = data[0]
            node.host = data[1]
            node_ids[node.id] = node
            node_hosts[node.host] = node
        else:
            break
    data = st.execute('SELECT id, groupName, name FROM counters')
    while 1:
        data = st.fetchone()
        if data:
            counter_name = '%s/%s' % (data[1], data[2])
            counter_list[counter_name] = data[0]
        else:
            break
# Strips the ":port" suffix from attempt node addresses.
regHost = re.compile(':\\d+')
jcounter = 0
# Main loop: merge MR/YARN/DB data per job, print a summary, and (when the
# DB is enabled) upsert jobs plus their per-node/per-attempt/counter details.
for id, job in jobs.iteritems():
    jcounter += 1
    # MapReduce history data is the primary source for the basic fields.
    if job.mapred:
        job.name = job.mapred['name']
        job.status = job.mapred['state']
        job.user = job.mapred['user']
        job.queue = job.mapred['queue']
        job.start = job.mapred['startTime']
        job.finish = job.mapred['finishTime']
    if db:
        changed = 0          # basic job row differs / details missing
        changed_counters = 0 # counters rows missing
        # Column indexes of the SELECT below.
        ID = 0
        NAME = 1
        USER = 2
        STATUS = 3
        QUEUE = 4
        SUBMIT = 5
        START = 6
        FINISH = 7
        MEMORY = 8
        CPU = 9
        MAP = 10
        REDUCE = 11
        st.execute("SELECT id, name, user, status, queue, submit, start, finish, memory_seconds, cpu_seconds, map, reduce FROM jobs WHERE id=%s", [id])
        data = st.fetchone()
        # Fill gaps in the merged job from the stored DB row.
        if data:
            if not job.name:
                job.name = data[NAME]
            if not job.status or job.status == 'UNDEFINED':
                job.status = data[STATUS]
            if not job.user:
                job.user = data[USER]
            if not job.queue:
                job.queue = data[QUEUE]
            if not job.start:
                job.start = data[START]
            if not job.finish:
                job.finish = data[FINISH]
    # YARN data fills whatever is still missing.
    if job.yarn:
        if not job.name:
            job.name = job.yarn['name']
        if not job.status or job.status == 'UNDEFINED':
            job.status = job.yarn['state']
        if not job.user:
            job.user = job.yarn['user']
        if not job.queue:
            job.queue = job.yarn['queue']
        if not job.start:
            job.start = job.yarn['startedTime']
            if debug >= 2:
                print '[MR] missing start time of %s completed from YARN (%d)' % (id, job.start)
        if not job.finish:
            job.finish = job.yarn['finishedTime']
            if debug >= 2:
                print '[MR] missing finish time of %s completed from YARN (%d)' % (id, job.finish)
    if debug >= 1:
        print 'job %s (%d):' % (id, jcounter)
        print ' name: %s' % job.name
        print ' status: %s' % job.status
        print ' user: %s' % job.user
        print ' queue: %s' % job.queue
        if job.mapred:
            print ' submit: %d, start: %d, finish: %d' % (job.mapred['submitTime'], job.mapred['startTime'], job.mapred['finishTime'])
            print ' elapsed: %.3f s' % ((job.mapred['finishTime'] - job.mapred['startTime']) / 1000.0)
            print ' finished: %.3f s' % ((job.mapred['finishTime'] - job.mapred['submitTime']) / 1000.0)
        if job.yarn and 'memorySeconds' in job.yarn.keys():
            print ' MB x s: %d' % job.yarn['memorySeconds']
            print ' CPU x s: %d' % job.yarn['vcoreSeconds']
    if db:
        # Upsert the basic job row; `changed` marks any divergence.
        if data:
            if data[NAME] == job.name and data[USER] == job.user and data[STATUS] == job.status and data[QUEUE] == job.queue and data[START] == job.start and data[FINISH] == job.finish:
                if debug >= 3:
                    print '[db] job %s found' % id
            else:
                st.execute("UPDATE jobs SET name=%s, user=%s, status=%s, queue=%s, start=%s, finish=%s WHERE id=%s", (job.name, job.user, job.status, job.queue, job.start, job.finish, id))
                if debug >= 3:
                    print '[db] job %s updated' % id
                changed = 1
        else:
            st.execute("INSERT INTO jobs (id, name, user, status, queue, start, finish) VALUES(%s, %s, %s, %s, %s, %s, %s)", (id, job.name, job.user, job.status, job.queue, job.start, job.finish))
            if debug >= 3:
                print '[db] job %s inserted' % id
            changed = 1
        if job.mapred:
            if data and data[SUBMIT] == job.mapred['submitTime'] and data[MAP] == job.mapred['mapsTotal'] and data[REDUCE] == job.mapred['reducesTotal']:
                if debug >= 3:
                    print '[db] job %s mapred is actual' % id
            else:
                st.execute("UPDATE jobs SET submit=%s, map=%s, reduce=%s WHERE id=%s", (job.mapred['submitTime'], job.mapred['mapsTotal'], job.mapred['reducesTotal'], id))
                if debug >= 3:
                    print '[db] job %s mapred updated' % id
                changed = 1
        if job.yarn and 'memorySeconds' in job.yarn.keys():
            if data and data[MEMORY] == job.yarn['memorySeconds'] and data[CPU] == job.yarn['vcoreSeconds']:
                if debug >= 3:
                    print '[db] job %s yarn is actual' % id
            else:
                st.execute("UPDATE jobs SET memory_seconds=%s, cpu_seconds=%s WHERE id=%s", (job.yarn['memorySeconds'], job.yarn['vcoreSeconds'], id))
                if debug >= 3:
                    print '[db] job %s yarn updated' % id
                changed = 1
        # check for details in DB, set changed flag if missing
        st.execute('SELECT * FROM jobnodes WHERE jobid=%s', [id])
        data = st.fetchone()
        if data:
            st.execute('SELECT * FROM subjobs WHERE jobid=%s', [id])
            data = st.fetchone()
        if not data:
            changed = 1
        st.execute('SELECT * FROM jobcounters WHERE jobid=%s', [id])
        data = st.fetchone()
        if not data:
            changed_counters = 1
    # get details (intensive!), if new job or any other difference
    jobnodes = dict()
    subjobs = list()
    if job.mapred and (not db or changed):
        # Walk every task and attempt; aggregate elapsed time per node.
        t = get_rest(base_mapred_url, '/ws/v1/history/mapreduce/jobs/job_%s/tasks' % id)
        if t['tasks']:
            aggregate = 0
            for task in t['tasks']['task']:
                # print 'taskid: %s, elapsed: %d' % (task['id'], task['elapsedTime'])
                aggregate += task['elapsedTime']
                a = get_rest(base_mapred_url, '/ws/v1/history/mapreduce/jobs/job_%s/tasks/%s/attempts' % (id, task['id']))
                if a['taskAttempts']:
                    for attempt in a['taskAttempts']['taskAttempt']:
                        if regAtt.match(attempt['id']):
                            attempt['id'] = regAtt.sub('', attempt['id'])
                        nodeHost = regHost.sub('', attempt['nodeHttpAddress'])
                        attempt['nodeHttpAddress'] = nodeHost
                        if nodeHost not in jobnodes:
                            jobnodes[nodeHost] = JobNode()
                        jobnodes[nodeHost].elapsed += attempt['elapsedTime']
                        if attempt['type'] == 'MAP':
                            jobnodes[nodeHost].map += 1
                        elif attempt['type'] == 'REDUCE':
                            jobnodes[nodeHost].reduce += 1
                        else:
                            raise Exception('unknown type %s' % attempt['type'])
                        # print 'tasks elapsed: %d' % aggregate
                        subjobs.append(attempt)
        aggregate = 0
        for nodename, jobnode in jobnodes.iteritems():
            if debug >= 1:
                print ' node %s: %d' % (nodename, jobnode.elapsed)
            aggregate += jobnode.elapsed
        if debug >= 1:
            print ' subjobs: %d' % len(subjobs)
            print ' ==> aggregated %d' % aggregate
    counters = list()
    counters_print = list()
    if job.mapred and (not db or changed_counters or changed):
        # Collect all job counters from the history server.
        cs = get_rest(base_mapred_url, '/ws/v1/history/mapreduce/jobs/job_%s/counters' % id)
        if cs and 'jobCounters' in cs.keys():
            if 'counterGroup' in cs['jobCounters'].keys():
                for cg in cs['jobCounters']['counterGroup']:
                    for c in cg['counter']:
                        counter = Counter()
                        counter.group = cg['counterGroupName']
                        counter.counter = c
                        counters.append(counter)
                        counters_print.append('(%s=%d,%d,%d)' % (c['name'], c['reduceCounterValue'], c['mapCounterValue'], c['totalCounterValue']))
        if counters_print and debug >= 1:
            print ' counters: ' + ''.join(counters_print)
    # Replace (delete+insert) the per-node and per-attempt detail rows.
    if jobnodes and db:
        st.execute("DELETE FROM jobnodes WHERE jobid=%s", [id])
        for nodename, jobnode in jobnodes.iteritems():
            if nodename not in node_hosts.keys():
                st.execute('INSERT INTO nodes (host) VALUES (%s)', [nodename])
                node = Node()
                node.id = db.insert_id()
                node.host = nodename
                node_hosts[nodename] = node
                node_ids[node.id] = node
            st.execute("INSERT INTO jobnodes (jobid, nodeid, elapsed, map, reduce) VALUES (%s, %s, %s, %s, %s)", (id, node_hosts[nodename].id, jobnode.elapsed, jobnode.map, jobnode.reduce))
        if debug >= 3:
            print '[db] job %s nodes updated' % id
        st.execute("DELETE FROM subjobs WHERE jobid=%s", [id])
        for subjob in subjobs:
            nodename = subjob['nodeHttpAddress']
            st.execute('INSERT INTO subjobs (id, jobid, nodeid, state, type, start, finish) VALUES (%s, %s, %s, %s, %s, %s, %s)', (subjob['id'], id, node_hosts[nodename].id, subjob['state'], subjob['type'], subjob['startTime'], subjob['finishTime']))
        if debug >= 3:
            print '[db] job %s subjobs updated' % id
    # Replace the counter rows, registering new counter names on the fly.
    if counters and db:
        st.execute('DELETE FROM jobcounters WHERE jobid=%s', [id])
        for counter in counters:
            counter_name = '%s/%s' % (counter.group, counter.counter['name'])
            if counter_name not in counter_list.keys():
                st.execute('INSERT INTO counters (groupName, name) VALUES (%s, %s)', (counter.group, counter.counter['name']))
                counter_list[counter_name] = db.insert_id()
                if debug >= 3:
                    print '[db] new counter %s inserted' % counter_name
            st.execute('INSERT INTO jobcounters (jobid, counterid, reduce, map, total) VALUES (%s, %s, %s, %s, %s)', (id, counter_list[counter_name], counter.counter['reduceCounterValue'], counter.counter['mapCounterValue'], counter.counter['totalCounterValue']))
        if debug >= 3:
            print '[db] job %s counters updated' % id
    # better to update timestamp again explicitly on the end of the transaction
    if db and (jobnodes or counters):
        st.execute('UPDATE jobs SET changed=NOW() WHERE id=%s', [id])
    if db:
        db.commit()
    if debug >= 1:
        print
if db:
    db.close()
curl.close()
|
{
"content_hash": "7a678770b211931cabc708111941c722",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 254,
"avg_line_length": 27.71280276816609,
"alnum_prop": 0.6138094643526033,
"repo_name": "MetaCenterCloudPuppet/cesnet-site_hadoop",
"id": "1be9d6721dc9846b7267456ebd7c5979b535b23f",
"size": "16039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/bookkeeping/jobs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "2104"
},
{
"name": "HTML",
"bytes": "3730"
},
{
"name": "Puppet",
"bytes": "39504"
},
{
"name": "Python",
"bytes": "19466"
},
{
"name": "Ruby",
"bytes": "10725"
},
{
"name": "Shell",
"bytes": "4911"
},
{
"name": "TSQL",
"bytes": "3998"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from test_rest.models import Animal
from test_rest.models import Plant
class AnimalSerializer(serializers.ModelSerializer):
    """ModelSerializer for the Animal model.

    NOTE(review): no ``fields``/``exclude`` declared in Meta -- relies on
    older DRF's implicit all-fields default; newer DRF requires one of them.
    """
    class Meta:
        model = Animal
class PlantSerializer(serializers.ModelSerializer):
    """ModelSerializer for the Plant model.

    NOTE(review): no ``fields``/``exclude`` declared in Meta -- relies on
    older DRF's implicit all-fields default; newer DRF requires one of them.
    """
    class Meta:
        model = Plant
|
{
"content_hash": "d4492da22a20bb8e268258a51a82528e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 22.76923076923077,
"alnum_prop": 0.7601351351351351,
"repo_name": "sorenh/angular-django-rest-resource",
"id": "d050ebbce800c4f88202e04c8f397cc6a181e376",
"size": "296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/django_project/test_rest/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1840"
},
{
"name": "Python",
"bytes": "5017"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable ``author`` FK to Comment.

    The ``models`` dict below is South's auto-generated frozen ORM snapshot
    at the time of this migration -- do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply: add the nullable Comment.author foreign key to auth.User."""
        # Adding field 'Comment.author'
        db.add_column('blogs_comment', 'author',
                      self.gf('django.db.models.fields.related.ForeignKey')(related_name='Comment_author', null=True, to=orm['auth.User']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the Comment.author column again."""
        # Deleting field 'Comment.author'
        db.delete_column('blogs_comment', 'author_id')

    # Frozen ORM state (auto-generated by South).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blogs.blog': {
            'Meta': {'object_name': 'Blog'},
            'block_css': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_footer': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_header': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_middle': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_navbar': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_other_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_bottom': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_middle_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_middle_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_right_top': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_single_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_subscribe_button': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_subscribe_text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'block_title': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'has_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.category': {
            'Meta': {'object_name': 'Category'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
            'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.comment': {
            'Meta': {'object_name': 'Comment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.info_email': {
            'Meta': {'object_name': 'Info_email'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
        },
        'blogs.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'blogs.page': {
            'Meta': {'object_name': 'Page'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
        },
        'blogs.post': {
            'Meta': {'object_name': 'Post'},
            'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'blogs.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
        },
        'blogs.translation': {
            'Meta': {'object_name': 'Translation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['blogs']
|
{
"content_hash": "1525df0f7c781b2ed8ffd4f8935962a9",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 182,
"avg_line_length": 88.84453781512605,
"alnum_prop": 0.5394655947032395,
"repo_name": "carquois/blobon",
"id": "82f2f6a436cb4cf44375136fa83e165599c1ea6c",
"size": "21169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blobon/blogs/migrations/0054_auto__add_field_comment_author.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Download NYC election results from the Board of Elections website.
@author n.o.franklin (at) gmail.com
@date 2018-01-20
This code is governed by the Apache License at http://www.apache.org/licenses/LICENSE-2.0.txt.
Copyright 2018 Google.
"""
import csv
import logging
import pandas as pd
import re
import sys
from bs4 import BeautifulSoup
from requests import get
from tqdm import tqdm
# Values that appear in the BOE "UnitName" column but are tally categories
# or recap rows rather than candidate names; get_candidate()/get_party()
# treat these values specially.
SPECIAL_TYPES = ['Absentee / Military',
                 'Absentee/Military',
                 'Public Counter',
                 'Scattered',
                 'Manually Counted Emergency',
                 'Special Presidential',
                 'Emergency',
                 'Affidavit',
                 'Federal',
                 'Yes',
                 'No']
# NYC Board of Elections results index pages, one per election year.
URLS = [
    'http://vote.nyc.ny.us/html/results/results.shtml', # 2018
    'http://vote.nyc.ny.us/html/results/2017.shtml',
    'http://vote.nyc.ny.us/html/results/2016.shtml',
    'http://vote.nyc.ny.us/html/results/2015.shtml',
    'http://vote.nyc.ny.us/html/results/2014.shtml'
]
def download_parse(url):
    """Scrape a BOE results index page and return precinct-level rows.

    Follows every .csv link found on *url*, downloads each file, and keeps
    rows from the 'EDLevel' (election-district level) files.  Returns a
    pandas DataFrame with the BOE header names normalized (spaces and
    slashes removed).
    """
    logging.info("Downloading {}".format(url))
    response = get(url)
    logging.info(
        "Got {:0,.0f} kb, parsing.".format(
            sys.getsizeof(
                response.text) /
            1024))
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "lxml")
    # Step through all links in page and find CSVs
    urls = []
    for link in soup.find_all('a', href=True):
        href = link['href']
        if '.csv' in href:
            # Root-relative hrefs keep their own path; bare filenames are
            # relative to the results directory.
            if href[0] == '/':
                urls.append(u"http://vote.nyc.ny.us{}".format(href))
            else:
                urls.append(
                    u"http://vote.nyc.ny.us/html/results/{}".format(href))
    # Download each CSV and append results to list
    logging.info("Downloading and parsing individual CSVs.")
    recap_output, ed_output = [], []
    for url in tqdm(urls):
        response = get(url)
        response.raise_for_status()
        # NOTE(review): encoding to bytes before csv.DictReader is a
        # Python 2 idiom; under Python 3 csv expects str lines.
        lines = response.text.encode('utf-8').splitlines()
        reader = csv.DictReader(lines)
        for row in reader:
            #if 'Recap' in url:
            #    recap_output.append(row)
            if 'EDLevel' in url:
                ed_output.append(row)
    logging.info("Got {} precinct-level results.".format(len(ed_output)))
    # Convert to DataFrame
    return pd.DataFrame(ed_output).rename(
        columns={
            'District Key': 'DistrictKey',
            'EDAD Status': 'EDADStatus',
            'Office/Position Title': 'OfficePositionTitle',
            'Party/Independent Body': 'PartyIndependentBody',
            'Unit Name': 'UnitName'})
def get_date(row):
    """Extract the event date from row['Event'] as a 'YYYYMMDD' string.

    Returns None when the field carries no ' - MM/DD/YYYY' suffix.
    """
    match = re.match(r'(.*) - (\d{2})/(\d{2})/(\d{4})', row['Event'])
    if not match:
        return None
    month, day, year = match.group(2), match.group(3), match.group(4)
    return "{}{}{}".format(year, month, day)
def get_election_type(row):
    """Classify row['Event'] as e.g. 'general', 'primary', or 'special'.

    '<Type> Election ... MM/DD/YYYY' yields the lowercased type; other
    '<name> - ...' events use the lowercased, underscored prefix; anything
    else is treated as a special election.
    """
    event = row['Event']
    dated = re.match(r'(.*) Election.*(\d{2})/(\d{2})/(\d{4})', event)
    if dated:
        return dated.group(1).lower()
    dashed = re.match(r'(.*) - .*', event)
    if dashed:
        return dashed.group(1).lower().replace(' ', '_')
    return "special"
def get_candidate(row):
    """Return the candidate name from 'UnitName'.

    Primary rows and special tally rows (SPECIAL_TYPES) are returned
    verbatim; general-election rows have a trailing '(PARTY)' suffix
    stripped off.
    """
    unit = row['UnitName']
    if row['election_type'] == 'primary' or unit in SPECIAL_TYPES:
        return unit
    stripped = unit.strip()
    match = re.match(r"(.*)\W+\((.*)\)", stripped)
    return match.group(1) if match else stripped
def get_party(row):
    """Return the party for a result row, or None when not applicable.

    Primary rows carry the party in their own column (under either the
    raw or the normalized header); general-election rows embed it as a
    '(PARTY)' suffix on the candidate name.  Special tally rows have none.
    """
    if row['election_type'] == 'primary':
        for key in ('PartyIndependentBody', 'Party/Independent Body'):
            if key in row:
                return row[key]
        return None
    unit = row['UnitName']
    if unit in SPECIAL_TYPES:
        return None
    match = re.match(r"(.*)\W+\((.*)\)", unit)
    return match.group(2) if match else None
def get_transformed_df(df, filename):
    """Return the deduplicated, renamed slice of *df* destined for *filename*."""
    columns = ['County', 'precinct', 'OfficePositionTitle', 'DistrictKey',
               'candidate', 'party', 'Tally']
    renames = {'County': 'county',
               'OfficePositionTitle': 'office',
               'DistrictKey': 'district',
               'Tally': 'votes'}
    subset = df.loc[df['filename'] == filename, columns]
    return subset.rename(columns=renames).drop_duplicates()
def run():
    """Download every year's results, normalize them, and write per-election CSVs.

    For each index URL: scrape precinct rows, derive date / election type /
    candidate / party columns, then write one CSV per (date, type, county).
    """
    for url in URLS:
        df = download_parse(url)
        logging.info("Converting dates.")
        df['date'] = df.apply(get_date, axis=1)
        logging.info("Converting election types.")
        df['election_type'] = df.apply(get_election_type, axis=1)
        logging.info("Generating filenames.")
        # One output file per (date, election type, county) combination.
        df['filename'] = df.apply(lambda row: "{}__ny__{}__{}__precinct.csv".format(
            row['date'], row['election_type'], row['County'].lower().replace(' ', '_')), axis=1)
        logging.info("Converting precinct codes.")
        # Precinct is zero-padded "ED/AD" (election district / assembly district).
        df['precinct'] = df.apply(lambda row: "{:03.0f}/{:02.0f}".format(
            int(row['ED']), int(row['AD'])), axis=1)
        logging.info("Converting candidate names.")
        df['candidate'] = df.apply(lambda row: get_candidate(row), axis=1)
        logging.info("Converting party names.")
        df['party'] = df.apply(lambda row: get_party(row), axis=1)
        files = df.loc[:, 'filename'].drop_duplicates()
        logging.info("Writing files.")
        for filename in tqdm(files):
            output = get_transformed_df(df, filename)
            output.to_csv(filename, index=False, encoding='utf-8')
if __name__ == "__main__":
    # Show INFO-level progress messages when run as a script.
    logging.getLogger().setLevel(logging.INFO)
    run()
|
{
"content_hash": "1c24be5c2eb86aa08d57177cbace645f",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 96,
"avg_line_length": 33.056497175141246,
"alnum_prop": 0.5337549136899675,
"repo_name": "UnitedThruAction/Data",
"id": "900c4acf1684da8342a518403c6df0baf0af8cb7",
"size": "5851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/NYCElectionResults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67267"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
import unittest
import sqlite3 as sqlite
# Scalar helpers registered as SQLite user-defined functions below:
# each "func_return*" exercises one Python->SQLite return-type
# conversion, and each "func_is*" reports (truthily) whether SQLite
# handed the callback a parameter of the expected Python type.
def func_returntext():
    return "foo"
def func_returnunicode():
    return "bar"
def func_returnint():
    return 42
def func_returnfloat():
    return 3.14
def func_returnnull():
    return None
def func_returnblob():
    return b"blob"
def func_raiseexception():
    # Deliberate ZeroDivisionError: tests error propagation from UDFs.
    5/0
def func_isstring(v):
    return type(v) is str
def func_isint(v):
    return type(v) is int
def func_isfloat(v):
    return type(v) is float
def func_isnone(v):
    return type(v) is type(None)
def func_isblob(v):
    return isinstance(v, (bytes, memoryview))
# Aggregate fixture classes: each one is deliberately broken in a single
# way (missing method, or an exception raised in exactly one phase of the
# aggregate protocol) so AggregateTests can verify error reporting.
class AggrNoStep:
    # Missing step() entirely.
    def __init__(self):
        pass
    def finalize(self):
        return 1
class AggrNoFinalize:
    # Missing finalize() entirely.
    def __init__(self):
        pass
    def step(self, x):
        pass
class AggrExceptionInInit:
    # Raises (ZeroDivisionError) while being constructed.
    def __init__(self):
        5/0
    def step(self, x):
        pass
    def finalize(self):
        pass
class AggrExceptionInStep:
    # Raises on every step() call.
    def __init__(self):
        pass
    def step(self, x):
        5/0
    def finalize(self):
        return 42
class AggrExceptionInFinalize:
    # Raises when the result is requested.
    def __init__(self):
        pass
    def step(self, x):
        pass
    def finalize(self):
        5/0
class AggrCheckType:
    """Aggregate reporting (1/0) whether the last value had the named type."""
    def __init__(self):
        self.val = None
    def step(self, whichType, val):
        type_map = {"str": str, "int": int, "float": float,
                    "None": type(None), "blob": bytes}
        self.val = int(type_map[whichType] is type(val))
    def finalize(self):
        return self.val
class AggrSum:
    """Aggregate accumulating a float sum of its stepped values."""
    def __init__(self):
        self.val = 0.0
    def step(self, val):
        self.val = self.val + val
    def finalize(self):
        return self.val
class FunctionTests(unittest.TestCase):
    """Tests for scalar user-defined functions (Connection.create_function).

    Covers return-value conversion (text/unicode/int/float/null/blob),
    parameter-type conversion, and propagation of Python exceptions into
    sqlite.OperationalError.  Test methods use the historical 'Check'
    prefix and are collected with that prefix by this module's suite().
    """
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.create_function("returntext", 0, func_returntext)
        self.con.create_function("returnunicode", 0, func_returnunicode)
        self.con.create_function("returnint", 0, func_returnint)
        self.con.create_function("returnfloat", 0, func_returnfloat)
        self.con.create_function("returnnull", 0, func_returnnull)
        self.con.create_function("returnblob", 0, func_returnblob)
        self.con.create_function("raiseexception", 0, func_raiseexception)
        self.con.create_function("isstring", 1, func_isstring)
        self.con.create_function("isint", 1, func_isint)
        self.con.create_function("isfloat", 1, func_isfloat)
        self.con.create_function("isnone", 1, func_isnone)
        self.con.create_function("isblob", 1, func_isblob)
    def tearDown(self):
        self.con.close()
    def CheckFuncErrorOnCreate(self):
        # A negative parameter count other than -1 must be rejected.
        try:
            self.con.create_function("bla", -100, lambda x: 2*x)
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
    def CheckFuncRefCount(self):
        # Registering a function must keep a reference to the callable
        # alive; calling it afterwards should not crash.
        def getfunc():
            def f():
                return 1
            return f
        f = getfunc()
        globals()["foo"] = f
        # self.con.create_function("reftest", 0, getfunc())
        self.con.create_function("reftest", 0, f)
        cur = self.con.cursor()
        cur.execute("select reftest()")
    def CheckFuncReturnText(self):
        cur = self.con.cursor()
        cur.execute("select returntext()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), str)
        self.failUnlessEqual(val, "foo")
    def CheckFuncReturnUnicode(self):
        cur = self.con.cursor()
        cur.execute("select returnunicode()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), str)
        self.failUnlessEqual(val, "bar")
    def CheckFuncReturnInt(self):
        cur = self.con.cursor()
        cur.execute("select returnint()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), int)
        self.failUnlessEqual(val, 42)
    def CheckFuncReturnFloat(self):
        cur = self.con.cursor()
        cur.execute("select returnfloat()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), float)
        # Loose bounds rather than equality: tolerate float round-trip.
        if val < 3.139 or val > 3.141:
            self.fail("wrong value")
    def CheckFuncReturnNull(self):
        cur = self.con.cursor()
        cur.execute("select returnnull()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), type(None))
        self.failUnlessEqual(val, None)
    def CheckFuncReturnBlob(self):
        cur = self.con.cursor()
        cur.execute("select returnblob()")
        val = cur.fetchone()[0]
        self.failUnlessEqual(type(val), bytes)
        self.failUnlessEqual(val, b"blob")
    def CheckFuncException(self):
        # A UDF that raises must surface as OperationalError to the caller.
        cur = self.con.cursor()
        try:
            cur.execute("select raiseexception()")
            cur.fetchone()
            self.fail("should have raised OperationalError")
        except sqlite.OperationalError as e:
            self.failUnlessEqual(e.args[0], 'user-defined function raised exception')
    def CheckParamString(self):
        cur = self.con.cursor()
        cur.execute("select isstring(?)", ("foo",))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckParamInt(self):
        cur = self.con.cursor()
        cur.execute("select isint(?)", (42,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckParamFloat(self):
        cur = self.con.cursor()
        cur.execute("select isfloat(?)", (3.14,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckParamNone(self):
        cur = self.con.cursor()
        cur.execute("select isnone(?)", (None,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckParamBlob(self):
        cur = self.con.cursor()
        cur.execute("select isblob(?)", (memoryview(b"blob"),))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
class AggregateTests(unittest.TestCase):
    """Tests for aggregate UDFs (Connection.create_aggregate).

    Verifies that missing or raising __init__/step/finalize methods are
    reported as errors, that parameter types reach step() correctly, and
    that a simple sum aggregate produces the right result.
    """
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        cur = self.con.cursor()
        cur.execute("""
            create table test(
                t text,
                i integer,
                f float,
                n,
                b blob
                )
            """)
        cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
            ("foo", 5, 3.14, None, memoryview(b"blob"),))
        self.con.create_aggregate("nostep", 1, AggrNoStep)
        self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
        self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
        self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
        self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
        self.con.create_aggregate("checkType", 2, AggrCheckType)
        self.con.create_aggregate("mysum", 1, AggrSum)
    def tearDown(self):
        # Cleanup is commented out here; the connection is left open.
        #self.cur.close()
        #self.con.close()
        pass
    def CheckAggrErrorOnCreate(self):
        # A negative parameter count other than -1 must be rejected.
        try:
            self.con.create_function("bla", -100, AggrSum)
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
    def CheckAggrNoStep(self):
        cur = self.con.cursor()
        try:
            cur.execute("select nostep(t) from test")
            self.fail("should have raised an AttributeError")
        except AttributeError as e:
            self.failUnlessEqual(e.args[0], "'AggrNoStep' object has no attribute 'step'")
    def CheckAggrNoFinalize(self):
        cur = self.con.cursor()
        try:
            cur.execute("select nofinalize(t) from test")
            val = cur.fetchone()[0]
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.failUnlessEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
    def CheckAggrExceptionInInit(self):
        cur = self.con.cursor()
        try:
            cur.execute("select excInit(t) from test")
            val = cur.fetchone()[0]
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.failUnlessEqual(e.args[0], "user-defined aggregate's '__init__' method raised error")
    def CheckAggrExceptionInStep(self):
        cur = self.con.cursor()
        try:
            cur.execute("select excStep(t) from test")
            val = cur.fetchone()[0]
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.failUnlessEqual(e.args[0], "user-defined aggregate's 'step' method raised error")
    def CheckAggrExceptionInFinalize(self):
        cur = self.con.cursor()
        try:
            cur.execute("select excFinalize(t) from test")
            val = cur.fetchone()[0]
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.failUnlessEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
    def CheckAggrCheckParamStr(self):
        cur = self.con.cursor()
        cur.execute("select checkType('str', ?)", ("foo",))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckAggrCheckParamInt(self):
        cur = self.con.cursor()
        cur.execute("select checkType('int', ?)", (42,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckAggrCheckParamFloat(self):
        cur = self.con.cursor()
        cur.execute("select checkType('float', ?)", (3.14,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckAggrCheckParamNone(self):
        cur = self.con.cursor()
        cur.execute("select checkType('None', ?)", (None,))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckAggrCheckParamBlob(self):
        cur = self.con.cursor()
        cur.execute("select checkType('blob', ?)", (memoryview(b"blob"),))
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 1)
    def CheckAggrCheckAggrSum(self):
        cur = self.con.cursor()
        cur.execute("delete from test")
        cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
        cur.execute("select mysum(i) from test")
        val = cur.fetchone()[0]
        self.failUnlessEqual(val, 60)
def authorizer_cb(action, arg1, arg2, dbname, source):
    """Authorizer permitting only SELECTs that avoid table t2 and column c2."""
    allowed = (action == sqlite.SQLITE_SELECT
               and arg1 != 't2'
               and arg2 != 'c2')
    if allowed:
        return sqlite.SQLITE_OK
    return sqlite.SQLITE_DENY
class AuthorizerTests(unittest.TestCase):
    """Tests for Connection.set_authorizer using authorizer_cb.

    The authorizer denies everything except SELECTs that avoid table t2
    and column c2; each test confirms the denial surfaces as a
    DatabaseError ending in 'prohibited'.
    """
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.executescript("""
            create table t1 (c1, c2);
            create table t2 (c1, c2);
            insert into t1 (c1, c2) values (1, 2);
            insert into t2 (c1, c2) values (4, 5);
            """)
        # For our security test:
        self.con.execute("select c2 from t2")
        self.con.set_authorizer(authorizer_cb)
    def tearDown(self):
        pass
    def CheckTableAccess(self):
        try:
            self.con.execute("select * from t2")
        except sqlite.DatabaseError as e:
            if not e.args[0].endswith("prohibited"):
                self.fail("wrong exception text: %s" % e.args[0])
            return
        self.fail("should have raised an exception due to missing privileges")
    def CheckColumnAccess(self):
        try:
            self.con.execute("select c2 from t1")
        except sqlite.DatabaseError as e:
            if not e.args[0].endswith("prohibited"):
                self.fail("wrong exception text: %s" % e.args[0])
            return
        self.fail("should have raised an exception due to missing privileges")
def suite():
    """Build the combined test suite for this module.

    Test methods here use the historical 'Check' prefix, so a TestLoader
    with a matching testMethodPrefix is used instead of
    unittest.makeSuite, which has been deprecated since Python 3.2 and
    removed in 3.13.
    """
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "Check"
    return unittest.TestSuite(
        loader.loadTestsFromTestCase(cls)
        for cls in (FunctionTests, AggregateTests, AuthorizerTests))
def test():
    """Run the module's full suite with a plain-text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
    test()
|
{
"content_hash": "81f9f9fceb4157d9b00acbe5bc5dd486",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 102,
"avg_line_length": 31.407692307692308,
"alnum_prop": 0.5922932484284431,
"repo_name": "MalloyPower/parsing-python",
"id": "c165423727115a8d9e130b7ecdea025c5288303c",
"size": "13341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/sqlite3/test/userfunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import platform
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import elftools
# filetype needs to be imported here because pnacl-driver injects calls to
# filetype.ForceFileType into argument parse actions.
# TODO(dschuff): That's ugly. Find a better way.
import filetype
import pathtools
from driver_env import env
# TODO: import driver_log and change these references from 'foo' to
# 'driver_log.foo', or split driver_log further
from driver_log import Log, DriverOpen, DriverClose, StringifyCommand, DriverExit, FixArch
from driver_temps import TempFiles
from shelltools import shell
def ParseError(s, leftpos, rightpos, msg):
  """Log a command-line parse error with a caret span under the bad text, then exit."""
  Log.Error("Parse Error: %s", msg)
  Log.Error(' ' + s)
  caret_line = (' ' * leftpos) + ('^' * (rightpos - leftpos + 1))
  Log.Error(' ' + caret_line)
  DriverExit(1)
def RunWithEnv(cmd, **kwargs):
  """Run *cmd* with the given env settings applied, restoring the env after."""
  env.push()
  env.setmany(**kwargs)
  result = Run(cmd)
  env.pop()
  return result
def SetExecutableMode(path):
  """Make *path* executable (rwxr-xr-x, masked by the process umask).

  No-op on non-POSIX systems, where execute bits do not apply.
  """
  if os.name == "posix":
    realpath = pathtools.tosys(path)
    # os.umask gets and sets at the same time.
    # There's no way to get it without setting it.
    umask = os.umask(0)
    os.umask(umask)
    # Use the explicit 0o prefix (PEP 3127): valid on Python 2.6+ and
    # required on Python 3, where the bare-0 octal literal is a syntax
    # error.
    os.chmod(realpath, 0o755 & ~umask)
def FilterOutArchArgs(args):
  """Return *args* with every '-arch <value>' pair removed."""
  result = args
  while '-arch' in result:
    pos = result.index('-arch')
    result = result[:pos] + result[pos + 2:]
  return result
def ParseTriple(triple):
  """Parse and validate a target triple, returning its architecture.

  Only the targets this driver supports are recognized; any triple whose
  OS component is not 'nacl' is a fatal error.
  """
  tokens = triple.split('-')
  arch = tokens[0]
  if arch != 'le32':
    arch = FixArch(arch)
  # The machine/vendor field could be present or not, so the OS may be
  # the second or the third token.
  os_part = tokens[1]
  if os_part != 'nacl' and len(tokens) >= 3:
    os_part = tokens[2]
  if os_part == 'nacl':
    return arch
  Log.Fatal('machine/os ' + '-'.join(tokens[1:]) + ' not supported.')
def GetOSName():
  """Map sys.platform to the short OS name used in toolchain directory names."""
  platform_id = sys.platform
  if platform_id == 'darwin':
    return 'mac'
  if platform_id == 'nacl':
    return 'nacl'
  if platform_id.startswith('linux'):
    return 'linux'
  if platform_id in ('cygwin', 'win32'):
    return 'win'
  Log.Fatal('Machine: %s not supported.' % platform_id)
def GetArchNameShort():
  """Collapse platform.machine() to one of 'arm', 'mips', or 'x86'."""
  machine = platform.machine().lower()
  if machine.startswith('arm'):
    return 'arm'
  if machine.startswith('mips'):
    return 'mips'
  x86_aliases = ('amd32', 'i386', 'i686', 'ia32', '32', 'amd64', '64')
  if machine.startswith('x86') or machine in x86_aliases:
    return 'x86'
  Log.Fatal('Architecture: %s not supported.' % machine)
  return 'unknown'
def RunDriver(module_name, args, suppress_inherited_arch_args=False):
  """
  RunDriver() is used to invoke "driver" tools, e.g.
  those prefixed with "pnacl-"

  It automatically appends some additional flags to the invocation
  which were inherited from the current invocation.
  Those flags were preserved by ParseArgs.  Set
  suppress_inherited_arch_args to strip any inherited '-arch <x>' pairs.
  """
  if isinstance(args, str):
    args = shell.split(env.eval(args))
  script = env.eval('${DRIVER_BIN}/%s' % module_name)
  script = shell.unescape(script)
  inherited_driver_args = env.get('INHERITED_DRIVER_ARGS')
  if suppress_inherited_arch_args:
    inherited_driver_args = FilterOutArchArgs(inherited_driver_args)
  script = pathtools.tosys(script)
  cmd = [script] + args + inherited_driver_args
  Log.Info('Driver invocation: %s', repr(cmd))
  # The driver module is imported and run in-process rather than spawned.
  module = __import__(module_name)
  # Save the environment, reset the environment, run
  # the driver module, and then restore the environment.
  env.push()
  env.reset()
  DriverMain(module, cmd)
  env.pop()
def memoize(f):
  """Memoize a zero-argument function: *f* runs once, later calls reuse the result."""
  cache = {}
  def wrapper():
    if None not in cache:
      cache[None] = f()
    return cache[None]
  wrapper.__name__ = f.__name__
  return wrapper
@env.register
@memoize
def GetBuildOS():
  """Return the normalized build-host OS: 'linux', 'nacl', 'darwin', or 'windows'."""
  name = platform.system().lower()
  looks_like_windows = name.startswith('cygwin_nt') or 'windows' in name
  if looks_like_windows:
    name = 'windows'
  if name not in ('linux', 'nacl', 'darwin', 'windows'):
    Log.Fatal("Unsupported platform '%s'", name)
  return name
@env.register
@memoize
def GetBuildArch():
  """Return the build-host architecture as 'i386', 'i686', or 'x86_64'."""
  machine = platform.machine()
  # Windows reports plain 'x86' for 32-bit hosts.
  if machine == 'x86':
    machine = 'i686'
  if machine not in ('i386', 'i686', 'x86_64'):
    Log.Fatal("Unsupported architecture '%s'", machine)
  return machine
def FindBaseDir(function):
  """Walk upward from DRIVER_BIN (at most 16 levels) to a directory
  satisfying *function*; return it, or None if none is found."""
  current = env.getone('DRIVER_BIN')
  depth = 0
  while depth < 16 and not function(current):
    current = pathtools.dirname(current)
    depth += 1
  if function(current):
    return current
  return None
@env.register
@memoize
def FindBaseNaCl():
  """Locate the native_client/ directory and return its escaped path."""
  found = FindBaseDir(lambda cur: pathtools.basename(cur) == 'native_client')
  if found is None:
    Log.Fatal("Unable to find 'native_client' directory")
  return shell.escape(found)
@env.register
@memoize
def FindBaseToolchain():
  """ Find toolchain/OS_ARCH directory """
  base_dir = FindBaseDir(lambda cur: pathtools.basename(cur) == 'toolchain')
  if base_dir is None:
    Log.Fatal("Unable to find 'toolchain' directory")
  # NOTE(review): os.path.join is used here while the rest of this module
  # uses pathtools.join -- presumably equivalent for this path; verify on
  # Windows-style paths.
  toolchain_dir = os.path.join(
      base_dir,
      '%s_%s' % (GetOSName(), GetArchNameShort())
  )
  return shell.escape(toolchain_dir)
@env.register
@memoize
def FindBasePNaCl():
  """Return the (escaped) PNaCl toolchain base directory, one level above bin/."""
  driver_bin = env.getone('DRIVER_BIN')
  return shell.escape(pathtools.dirname(driver_bin))
def AddHostBinarySearchPath(prefix):
  """Append a normalized directory prefix to the host-binary search list."""
  normalized = pathtools.normalize(prefix)
  # Keep a trailing slash on directories so later concatenation is safe.
  if pathtools.isdir(normalized) and not normalized.endswith('/'):
    normalized += '/'
  env.append('BPREFIXES', normalized)
@env.register
def FindBaseHost(tool):
  """ Find the base directory for host binaries (i.e. llvm/binutils) """
  # -B prefixes registered via AddHostBinarySearchPath take priority.
  if env.has('BPREFIXES'):
    for prefix in env.get('BPREFIXES'):
      if os.path.exists(pathtools.join(prefix, 'bin',
                        tool + env.getone('EXEC_EXT'))):
        return prefix
  # Fall back to the PNaCl toolchain's own bin/ directory.
  base_pnacl = FindBasePNaCl()
  if not pathtools.exists(pathtools.join(base_pnacl, 'bin',
                          tool + env.getone('EXEC_EXT'))):
    Log.Fatal('Could not find PNaCl host directory for ' + tool)
  return base_pnacl
def ReadConfig():
  """Load key=value settings from driver.conf next to the driver binary.

  Blank lines and '#' comments are ignored; each remaining line must be
  'KEY=VALUE' and is stored raw in the env.  Fatal on malformed lines.
  """
  # Mock out ReadConfig if running unittests.  Settings are applied directly
  # by DriverTestEnv rather than reading this configuration file.
  if env.has('PNACL_RUNNING_UNITTESTS'):
    return
  driver_bin = env.getone('DRIVER_BIN')
  driver_conf = pathtools.join(driver_bin, 'driver.conf')
  fp = DriverOpen(driver_conf, 'r')
  linecount = 0
  for line in fp:
    linecount += 1
    line = line.strip()
    if line == '' or line.startswith('#'):
      continue
    sep = line.find('=')
    if sep < 0:
      Log.Fatal("%s: Parse error, missing '=' on line %d",
                pathtools.touser(driver_conf), linecount)
    keyname = line[:sep].strip()
    value = line[sep+1:].strip()
    env.setraw(keyname, value)
  DriverClose(fp)
@env.register
def AddPrefix(prefix, varname):
  """Return env list *varname* joined with *prefix* prepended to each escaped value."""
  return ' '.join(prefix + shell.escape(value) for value in env.get(varname))
######################################################################
#
# Argument Parser
#
######################################################################
# Patterns every pnacl driver understands.  Matched arguments are recorded
# in INHERITED_DRIVER_ARGS and re-applied to nested driver invocations
# (see RunDriver).  Each entry is (pattern, action): the action is either
# a Python expression string eval'd with $N capture substitution, or a
# callable receiving the captures.
DriverArgPatterns = [
  ( '--pnacl-driver-verbose',          "env.set('LOG_VERBOSE', '1')"),
  ( ('-arch', '(.+)'),                 "SetArch($0)"),
  ( '--pnacl-sb',                      "env.set('SANDBOXED', '1')"),
  ( '--pnacl-use-emulator',            "env.set('USE_EMULATOR', '1')"),
  ( '--dry-run',                       "env.set('DRY_RUN', '1')"),
  ( '--pnacl-arm-bias',                "env.set('BIAS', 'ARM')"),
  ( '--pnacl-mips-bias',               "env.set('BIAS', 'MIPS32')"),
  ( '--pnacl-i686-bias',               "env.set('BIAS', 'X8632')"),
  ( '--pnacl-x86_64-bias',             "env.set('BIAS', 'X8664')"),
  ( '--pnacl-bias=(.+)',               "env.set('BIAS', FixArch($0))"),
  ( '-save-temps',                     "env.set('SAVE_TEMPS', '1')"),
  ( '-no-save-temps',                  "env.set('SAVE_TEMPS', '0')"),
  ( ('-B', '(.*)'),  AddHostBinarySearchPath),
 ]
# Patterns handled by every driver but deliberately NOT propagated to
# nested invocations.
DriverArgPatternsNotInherited = [
  ( '--pnacl-driver-set-([^=]+)=(.*)',    "env.set($0, $1)"),
  ( '--pnacl-driver-append-([^=]+)=(.*)', "env.append($0, $1)"),
]
def ShouldExpandCommandFile(arg):
  """True when *arg* is an '@file' response file that actually exists on disk."""
  if not arg.startswith('@'):
    return False
  return pathtools.isfile(pathtools.normalize(arg[1:]))
def DoExpandCommandFile(argv, i):
  """Splice the contents of the '@file' response file at argv[i] into argv."""
  arg = argv[i]
  fd = DriverOpen(pathtools.normalize(arg[1:]), 'r')
  more_args = []
  # Use shlex here to process the response file contents.
  # This ensures that single and double quoted args are
  # handled correctly.  Since this file is very likely
  # to contain paths with windows path separators we can't
  # use the normal shlex.parse() since we need to
  # disable '\' (the default escape char).
  for line in fd:
    lex = shlex.shlex(line, posix=True)
    lex.escape = ''
    lex.whitespace_split = True
    more_args += list(lex)
  fd.close()
  # Replace the '@file' token itself with the parsed arguments.
  argv = argv[:i] + more_args + argv[i+1:]
  return argv
def ParseArgs(argv,
              patternlist,
              driver_patternlist=DriverArgPatterns,
              driver_patternlist_not_inherited=DriverArgPatternsNotInherited):
  """Parse argv using the patterns in patternlist

  Also apply the built-in DriverArgPatterns unless instructed otherwise.
  Driver-level arguments matched by driver_patternlist are recorded in
  INHERITED_DRIVER_ARGS so nested invocations (RunDriver) see them;
  anything left unmatched by all three pattern lists is fatal.
  This function must be called by all (real) drivers.
  """
  if driver_patternlist:
    driver_args, argv = ParseArgsBase(argv, driver_patternlist)
    # TODO(robertm): think about a less obscure mechanism to
    #                replace the inherited args feature
    assert not env.get('INHERITED_DRIVER_ARGS')
    env.append('INHERITED_DRIVER_ARGS', *driver_args)
  _, argv = ParseArgsBase(argv, driver_patternlist_not_inherited)
  _, unmatched = ParseArgsBase(argv, patternlist)
  if unmatched:
    for u in unmatched:
      Log.Error('Unrecognized argument: ' + u)
    Log.Fatal('unknown arguments')
def ParseArgsBase(argv, patternlist):
  """ Parse argv using the patterns in patternlist

  Expands '@file' response files in place as it walks argv.
  Returns: (matched, unmatched)
  """
  matched = []
  unmatched = []
  i = 0
  while i < len(argv):
    if ShouldExpandCommandFile(argv[i]):
      argv = DoExpandCommandFile(argv, i)
    if i >= len(argv):
      break
    num_matched, action, groups = MatchOne(argv, i, patternlist)
    if num_matched == 0:
      unmatched.append(argv[i])
      i += 1
      continue
    matched += argv[i:i+num_matched]
    if isinstance(action, str):
      # Perform $N substitution
      for g in xrange(0, len(groups)):
        action = action.replace('$%d' % g, 'groups[%d]' % g)
    try:
      if isinstance(action, str):
        # NOTE: this is essentially an eval for python expressions
        # which does rely on the current environment for unbound vars
        # Log.Info('about to exec [%s]', str(action))
        exec(action)
      else:
        action(*groups)
    # NOTE(review): 'except X, err' and xrange above are Python-2-only
    # syntax; this module targets Python 2.
    except Exception, err:
      Log.Fatal('ParseArgs action [%s] failed with: %s', action, err)
    i += num_matched
  return (matched, unmatched)
def MatchOne(argv, i, patternlist):
  """Find the first pattern in *patternlist* matching argv starting at i.

  Each entry is (regex, action), where regex is a single pattern or a
  sequence of patterns matched against consecutive arguments.  Returns
  (consumed_count, action, captured_groups), or (0, '', []) when nothing
  matches.
  """
  for (regex, action) in patternlist:
    if isinstance(regex, str):
      regex = [regex]
    j = 0
    matches = []
    for r in regex:
      if i+j < len(argv):
        match = re.compile('^'+r+'$').match(argv[i+j])
      else:
        match = None
      matches.append(match)
      j += 1
    if None in matches:
      continue
    # Flatten per-pattern capture groups into one list.  (A comprehension
    # instead of reduce(): clearer, and 'reduce' is not a builtin on
    # Python 3.)
    groups = [g for m in matches for g in m.groups()]
    return (len(regex), action, groups)
  return (0, '', [])
def UnrecognizedOption(*args):
  """Abort the driver with a fatal 'unrecognized option' diagnostic."""
  message = "Unrecognized option: " + ' '.join(args)
  Log.Fatal(message + "\nUse '--help' for more information.")
######################################################################
#
# File Naming System (Temp files & Output files)
#
######################################################################
def DefaultOutputName(filename, outtype):
  """Derive the default output filename for *filename* given *outtype*.

  Preprocessor output ('pp') goes to stdout ('-'); 'po' output gets a
  '.o' suffix; otherwise *outtype* itself is used as the extension.
  """
  # BUG FIX: the original tested `outtype in ('pp')`.  ('pp') is a string,
  # not a tuple, so that was a substring test that also matched
  # outtype == 'p'.  Compare with == instead (same for 'po' below).
  if outtype == 'pp':
    # For pre-processor mode, just print to stdout.
    return '-'
  base = pathtools.basename(filename)
  base = RemoveExtension(base)
  if outtype == 'po':
    return base + '.o'
  assert(outtype in filetype.ExtensionMap.values())
  assert(not filetype.IsSourceType(outtype))
  return base + '.' + outtype
def DefaultPCHOutputName(filename):
  """Return the default precompiled-header output name for *filename*.

  Clang currently uses the GCC '.gch' suffix by default for precompiled
  headers, though its documentation example uses '-o foo.h.pch'.
  """
  return '%s.gch' % filename
def RemoveExtension(filename):
  """Strip the file extension; '.opt.bc' counts as one compound extension."""
  compound = '.opt.bc'
  if filename.endswith(compound):
    return filename[:-len(compound)]
  root, _ = pathtools.splitext(filename)
  return root
def PathSplit(f):
  """Split path *f* into its individual components, root-most first."""
  parts = []
  remaining = f
  while True:
    remaining, piece = pathtools.split(remaining)
    if piece == '':
      break
    parts.append(piece)
  parts.reverse()
  return parts
def CheckPathLength(filename, exit_on_failure=True):
  '''Check that the length of the path is short enough for Windows.

  On Windows, MAX_PATH is ~260 and applies to absolute paths, and to relative
  paths and the absolute paths they expand to (except for specific uses of
  some APIs; see link below). Most applications don't bother to support long
  paths properly (including LLVM, GNU binutils, and ninja). If a path is too
  long, ERROR_PATH_NOT_FOUND is returned, which isn't very useful or clear for
  users. In addition the Chrome build has deep directory hierarchies with long
  names.
  This function checks that the path is valid, so we can throw meaningful
  errors.

  Returns True when the path is acceptable; if exit_on_failure is set, a
  too-long path is fatal instead of returning False.

  http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
  '''
  # Only Windows hosts need the check (unit tests force it on).
  if not IsWindowsPython() and not env.has('PNACL_RUNNING_UNITTESTS'):
    return True
  # First check the name as-is (it's usually a relative path).
  # 255 presumably leaves headroom below Windows' 260-char MAX_PATH.
  if len(filename) > 255:
    if exit_on_failure:
      Log.Fatal('Path name %s is too long (%d characters)' %
                (filename, len(filename)))
    return False
  if os.path.isabs(filename):
    return True
  # Don't assume that the underlying tools or windows APIs will normalize
  # the path before using it. Conservatively count the length of CWD + filename
  appended_name = os.path.join(os.getcwd(), filename)
  if len(appended_name) > 255:
    if exit_on_failure:
      Log.Fatal('Path name %s (expanded from %s) is too long (%d characters)' %
                (appended_name, filename, len(appended_name)))
    return False
  return True
# Generate a unique identifier for each input file.
# Start with the basename, and if that is not unique enough,
# add parent directories. Rinse, repeat.
class TempNameGen(object):
  """Assigns collision-free intermediate filenames derived from the inputs.

  Each input maps to '<output>---<path components>---'; when two inputs
  would collide, more parent path components are pulled in until every
  name is unique (or the whole path is exhausted, which is fatal).
  """
  def __init__(self, inputs, output):
    self.TempBase = output + '---linked'
    self.OutputDir = pathtools.dirname(output)
    # TODO(pdox): Figure out if there's a less confusing way
    #             to simplify the intermediate filename in this case.
    #if len(inputs) == 1:
    #  # There's only one input file, don't bother adding the source name.
    #  TempMap[inputs[0]] = output + '---'
    #  return
    # Build the initial mapping: input -> [component count, split path].
    self.TempMap = dict()
    for f in inputs:
      if f.startswith('-'):
        continue
      path = PathSplit(f)
      self.TempMap[f] = [1,path]
    while True:
      # Find conflicts
      ConflictMap = dict()
      Conflicts = set()
      # NOTE: iteritems() is Python-2-only.
      for (f, [n, path]) in self.TempMap.iteritems():
        candidate = output + '---' + '_'.join(path[-n:]) + '---'
        if candidate in ConflictMap:
          Conflicts.add(ConflictMap[candidate])
          Conflicts.add(f)
        else:
          ConflictMap[candidate] = f
      if len(Conflicts) == 0:
        break
      # Resolve conflicts by including one more path component each.
      for f in Conflicts:
        n = self.TempMap[f][0]
        if n+1 > len(self.TempMap[f][1]):
          Log.Fatal('Unable to resolve naming conflicts')
        self.TempMap[f][0] = n+1
    # Clean up the map: input -> final unique candidate name.
    NewMap = dict()
    for (f, [n, path]) in self.TempMap.iteritems():
      candidate = output + '---' + '_'.join(path[-n:]) + '---'
      NewMap[f] = candidate
    self.TempMap = NewMap
    return
  def ValidatePathLength(self, temp, imtype):
    """Return *temp*, or a short random name in OutputDir when too long for Windows."""
    temp = pathtools.normpath(temp) if temp else temp
    # If the temp name is too long, just pick a random one instead.
    if not CheckPathLength(temp, exit_on_failure=False):
      # imtype is sometimes just an extension, and sometimes a compound
      # extension (e.g. pre_opt.pexe). To keep name length shorter,
      # only take the last extension
      if '.' in imtype:
        imtype = imtype[imtype.rfind('.') + 1:]
      temp = pathtools.join(
          self.OutputDir,
          str(random.randrange(100000, 1000000)) + '.' + imtype)
      CheckPathLength(temp)
    return temp
  def TempNameForOutput(self, imtype):
    """Name an intermediate derived from the final output; registered for cleanup."""
    temp = self.ValidatePathLength(self.TempBase + '.' + imtype, imtype)
    TempFiles.add(temp)
    return temp
  def TempNameForInput(self, input, imtype):
    """Name an intermediate derived from input file *input*; registered for cleanup."""
    # If input is already a temporary name, just change the extension
    if input.startswith(self.TempBase):
      temp = self.TempBase + '.' + imtype
    else:
      # Source file
      temp = self.TempMap[input] + '.' + imtype
      temp = self.ValidatePathLength(temp, imtype)
    TempFiles.add(temp)
    return temp
# (Invoked from loader.py)
# If the driver is waiting on a background process in RunWithLog()
# and the user Ctrl-C's or kill's the driver, it may leave
# the child process (such as llc) running. To prevent this,
# the code below sets up a signal handler which issues a kill to
# the currently running child processes.
# Child subprocess objects to kill when the driver itself is terminated
# (populated by the code that spawns them).
CleanupProcesses = []
def SetupSignalHandlers():
  """Install POSIX signal handlers that kill tracked children, then the driver.

  Prevents orphaned tools (e.g. llc) when the user Ctrl-C's or kills the
  driver while it waits on a background process.  No-op on Windows/NaCl.
  """
  global CleanupProcesses
  def signal_handler(unused_signum, unused_frame):
    for p in CleanupProcesses:
      try:
        p.kill()
      except BaseException:
        pass
    # SIGKILL ourselves so the shell sees a signal death, not a return.
    os.kill(os.getpid(), signal.SIGKILL)
    return 0
  if os.name == 'posix' and sys.platform != 'nacl':
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def ArgsTooLongForWindows(args):
    """Detect when a command line might be too long for Windows."""
    # 8191 characters is the practical cmd.exe command-line limit.
    return IsWindowsPython() and len(' '.join(args)) > 8191
def ConvertArgsToFile(args):
    """Write all args but the command itself to a response file.

    Returns [command, '@<responsefile>'] suitable for tools that accept
    @file argument expansion.
    """
    fd, outfile = tempfile.mkstemp()
    # Remember to delete this file afterwards.
    TempFiles.add(outfile)
    os.write(fd, ' '.join(args[1:]))
    os.close(fd)
    return [args[0], '@' + outfile]
# Note:
# The redirect_stdout and redirect_stderr is only used a handful of times
def Run(args,
        errexit=True,
        redirect_stdout=None,
        redirect_stderr=None):
    """Run a command.

    Returns: (return_code, stdout, stderr)

    Run() is used to invoke "other" tools, e.g.
    those NOT prefixed with "pnacl-".

    stdout and stderr only contain meaningful data if
    redirect_{stdout,stderr} == subprocess.PIPE

    Run will terminate the program upon failure unless errexit == False
    TODO(robertm): errexit == True has not been tested and needs more work

    redirect_stdout and redirect_stderr are passed straight
    to subprocess.Popen
    """
    result_stdout = None
    result_stderr = None
    if isinstance(args, str):
        args = shell.split(env.eval(args))

    args = [pathtools.tosys(args[0])] + args[1:]

    Log.Info('Running: ' + StringifyCommand(args))

    if env.getbool('DRY_RUN'):
        if redirect_stderr or redirect_stdout:
            # TODO(pdox): Prevent this from happening, so that
            # dry-run is more useful.
            Log.Fatal("Unhandled dry-run case.")
        return 0, None, None

    try:
        # If we have too long of a cmdline on windows, running it would fail.
        # Attempt to use a file with the command line options instead in that case.
        if ArgsTooLongForWindows(args):
            actual_args = ConvertArgsToFile(args)
            Log.Info('Wrote long commandline to file for Windows: ' +
                     StringifyCommand(actual_args))
        else:
            actual_args = args

        p = subprocess.Popen(actual_args,
                             stdout=redirect_stdout,
                             stderr=redirect_stderr)
        result_stdout, result_stderr = p.communicate()
    # BUGFIX: 'except Exception, e' is Python-2-only syntax; the
    # 'as' form works on Python 2.6+ and Python 3.
    except Exception as e:
        msg = '%s\nCommand was: %s' % (str(e), StringifyCommand(args))
        print(msg)
        DriverExit(1)

    Log.Info('Return Code: ' + str(p.returncode))

    if errexit and p.returncode != 0:
        if redirect_stdout == subprocess.PIPE:
            Log.Error('--------------stdout: begin')
            Log.Error(result_stdout)
            Log.Error('--------------stdout: end')
        if redirect_stderr == subprocess.PIPE:
            Log.Error('--------------stderr: begin')
            Log.Error(result_stderr)
            Log.Error('--------------stderr: end')
        DriverExit(p.returncode)
    return p.returncode, result_stdout, result_stderr
def IsWindowsPython():
    """True when this Python interpreter is running on a Windows host."""
    return platform.system().lower().find('windows') != -1
def SetupCygwinLibs():
    """Make our bundled cygwin1.dll (etc.) win the PATH search."""
    # Prepend the driver bin directory so its DLLs are found first.
    bindir = pathtools.tosys(env.getone('DRIVER_BIN'))
    os.environ['PATH'] = os.pathsep.join(
        [bindir] + os.environ['PATH'].split(os.pathsep))
def HelpNotAvailable():
    """Fallback help text for driver tools that define no get_help()."""
    return 'Help text not available'
def DriverMain(module, argv):
    """Shared entry point for every pnacl driver tool.

    Configures logging and the driver environment from argv[0], loads
    the driver config, then dispatches to module.main() (or prints the
    tool's help text and returns 0).
    """
    # TODO(robertm): this is ugly - try to get rid of this
    if '--pnacl-driver-verbose' in argv:
        Log.IncreaseVerbosity()
        env.set('LOG_VERBOSE', '1')

    # driver_path has the form: /foo/bar/pnacl_root/newlib/bin/pnacl-clang
    driver_path = pathtools.abspath(pathtools.normalize(argv[0]))
    driver_bin = pathtools.dirname(driver_path)
    script_name = pathtools.basename(driver_path)
    env.set('SCRIPT_NAME', script_name)
    env.set('DRIVER_PATH', driver_path)
    env.set('DRIVER_BIN', driver_bin)
    Log.SetScriptName(script_name)

    ReadConfig()

    if IsWindowsPython():
        SetupCygwinLibs()

    # skip tool name
    argv = argv[1:]

    # Handle help info
    if ('--help' in argv or
        '-h' in argv or
        '-help' in argv or
        '--help-full' in argv):
        # Tools expose their help text via an optional get_help() hook.
        help_func = getattr(module, 'get_help', None)
        if not help_func:
            Log.Fatal(HelpNotAvailable())
        helpstr = help_func(argv)
        print(helpstr)
        return 0

    return module.main(argv)
def MaybeStripNonSFISuffix(s):
    """Removes _NONSFI suffix if possible, otherwise returns |s| as is."""
    suffix = '_NONSFI'
    if s.endswith(suffix):
        return s[:-len(suffix)]
    return s
def SetArch(arch):
    """Record the target architecture in the driver environment."""
    arch = FixArch(arch)
    base_arch = MaybeStripNonSFISuffix(arch)
    env.set('ARCH', arch)
    env.set('BASE_ARCH', base_arch)
    # A difference between the two names means the _NONSFI suffix was
    # present, i.e. we are targeting Non-SFI mode.
    env.setbool('NONSFI_NACL', arch != base_arch)
def GetArch(required=False):
    """Return the current target architecture, or None if unset.

    Fatal error when *required* is set and no -arch was given.
    """
    arch = env.getone('ARCH') or None
    if required and not arch:
        Log.Fatal('Missing -arch!')
    return arch
def ArchMerge(filename, must_match):
    """Merge the architecture of an ELF or archive file into ARCH.

    Read an ELF file or an archive file to determine the machine type.
    If ARCH is already set, make sure the file has the same architecture.
    If ARCH is not set, set the ARCH to the file's architecture.

    Note that SFI and NONSFI share the same file format, so they are
    treated as the same here.

    Returns True if the file matches ARCH. Returns False if it doesn't;
    this only happens when must_match is False (when must_match is True
    a fatal error is generated instead).
    """
    file_type = filetype.FileType(filename)
    if file_type in ('o', 'so'):
        elfheader = elftools.GetELFHeader(filename)
        if not elfheader:
            Log.Fatal("%s: Cannot read ELF header", filename)
        new_arch = elfheader.arch
    elif filetype.IsNativeArchive(filename):
        new_arch = file_type[len('archive-'):]
    else:
        Log.Fatal('%s: Unexpected file type in ArchMerge', filename)

    existing_arch = GetArch()
    if not existing_arch:
        # First file seen: it defines the architecture.
        SetArch(new_arch)
        return True

    # The _NONSFI binary format is the same as SFI's.
    existing_arch = MaybeStripNonSFISuffix(existing_arch)

    if new_arch == existing_arch:
        return True

    if must_match:
        msg = "%s: Incompatible object file (%s != %s)"
        logfunc = Log.Fatal
    else:
        msg = "%s: Skipping incompatible object file (%s != %s)"
        logfunc = Log.Warning
    logfunc(msg, filename, new_arch, existing_arch)
    return False
def CheckTranslatorPrerequisites():
    """Assert that the scons artifacts for running the sandboxed
    translator exist: sel_ldr, and the IRT blob."""
    if env.getbool('DRY_RUN'):
        return
    requirements = ['SEL_LDR', 'IRT_BLOB']
    if GetBuildOS() == 'linux':
        # Linux also requires the nacl bootstrap helper.
        requirements.append('BOOTSTRAP_LDR')
    for var in requirements:
        path = env.getone(var)
        if not pathtools.exists(path):
            Log.Fatal('Could not find %s [%s]', var, path)
def SelLdrCommand():
    """Return the shell template used to invoke sel_ldr."""
    if GetBuildOS() == 'linux':
        # On Linux sel_ldr is started via the bootstrap loader, which
        # needs a placeholder for the reserved low address range.
        cmd = '${BOOTSTRAP_LDR} ${SEL_LDR} --reserved_at_zero=0x%s' % ('X' * 16)
    else:
        cmd = '${SEL_LDR}'
    return '${SEL_LDR_PREFIX} %s ${SEL_LDR_FLAGS}' % cmd
def AddListToEnv(command, env_var_prefix, string_list):
    """Append '-E <prefix>_<i>=<value>' pairs to *command* in order."""
    for index, value in enumerate(string_list):
        command.extend(['-E', '%s_%d=%s' % (env_var_prefix, index, value)])
class DriverChain(object):
    """The DriverChain class takes one or more input files,
    an output file, and a sequence of steps. It executes
    those steps, using intermediate files in between,
    to generate the final output.
    """

    def __init__(self, input, output, namegen):
        self.input = input
        self.output = pathtools.normpath(output) if output else output
        self.steps = []
        self.namegen = namegen

        # "input" can be a list of files or a single file.
        # If we're compiling for a single file, then we use
        # TempNameForInput. If there are multiple files
        # (e.g. linking), then we use TempNameForOutput.
        self.use_names_for_input = isinstance(self.input, str)
        if self.use_names_for_input:
            if self.input:
                self.input = pathtools.normpath(self.input)
            CheckPathLength(self.input)
        else:
            self.input = [pathtools.normpath(p) if p else p
                          for p in self.input]
            for path in self.input:
                CheckPathLength(path)
        CheckPathLength(output)

    def add(self, callback, output_type, **extra):
        """Queue a step; callback(input, output, **extra) runs later."""
        self.steps.append((callback, output_type, extra))

    def run(self):
        """Execute every queued step, threading temp files between them."""
        step_input = self.input
        last_index = len(self.steps) - 1
        for i, (callback, output_type, extra) in enumerate(self.steps):
            if i == last_index:
                # Last step writes the final output.
                step_output = self.output
            elif self.use_names_for_input:
                step_output = self.namegen.TempNameForInput(
                    self.input, output_type)
            else:
                step_output = self.namegen.TempNameForOutput(output_type)
            callback(step_input, step_output, **extra)
            step_input = step_output
|
{
"content_hash": "7c06e9e72b456af6bdc611c9117a769d",
"timestamp": "",
"source": "github",
"line_count": 884,
"max_line_length": 90,
"avg_line_length": 31.005656108597286,
"alnum_prop": 0.6424896931664782,
"repo_name": "endlessm/chromium-browser",
"id": "d210a021f6a7d79bc30bcd4be466eee9c831ff56",
"size": "27599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_client/pnacl/driver/driver_tools.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import datetime
import logging
import os
import sqlparse
from redash.query_runner import (
NotSupported,
register,
BaseSQLQueryRunner,
TYPE_STRING,
TYPE_BOOLEAN,
TYPE_DATE,
TYPE_DATETIME,
TYPE_INTEGER,
TYPE_FLOAT,
)
from redash.settings import cast_int_or_default
from redash.utils import json_dumps, json_loads
from redash.query_runner import split_sql_statements
from redash import __version__, settings, statsd_client
try:
import pyodbc
enabled = True
except ImportError:
enabled = False
# Map Python value types reported by pyodbc to Redash column types;
# anything unlisted falls back to TYPE_STRING at lookup time.
TYPES_MAP = {
    str: TYPE_STRING,
    bool: TYPE_BOOLEAN,
    datetime.date: TYPE_DATE,
    datetime.datetime: TYPE_DATETIME,
    int: TYPE_INTEGER,
    float: TYPE_FLOAT,
}

# Maximum number of rows fetched per query; overridable via the
# DATABRICKS_ROW_LIMIT environment variable.
ROW_LIMIT = cast_int_or_default(os.environ.get("DATABRICKS_ROW_LIMIT"), 20000)

logger = logging.getLogger(__name__)
def _build_odbc_connection_string(**kwargs):
return ";".join([f"{k}={v}" for k, v in kwargs.items()])
class Databricks(BaseSQLQueryRunner):
    """Redash query runner for Databricks, connecting over ODBC via
    the Simba Spark driver (pyodbc)."""

    noop_query = "SELECT 1"
    should_annotate_query = False

    @classmethod
    def type(cls):
        return "databricks"

    @classmethod
    def enabled(cls):
        # Only usable when pyodbc imported successfully at module load.
        return enabled

    @classmethod
    def configuration_schema(cls):
        return {
            "type": "object",
            "properties": {
                "host": {"type": "string"},
                "http_path": {"type": "string", "title": "HTTP Path"},
                # We're using `http_password` here for legacy reasons
                "http_password": {"type": "string", "title": "Access Token"},
            },
            "order": ["host", "http_path", "http_password"],
            "secret": ["http_password"],
            "required": ["host", "http_path", "http_password"],
        }

    def _get_cursor(self):
        """Open a new ODBC connection and return a cursor on it."""
        user_agent = "Redash/{} (Databricks)".format(__version__.split("-")[0])
        connection_string = _build_odbc_connection_string(
            Driver="Simba",
            UID="token",
            PORT="443",
            SSL="1",
            THRIFTTRANSPORT="2",
            SPARKSERVERTYPE="3",
            AUTHMECH=3,
            # Use the query as is without rewriting:
            UseNativeQuery="1",
            # Automatically reconnect to the cluster if an error occurs
            AutoReconnect="1",
            # Minimum interval between consecutive polls for query execution status (1ms)
            AsyncExecPollInterval="1",
            UserAgentEntry=user_agent,
            HOST=self.configuration["host"],
            PWD=self.configuration["http_password"],
            HTTPPath=self.configuration["http_path"],
        )
        connection = pyodbc.connect(connection_string, autocommit=True)
        return connection.cursor()

    def run_query(self, query, user):
        """Execute *query* (possibly several statements) and return
        (json_data, error).

        Only the last statement's result set is returned, truncated to
        ROW_LIMIT rows (a 'truncated' flag is set when rows remain).
        """
        cursor = None
        try:
            cursor = self._get_cursor()

            for stmt in split_sql_statements(query):
                cursor.execute(stmt)

            error = None
            if cursor.description is not None:
                result_set = cursor.fetchmany(ROW_LIMIT)
                columns = self.fetch_columns(
                    [
                        (i[0], TYPES_MAP.get(i[1], TYPE_STRING))
                        for i in cursor.description
                    ]
                )

                rows = [
                    dict(zip((column["name"] for column in columns), row))
                    for row in result_set
                ]

                data = {"columns": columns, "rows": rows}

                # A full page plus at least one more row means the
                # result set was cut off at ROW_LIMIT.
                if (
                    len(result_set) >= ROW_LIMIT
                    and cursor.fetchone() is not None
                ):
                    logger.warning("Truncated result set.")
                    statsd_client.incr("redash.query_runner.databricks.truncated")
                    data["truncated"] = True
                json_data = json_dumps(data)
            else:
                # Statement produced no result set (e.g. DDL).
                json_data = json_dumps(
                    {
                        "columns": [{"name": "result", "type": TYPE_STRING}],
                        "rows": [{"result": "No data was returned."}],
                    }
                )
        except pyodbc.Error as e:
            # Surface the driver's descriptive message (args[1]) when present.
            if len(e.args) > 1:
                error = str(e.args[1])
            else:
                error = str(e)
            json_data = None
        finally:
            # BUGFIX: previously the cursor (and its connection) leaked
            # whenever an exception occurred, because close() was only
            # reached on the success path. Close it on every path.
            if cursor is not None:
                try:
                    cursor.close()
                except pyodbc.Error:
                    pass

        return json_data, error

    def get_schema(self):
        # Schema loading is handled by get_databases() and the
        # get_database_tables*() methods instead.
        raise NotSupported()

    def get_databases(self):
        """Return the list of database (schema) names."""
        results, error = self.run_query("SHOW DATABASES", None)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json_loads(results)
        first_column_name = results["columns"][0]["name"]
        return [row[first_column_name] for row in results["rows"]]

    def get_database_tables(self, database_name):
        """Return table names (without columns) for *database_name*."""
        schema = {}
        cursor = self._get_cursor()

        cursor.tables(schema=database_name)
        for table in cursor:
            table_name = "{}.{}".format(table[1], table[2])
            if table_name not in schema:
                schema[table_name] = {"name": table_name, "columns": []}
        return list(schema.values())

    def get_database_tables_with_columns(self, database_name):
        """Return tables and their columns for *database_name*."""
        schema = {}
        cursor = self._get_cursor()

        # load tables first, otherwise tables without columns are not showed
        cursor.tables(schema=database_name)
        for table in cursor:
            table_name = "{}.{}".format(table[1], table[2])
            if table_name not in schema:
                schema[table_name] = {"name": table_name, "columns": []}

        cursor.columns(schema=database_name)
        for column in cursor:
            table_name = "{}.{}".format(column[1], column[2])
            if table_name not in schema:
                schema[table_name] = {"name": table_name, "columns": []}
            schema[table_name]["columns"].append(
                {"name": column[3], "type": column[5]})

        return list(schema.values())

    def get_table_columns(self, database_name, table_name):
        """Return the columns of one table."""
        cursor = self._get_cursor()
        cursor.columns(schema=database_name, table=table_name)
        return [{"name": column[3], "type": column[5]} for column in cursor]


register(Databricks)
|
{
"content_hash": "d1e1bc317f9d04c5e94aa38ade5feff7",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 89,
"avg_line_length": 30.425837320574164,
"alnum_prop": 0.5381349268752949,
"repo_name": "getredash/redash",
"id": "5728968869dd4d59c9c8055ce7cef13436450345",
"size": "6359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redash/query_runner/databricks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "Dockerfile",
"bytes": "3500"
},
{
"name": "HTML",
"bytes": "32865"
},
{
"name": "JavaScript",
"bytes": "990852"
},
{
"name": "Less",
"bytes": "196598"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1238254"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "TypeScript",
"bytes": "521588"
}
],
"symlink_target": ""
}
|
"""
This module contains easy to use functions and classes for creating a secure
chat.
"""
__author__ = 'Elias Frantar, Gary Ye'
__version__ = '1.0'
|
{
"content_hash": "8a29b4f26bc120cfbff41b74f5e2a6ea",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 21.428571428571427,
"alnum_prop": 0.6733333333333333,
"repo_name": "gye-tgm/spychat",
"id": "f456f0345c87b81f681f74cde39a6caee7f88d9d",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spychat/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8905"
},
{
"name": "TeX",
"bytes": "23783"
}
],
"symlink_target": ""
}
|
"""Syntactic manipulation of trees."""
import logging
import copy
import os
import warnings
import networkx as nx
from tulip.spec.ast import nodes
from tulip.spec import parser
# inline:
# import tulip.graphics as _graphics
# Public API of this module.
__all__ = [
    'Tree', 'ast_to_labeled_graph',
    'check_for_undefined_identifiers',
    'sub_values', 'sub_constants',
    'sub_bool_with_subtree', 'pair_node_to_var',
    'infer_constants', 'check_var_name_conflict',
    'collect_primed_vars']

logger = logging.getLogger(__name__)
class Tree(nx.MultiDiGraph):
    """Abstract syntax tree as a graph data structure.

    Use this as a scaffold for syntactic manipulation.
    It makes traversals and graph rewriting easier,
    so it is preferred to working directly with the
    recursive AST classes.

    The attribute C{self.root} is the tree's root L{Node}.
    """

    def __init__(self):
        # Root AST node; set by from_recursive_ast() or by callers.
        self.root = None
        super(Tree, self).__init__()

    def __repr__(self):
        return repr(self.root)

    def __str__(self):
        # need to override networkx.DiGraph.__str__
        return ('Abstract syntax tree as graph with edges:\n' +
                str([(str(u), str(v))
                     for u, v, k in self.edges(keys=True)]))

    @property
    def variables(self):
        """Return the set of variables in C{tree}.

        @rtype: C{set} of L{Var}
        """
        return {u for u in self if u.type == 'var'}

    @classmethod
    def from_recursive_ast(cls, u):
        """Build a L{Tree} from the recursive AST rooted at node C{u}."""
        tree = cls()
        tree.root = u
        tree._recurse(u)
        return tree

    def _recurse(self, u):
        """Add node C{u} and (recursively) its operands as graph edges.

        The edge key C{i} records operand order, which matters for
        non-commutative operators.
        """
        if hasattr(u, 'value'):
            # necessary this terminal is the root
            self.add_node(u)
        elif hasattr(u, 'operator'):
            for i, v in enumerate(u.operands):
                self.add_edge(u, v, key=i)
                self._recurse(v)
        else:
            raise Exception('unknown node type: {u}'.format(u=u))
        return u

    def to_recursive_ast(self, u=None):
        """Convert the subtree rooted at C{u} back to a recursive AST.

        Defaults to the whole tree when C{u} is None.
        """
        if u is None:
            u = self.root
        w = copy.copy(u)
        if not self.succ.get(u):
            # Leaf: must be a terminal carrying a value.
            assert hasattr(u, 'value')
        else:
            # Sort successors by edge key to restore operand order.
            w.operands = [self.to_recursive_ast(v)
                          for _, v, _ in sorted(
                              self.edges(u, keys=True),
                              key=lambda x: x[2])]
            assert len(u.operands) == len(w.operands)
        return w

    def add_subtree(self, leaf, tree):
        """Add the C{tree} at node C{nd}.

        @type leaf: L{FOL.Node}

        @param tree: to be added, w/o copying AST nodes.
        @type tree: L{Tree}
        """
        # Only a leaf (no successors) may be replaced by a subtree.
        assert not self.succ.get(leaf)
        for u, v, k in tree.edges(keys=True):
            self.add_edge(u, v, key=k)
        # replace old leaf with subtree root
        ine = self.in_edges(leaf, keys=True)
        if ine:
            assert len(ine) == 1
            ((parent, _, k), ) = ine
            # Reuse the old edge key so operand order is preserved.
            self.add_edge(parent, tree.root, key=k)
        else:
            # The leaf was the root: the whole tree is replaced.
            self.root = tree.root
        self.remove_node(leaf)

    def _to_dot(self, detailed=False):
        """Create GraphViz dot string from given AST.

        @rtype: str
        """
        g = ast_to_labeled_graph(self, detailed)
        # Imported lazily to avoid a hard dependency on graphics support.
        import tulip.graphics as _graphics
        return _graphics.networkx_to_graphviz(g)

    def write(self, filename, detailed=False):
        """Layout AST and save result in PDF file."""
        fname, fext = os.path.splitext(filename)
        fext = fext[1:]  # drop .
        p = self._to_dot(detailed)
        # Keep children drawn in operand order.
        p.graph_attr['ordering'] = 'out'
        p.render(
            filename=filename,
            format=fext)
def ast_to_labeled_graph(tree, detailed):
    """Convert AST to C{NetworkX.DiGraph} for graphics.

    @param tree: abstract syntax tree (L{Tree})
    @param detailed: also show each node's class name in its label
    @rtype: C{networkx.DiGraph}
    """
    def node_label(node):
        if hasattr(node, 'operator'):
            text = node.operator
        elif hasattr(node, 'value'):
            text = node.value
        else:
            raise TypeError(
                'AST node must be Operator or Terminal, '
                'got instead: {u}'.format(u=node) +
                ', of type: {t}'.format(t=type(node)))
        # show both repr and AST node class in each vertex
        if detailed:
            text += '\n' + str(type(node).__name__)
        return text

    g = nx.DiGraph()
    # Use id() as vertex key: AST nodes need not be hashable/unique labels.
    for node in tree:
        g.add_node(id(node), label=node_label(node))
    for src, dst, key in tree.edges(keys=True):
        g.add_edge(id(src), id(dst), label=key)
    return g
def check_for_undefined_identifiers(tree, domains):
    """Check that types in C{tree} are incompatible with C{domains}.

    Raise a C{ValueError} if C{tree} either:

      - contains a variable missing from C{domains}
      - binary operator between variable and
        invalid value for that variable.

    @type tree: L{Tree}

    @param domains: variable definitions:
        C{{'varname': domain}}
        See L{GRSpec} for more details of available domain types.
    @type domains: C{dict}
    """
    for u in tree:
        if u.type == 'var' and u.value not in domains:
            var = u.value
            raise ValueError(
                ('Undefined variable "{var}" missing from '
                 'symbol table:\n\t{doms}\n'
                 'in subformula:\n\t{f}').format(
                     var=var, f=tree.to_recursive_ast(), doms=domains))
        if u.type not in {'str', 'num'}:
            continue
        # is a Const or Num
        var, c = pair_node_to_var(tree, u)
        if c.type == 'str':
            # NOTE(review): 'var' here is a Var AST node, not a name
            # string; this line indexes domains[var] while the check
            # below uses domains[var.value] -- presumably Var hashes/
            # compares by its name. TODO confirm against
            # tulip.spec.ast.nodes.Var.
            dom = domains[var]
            if not isinstance(dom, list):
                raise Exception(
                    ('String constant "{c}" assigned to non-string '
                     'variable "{var}" with domain:\n\t{dom}').format(
                         var=var, c=c, dom=dom))
            if c.value not in domains[var.value]:
                raise ValueError(
                    ('String constant "{c}" is not in the domain '
                     'of variable "{var}"').format(var=var, c=c))
        if c.type == 'num':
            dom = domains[var]
            if not isinstance(dom, tuple):
                # NOTE(review): this message interpolates the constant
                # twice ('{c}' plus the concatenated str(c)) -- looks
                # like a leftover edit; kept as-is.
                raise Exception(
                    ('Number: {c}, assigned to non-integer ' + str(c) +
                     'variable "{var}" with domain:\n\t{dom}').format(
                         var=var, c=c, dom=dom))
            if not dom[0] <= c.value <= dom[1]:
                raise Exception(
                    ('Integer variable "{var}", is assigned the '
                     'value: {c}, that is out of its domain:'
                     '{dom[0]} ... {dom[1]}').format(
                         var=var, c=c, dom=dom))
def sub_values(tree, var_values):
    """Substitute given values for variables.

    Each L{Var} node in C{tree} is replaced in place by a L{Bool},
    L{Num}, or L{Str} node built from C{var_values}.

    @param tree: AST
    @type var_values: C{dict}
    """
    replacements = dict()
    for node in tree.nodes():
        if node.type != 'var':
            continue
        value = var_values[node.value]
        # Pick the AST node class matching the Python value type.
        # (bool is tested before int: bool is an int subclass.)
        if isinstance(value, bool):
            new_node = nodes.Bool(value)
        elif isinstance(value, int):
            new_node = nodes.Num(value)
        elif isinstance(value, str):
            new_node = nodes.Str(value)
        replacements[node] = new_node
    # replace variables by values
    nx.relabel_nodes(tree, replacements, copy=False)
def sub_constants(tree, var_str2int):
    """Replace string constants by integers.

    To be used for converting arbitrary finite domains
    to integer domains prior to calling gr1c.

    @param var_str2int: {'varname': ['const_val0', ...], ...}
    @type var_str2int: C{dict} of C{list}
    """
    replacements = dict()
    for node in tree.nodes():
        if node.type != 'str':
            continue
        var, op = pair_node_to_var(tree, node)
        # A constant's integer code is its index in the domain list
        # of the variable it is compared against.
        index = var_str2int[str(var)].index(node.value)
        # replace Const with Num
        replacements[node] = nodes.Num(str(index))
    nx.relabel_nodes(tree, replacements, copy=False)
def sub_bool_with_subtree(tree, bool2subtree):
    """Replace selected Boolean variables with given AST.

    @type tree: L{LTL_AST}

    @param bool2subtree: map from each Boolean variable to some
        equivalent formula. A subset of Boolean variables may be used.
        Note that the types of variables in C{tree}
        are defined by C{bool2subtree}.
    @type bool2subtree: C{dict} from C{str} to L{Tree}
    """
    # Snapshot the node list: add_subtree mutates the graph.
    for node in list(tree.nodes()):
        if node.type != 'var':
            continue
        if node.value in bool2subtree:
            tree.add_subtree(node, bool2subtree[node.value])
def pair_node_to_var(tree, c):
    """Find variable under L{Binary} operator above given node.

    First move up from C{c}, stop at first L{Binary} node.
    Then move down, until first C{Var}.

    This assumes that only L{Unary} operators appear between a
    L{Binary} and its variable and constant operands.

    May be extended in the future, depending on what the
    tools support and is thus needed here.

    @type tree: L{LTL_AST}
    @type c: L{Const} or L{Num}

    @return: variable, constant
    @rtype: C{(L{Var}, L{Const})}
    """
    # find parent Binary operator
    while True:
        old = c
        c = next(iter(tree.predecessors(c)))
        if c.type == 'operator':
            if len(c.operands) == 2:
                break
    # The sibling of the node we came up from is the variable side.
    p, q = tree.successors(c)
    v = p if q == old else q
    # go down until terminal found
    # assuming correct syntax for gr1c
    while True:
        if not tree.succ.get(v):
            break
        v = next(iter(tree.successors(v)))
    # now: c, is the binary operator and: v, the variable
    return v, c
def infer_constants(formula, variables):
    """Enclose all non-variable names in quotes.

    @param formula: well-formed LTL formula
    @type formula: C{str} or L{LTL_AST}

    @param variables: domains of variables, or only their names.
        If the domains are given, then they are checked
        for ambiguities as for example a variable name
        duplicated as a possible value in the domain of
        a string variable (the same or another).
        If the names are given only, then a warning is raised,
        because ambiguities cannot be checked in that case,
        since they depend on what domains will be used.
    @type variables: C{dict} as accepted by L{GRSpec} or
        container of C{str}

    @return: C{formula} with all string literals not in C{variables}
        enclosed in double quotes
    @rtype: C{str}
    """
    if isinstance(variables, dict):
        # With full domain definitions we can detect ambiguities:
        # each variable name must not clash with other definitions.
        for var in variables:
            others = dict(variables)
            others.pop(var)
            _check_var_conflicts({var}, others)
    else:
        logger.error('infer constants does not know the variable domains.')
        warnings.warn(
            'infer_constants can give an incorrect result '
            'depending on the variable domains.\n'
            'If you give the variable domain definitions as dict, '
            'then infer_constants will check for ambiguities.')
    tree = parser.parse(formula)
    # Any NAME token that is not a known variable becomes a string constant.
    old2new = {
        u: nodes.Const(str(u))
        for u in tree
        if u.type == 'var' and str(u) not in variables}
    nx.relabel_nodes(tree, old2new, copy=False)
    return str(tree)
def _check_var_conflicts(s, variables):
"""Raise exception if set intersects existing variable name, or values.
Values refers to arbitrary finite data types.
@param s: set
@param variables: definitions of variable types
@type variables: C{dict}
"""
# check conflicts with variable names
vars_redefined = {x for x in s if x in variables}
if vars_redefined:
raise Exception('Variables redefined: {v}'.format(v=vars_redefined))
# check conflicts with values of arbitrary finite data types
for var, domain in variables.items():
# not arbitrary finite type ?
if not isinstance(domain, list):
continue
# var has arbitrary finite type
conflicting_values = {x for x in s if x in domain}
if conflicting_values:
raise Exception(
'Values redefined: {v}'.format(v=conflicting_values))
def check_var_name_conflict(f, varname):
    """Raise C{ValueError} if C{varname} already occurs in formula C{f}.

    @return: set of variable names appearing in C{f}
    @rtype: C{set} of C{str}
    """
    tree = Tree.from_recursive_ast(parser.parse(f))
    names = {node.value for node in tree.variables}
    if varname in names:
        raise ValueError('var name "{v}" already used'.format(v=varname))
    return names
def collect_primed_vars(t):
    """Return `set` of variable identifiers in the context of a next operator.

    @type t: recursive AST
    """
    g = Tree.from_recursive_ast(t)
    primed = set()
    # DFS over (node, inside-an-X-context) pairs.
    stack = [(t, False)]
    while stack:
        node, in_next = stack.pop()
        if node.type == 'var' and in_next:
            primed.add(node.value)
        # Terminals have no 'operator' attribute; getattr avoids
        # the try/except dance.
        if getattr(node, 'operator', None) == 'X':
            in_next = True
        stack.extend((child, in_next) for child in g.successors(node))
    return primed
# defunct until further notice
def _flatten(tree, u, to_lang, **kw):
    """Recursively flatten C{tree} into a string via C{to_lang}.

    @rtype: C{str}
    """
    s = tree.succ[u]
    if not s:
        # Terminal node.
        return to_lang(u, **kw)
    elif len(s) == 2:
        # Binary operator: order operands by edge key.
        l, r = s
        if 1 in s[l]:
            l, r = r, l
        l = _flatten(tree, l, to_lang, **kw)
        r = _flatten(tree, r, to_lang, **kw)
        return to_lang(u, l, r, **kw)
    else:
        # Unary operator with a single child.
        (c,) = s
        # NOTE(review): uses u.op here, while the rest of this module
        # uses u.operator -- likely stale; function is marked defunct.
        if u.op == 'X':
            return to_lang(u, _flatten(tree, c, to_lang,
                                       prime=True, **kw), **kw)
        else:
            return to_lang(u, _flatten(tree, c, to_lang, **kw), **kw)
|
{
"content_hash": "e817ffc59741b2dd55478d07f7c66a18",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 78,
"avg_line_length": 30.888402625820568,
"alnum_prop": 0.5589402096911307,
"repo_name": "tulip-control/tulip-control",
"id": "d24c7d2a33a67a3a58f4ad7b60ce81497ec0c024",
"size": "15716",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tulip/spec/transformation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "34858"
},
{
"name": "MATLAB",
"bytes": "43201"
},
{
"name": "Makefile",
"bytes": "3028"
},
{
"name": "Python",
"bytes": "1036380"
},
{
"name": "Raku",
"bytes": "631"
},
{
"name": "Shell",
"bytes": "9640"
},
{
"name": "Tcl",
"bytes": "2894"
}
],
"symlink_target": ""
}
|
import libraries
import os
import new
import test
import unittest
import sys
import glob
def get_suites():
    """Collect suite() results from every test/test_*.py module."""
    here = os.path.dirname(os.path.abspath(sys.modules[__name__].__file__))
    suites = []
    for test_file in glob.glob(here + os.sep + 'test/test_*.py'):
        module_name = os.path.basename(test_file)[:-len('.py')]
        module = __import__('test.' + module_name, globals(), locals(),
                            [module_name])
        # Only modules exposing a callable suite() contribute.
        if callable(getattr(module, 'suite', None)):
            suites.append(module.suite())
    return suites
def suite():
    """Aggregate all discovered test suites into one TestSuite."""
    return unittest.TestSuite(get_suites())
def runGui():
    """Launch the graphical unittest runner on this module's suite."""
    import unittestgui
    unittestgui.main(__name__ + '.suite')
def runConsole():
    """Run the suite with the text-based unittest runner."""
    # Drop extra CLI args so unittest.main does not try to parse them.
    sys.argv = [sys.argv[0]]
    unittest.main(defaultTest='suite')
#DEFAULT_UI = 'gui'
# UI used when the script is run without an argument.
DEFAULT_UI = 'console'

if __name__ == '__main__':
    # Optional first argument selects the UI: 'console' or 'gui'.
    if len(sys.argv) == 1:
        ui = DEFAULT_UI
    else:
        ui = sys.argv[1]
    if ui == 'console':
        runConsole()
    elif ui == 'gui':
        runGui()
    else:
        print >>sys.stderr, "Select ui [console|gui]"
|
{
"content_hash": "d6403e5d014fdbc92d8ecbe7921760b1",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 21.851063829787233,
"alnum_prop": 0.667964946445959,
"repo_name": "sparkslabs/kamaelia",
"id": "af8649cdfc0a116abb28d1202e06b0aabb270869",
"size": "2019",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/PO/KamPlanet/runTests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "M4",
"bytes": "12224"
},
{
"name": "Makefile",
"bytes": "150947"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "OCaml",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Python",
"bytes": "18900785"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707588"
}
],
"symlink_target": ""
}
|
import sys
from PyQt4 import QtGui, QtCore
import matplotlib
matplotlib.use("qt4agg")
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class GUI(QtGui.QMainWindow):
    """Main window: control panel (left) and matplotlib canvas (right),
    separated by a draggable splitter."""

    def __init__(self, parent=None):
        super(GUI, self).__init__(parent)
        # Widgets must exist before the stylesheets below reference them.
        self.buildLayout()
        self.buildMenus()
        self.menuBar()
        self.statusBar()

        ## Style Sheets
        self.splitter.setStyleSheet("QSplitter::handle:horizontal {background-color: #ccc}")
        self.controlWidget.setStyleSheet(".QWidget {background-color: #0ff}")
        menuStyle = """.QMenuBar {background-color: #0ff}
QMenuBar::item {background: transparent}
QMenuBar::item:selected {background: #8ff}"""
        self.statusBar().setStyleSheet(".QStatusBar {background-color: #0ff}")
        self.menuBar().setStyleSheet(menuStyle)

        # .....THIS DOESN"T WORK !! .....
        # NOTE(review): the stylesheet does not show on the matplotlib
        # widget; presumably plain QWidget subclasses need
        # WA_StyledBackground set (or the canvas styled directly) for
        # background rules to paint -- TODO confirm.
        self.mplFig.setStyleSheet("QWidget {background-color: #f00}")
        self.mplFig.setStyleSheet("QWidget {background: #f00}")
        self.mplFig.setStyleSheet("QWidget {color: #f00}")
        # self.mplFig.setStyleSheet("""QWidget {
        # background-color: #0f0;
        # }
        #
        # QWidget::item {
        # background: #0f0;
        # }""")

    def buildLayout(self):
        """Create the control column, the plot area, and the splitter."""
        self.controlWidget = QtGui.QWidget(self)
        self.plotList = QtGui.QListWidget(self)
        self.combo = QtGui.QComboBox(self)
        self.button = QtGui.QPushButton('Plot')
        self.combo.addItems(['1','2','3','4'])
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.plotList)
        layout.addWidget(self.combo)
        layout.addWidget(self.button)
        self.controlWidget.setLayout(layout)
        self.mplFig = MplGrapher()
        self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
        self.splitter.addWidget(self.controlWidget)
        self.splitter.addWidget(self.mplFig)
        self.setCentralWidget(self.splitter)
        # QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))

    def buildMenus(self):
        """Create the File menu with an Open action."""
        openFile = QtGui.QAction('Open', self)
        self.fileMenu = self.menuBar().addMenu('&File')
        self.fileMenu.addAction(openFile)
class MplGrapher(QtGui.QWidget):
    """Widget embedding a matplotlib figure plus its navigation toolbar."""

    def __init__(self, parent=None):
        super(MplGrapher, self).__init__(parent)
        self.initFigure()

    def initFigure(self):
        """Build the figure/canvas/toolbar stack in a vertical layout."""
        self.figure = Figure()
        self.canvas = FigureCanvas(self.figure)
        self.navbar = NavigationToolbar(self.canvas, self)
        self.figure.add_subplot(1, 1, 1)
        # Toolbar above canvas, matching standard matplotlib windows.
        layout = QtGui.QVBoxLayout()
        for widget in (self.navbar, self.canvas):
            layout.addWidget(widget)
        self.layout = layout
        self.setLayout(layout)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the main window,
    # run the event loop until quit.
    app = QtGui.QApplication(sys.argv)
    main = GUI()
    main.show()
    sys.exit(app.exec_())
|
{
"content_hash": "aec4be831f744d17ff1990f4f7c8c94c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 94,
"avg_line_length": 38.56962025316456,
"alnum_prop": 0.6320971447325238,
"repo_name": "chilleo/ALPHA",
"id": "97675d9d92f5fdde63c46efad5d7c83014ac81f5",
"size": "3047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raxmlOutputWindows/ok.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "363975"
},
{
"name": "Python",
"bytes": "672122"
},
{
"name": "R",
"bytes": "26202"
}
],
"symlink_target": ""
}
|
from atom.mixins import AdminTestCaseMixin
from test_plus import TestCase
from poradnia.events.factories import EventFactory
from poradnia.events.models import Event
from poradnia.users.factories import UserFactory
class EventAdminTestCase(AdminTestCaseMixin, TestCase):
    """Admin smoke test for the Event model.

    Pure configuration: the three class attributes below are consumed by
    AdminTestCaseMixin (presumably to build a user, create Event fixtures
    via the factory and exercise the model's admin pages -- confirm against
    the mixin's implementation).
    """
    user_factory_cls = UserFactory
    factory_cls = EventFactory
    model = Event
|
{
"content_hash": "aca25054afc398aa37390ec9d39616eb",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.8184357541899442,
"repo_name": "watchdogpolska/poradnia",
"id": "b859813b9c5c768051ce3a0943cba95a56903a04",
"size": "358",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "poradnia/events/tests/test_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "213565"
},
{
"name": "Dockerfile",
"bytes": "212"
},
{
"name": "HTML",
"bytes": "149976"
},
{
"name": "JavaScript",
"bytes": "1251748"
},
{
"name": "Makefile",
"bytes": "912"
},
{
"name": "Python",
"bytes": "461894"
},
{
"name": "SCSS",
"bytes": "55433"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
}
|
from collections import Callable
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# From http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        # Only create the sentinel/map once: __init__ may be re-invoked on an
        # already-initialized instance (e.g. od.__init__(pairs) to update it).
        try:
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list in insertion order, yielding each link's KEY.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        # Break the link cycles explicitly to help reference counting.
        try:
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        # Unlink the first or last real node around the sentinel.
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # self is taken positionally so that a key named 'self' can be passed
        # as a keyword argument.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update  # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        # A private sentinel distinguishes "no default given" from default=None.
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running is a deliberate shared mutable default: it guards
        # against infinite recursion when an od (indirectly) contains itself.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        # Drop the implementation attributes (__root/__map) from the pickled
        # instance dict; they are rebuilt by __init__ on unpickling.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
class DefaultOrderedDict(OrderedDict):
    """An OrderedDict with ``collections.defaultdict`` semantics.

    Missing keys are created on first access via ``default_factory`` and
    therefore appear at the *end* of the insertion order.
    """
    def __init__(self, default_factory=None, *a, **kw):
        # Mirror defaultdict: the factory may be None (no auto-creation)
        # or any callable.
        if (default_factory is not None and
            not isinstance(default_factory, Callable)):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory
    def __getitem__(self, key):
        'd[key], creating a default entry on a miss when a factory is set.'
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)
    def __missing__(self, key):
        # Called on lookup misses: create, store and return a fresh default.
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value
    def __reduce__(self):
        'Return state information for pickling'
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,
        # The fifth element of a __reduce__ tuple must be an *iterator* of
        # (key, value) pairs; the C pickler rejects a plain list, so wrap
        # items() in iter() (was: bare self.items()).
        return type(self), args, None, None, iter(self.items())
    def copy(self):
        'd.copy() -> a shallow copy of d (same default_factory).'
        return self.__copy__()
    def __copy__(self):
        return type(self)(self.default_factory, self)
    def __deepcopy__(self, memo):
        import copy
        # Materialize the items before deep-copying so this also works when
        # items() returns a view rather than a list (Python 3).
        return type(self)(self.default_factory,
                          copy.deepcopy(list(self.items())))
    def __repr__(self):
        # Report the actual class name (the old literal said
        # 'OrderedDefaultDict', which is not this class's name).
        return '%s(%s, %s)' % (type(self).__name__, self.default_factory,
                               OrderedDict.__repr__(self))
|
{
"content_hash": "0d123f95727cc1f65bc58e34cb8bd624",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 87,
"avg_line_length": 33.97019867549669,
"alnum_prop": 0.5483965298762062,
"repo_name": "antiface/dsp-testbed",
"id": "c5e8807aad020f30d7b5dfcfd707de179f19ccdd",
"size": "10259",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dsptestbed/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46541"
}
],
"symlink_target": ""
}
|
import hashlib
import collections
from androguard.core import androconf
from androguard.core.bytecodes.apk import *
from androguard.core.bytecodes.dvm import *
from androguard.core.analysis.analysis import *
from androguard.decompiler.decompiler import *
from androguard.misc import save_session, load_session
class Session(object):
    """Holds the analysis state for a set of APK/DEX/DEY files.

    Keeps four parallel registries, all keyed by SHA-256 hex digest of the
    raw bytes: analyzed_files (filename -> list of digests),
    analyzed_digest (digest -> filename), analyzed_apk (digest -> [APK,
    dex digests...]) and analyzed_dex (digest -> (DalvikVMFormat, analysis)).
    """
    def __init__(self, export_ipython=False):
        # export_ipython: when true, dex objects get create_python_export()
        # called after analysis.
        self.setupObjects()
        self.export_ipython = export_ipython
    def save(self, filename):
        """Serialize the four registries to `filename` via save_session."""
        save_session([self.analyzed_files, self.analyzed_digest,
                      self.analyzed_apk, self.analyzed_dex], filename)
    def load(self, filename):
        """Restore the four registries from `filename` via load_session."""
        self.analyzed_files, self.analyzed_digest, self.analyzed_apk, self.analyzed_dex = load_session(
            filename)
    def setupObjects(self):
        """(Re)create empty registries; analyzed_files keeps insertion order."""
        self.analyzed_files = collections.OrderedDict()
        self.analyzed_digest = {}
        self.analyzed_apk = {}
        self.analyzed_dex = {}
    def reset(self):
        """Drop all analysis state."""
        self.setupObjects()
    def isOpen(self):
        """Return True once at least one file has been registered."""
        return self.analyzed_digest != {}
    def addAPK(self, filename, data):
        """Parse raw APK bytes and register them under their digest.

        Assumes add() already created the analyzed_files[filename] list.
        Returns (digest, APK).
        """
        digest = hashlib.sha256(data).hexdigest()
        androconf.debug("add APK:%s" % digest)
        apk = APK(data, True)
        self.analyzed_apk[digest] = [apk]
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        androconf.debug("added APK:%s" % digest)
        return (digest, apk)
    def addDEX(self, filename, data, dx=None):
        """Parse raw DEX bytes, run (or extend) the analysis and register.

        Returns (digest, DalvikVMFormat, analysis).
        """
        digest = hashlib.sha256(data).hexdigest()
        androconf.debug("add DEX:%s" % digest)
        d = DalvikVMFormat(data)
        dx = self.runAnalysis(d, dx)
        androconf.debug("added DEX:%s" % digest)
        self.analyzed_dex[digest] = (d, dx)
        if filename not in self.analyzed_files:
            self.analyzed_files[filename] = []
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        if self.export_ipython:
            d.create_python_export()
        return (digest, d, dx)
    def addDEY(self, filename, data, dx=None):
        """Like addDEX but for odex payloads (DalvikOdexVMFormat)."""
        digest = hashlib.sha256(data).hexdigest()
        androconf.debug("add DEY:%s" % digest)
        d = DalvikOdexVMFormat(data)
        dx = self.runAnalysis(d, dx)
        androconf.debug("added DEY:%s" % digest)
        self.analyzed_dex[digest] = (d, dx)
        if filename not in self.analyzed_files:
            self.analyzed_files[filename] = []
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        if self.export_ipython:
            d.create_python_export()
        return (digest, d, dx)
    def runAnalysis(self, d, dx=None):
        """Attach `d` to a new or existing VM analysis and wire decompiler.

        Returns the (possibly newly created) analysis object.
        """
        androconf.debug("VMAnalysis ...")
        if dx is None:  # identity check (was the non-idiomatic `dx == None`)
            dx = newVMAnalysis(d)
        else:
            dx.add(d)
        dx.create_xref()
        d.set_decompiler(DecompilerDAD(d, dx))
        d.set_vmanalysis(dx)
        return dx
    def add(self, filename, raw_data, dx=None):
        """Detect the payload type of `raw_data` and register it.

        Handles APK (including every contained classes*.dex, all sharing
        one analysis), bare DEX and DEY.  Returns True on success, False
        for unrecognized payloads.
        """
        ret = androconf.is_android_raw(raw_data)
        if ret:
            self.analyzed_files[filename] = []
            digest = hashlib.sha256(raw_data).hexdigest()
            if ret == "APK":
                apk_digest, apk = self.addAPK(filename, raw_data)
                dex_files = list(apk.get_all_dex())
                if dex_files:
                    # First dex creates the analysis; the rest extend it.
                    dex_digest, _, dx = self.addDEX(filename, dex_files[0], dx)
                    self.analyzed_apk[digest].append(dex_digest)
                    for i in range(1, len(dex_files)):
                        dex_digest, _, _ = self.addDEX(filename, dex_files[i],
                                                       dx)
                        self.analyzed_apk[digest].append(dex_digest)
            elif ret == "DEX":
                self.addDEX(filename, raw_data, dx)
            elif ret == "DEY":
                self.addDEY(filename, raw_data, dx)
            else:
                return False
            return True
        return False
    def get_classes(self):
        """Yield (index, filename, digest, classes) for every analyzed dex."""
        idx = 0
        for filename in self.analyzed_files:
            for digest in self.analyzed_files[filename]:
                if digest in self.analyzed_dex:
                    d, _ = self.analyzed_dex[digest]
                    yield idx, filename, digest, d.get_classes()
                    idx += 1
    def get_analysis(self, current_class):
        """Return the analysis object containing `current_class`, or None."""
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            if dx.is_class_present(current_class.get_name()):
                return dx
        return None
    def get_format(self, current_class):
        """Return the DalvikVMFormat containing `current_class`, or None."""
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            if dx.is_class_present(current_class.get_name()):
                return d
        return None
    def get_filename_by_class(self, current_class):
        """Return the source filename of `current_class`, or None."""
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            if dx.is_class_present(current_class.get_name()):
                return self.analyzed_digest[digest]
        return None
    def get_digest_by_class(self, current_class):
        """Return the dex digest containing `current_class`, or None."""
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            if dx.is_class_present(current_class.get_name()):
                return digest
        return None
    def get_strings(self):
        """Yield (digest, filename, strings-analysis) per analyzed dex."""
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            yield digest, self.analyzed_digest[digest], dx.get_strings_analysis(
            )
    def get_nb_strings(self):
        """Return the total number of analyzed strings across all dexes."""
        nb = 0
        for digest in self.analyzed_dex:
            d, dx = self.analyzed_dex[digest]
            nb += len(dx.get_strings_analysis())
        return nb
    def get_objects_apk(self, filename):
        """Return (APK, dex-or-list-of-dex, analysis-or-None) for `filename`.

        NOTE(review): when the APK holds several dex files a *list* of
        DalvikVMFormat objects is returned and the analysis slot stays
        None; with exactly one dex the (d, dx) pair is returned -- confirm
        callers expect this asymmetry.
        """
        digest = self.analyzed_files.get(filename)
        if digest:
            a = self.analyzed_apk[digest[0]][0]
            d = None
            dx = None
            if len(self.analyzed_apk[digest[0]][1:]) > 1:
                d = []
                for dex_file in self.analyzed_apk[digest[0]][1:]:
                    d.append(self.analyzed_dex[dex_file][0])
            else:
                dex_file = self.analyzed_dex[self.analyzed_apk[digest[0]][1]]
                d = dex_file[0]
                dx = dex_file[1]
            return a, d, dx
        return None
    def get_objects_dex(self):
        """Yield the (DalvikVMFormat, analysis) pairs of every dex."""
        for digest in self.analyzed_dex:
            yield self.analyzed_dex[digest]
|
{
"content_hash": "a31552f0e696289b30b87746882dcdf9",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 103,
"avg_line_length": 32.63,
"alnum_prop": 0.5612932883849219,
"repo_name": "Fuzion24/androguard",
"id": "fae9b0df986ba3acf7ce5d999a5c8f6ff3c35eee",
"size": "6526",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "androguard/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "384130"
},
{
"name": "C++",
"bytes": "57017"
},
{
"name": "Makefile",
"bytes": "6008"
},
{
"name": "Python",
"bytes": "27488994"
}
],
"symlink_target": ""
}
|
"""Step implementations for changelogs."""
from asserts import assert_equal
from behave import then # pylint: disable=no-name-in-module
@then("the {item} changelog reads")
@then("the changelog reads")
def check_changelog(context, item=None):
"""Check that the changelog contains the text."""
item_path = f"{item}/{context.uuid[item]}/" if item else ""
response = context.get(f"changelog/{item_path}10")
for index, line in enumerate(context.text.split("\n")):
assert_equal(line, response["changelog"][index]["delta"])
|
{
"content_hash": "ab1f40a24d8b4f6cf82229f25bb4d809",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 38.92857142857143,
"alnum_prop": 0.6954128440366972,
"repo_name": "ICTU/quality-time",
"id": "4ac53396d0cda7b8127c10855ff66e52c22110c5",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/feature_tests/steps/changelog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11325"
},
{
"name": "Dockerfile",
"bytes": "7493"
},
{
"name": "Gherkin",
"bytes": "48447"
},
{
"name": "HTML",
"bytes": "1575"
},
{
"name": "JavaScript",
"bytes": "547159"
},
{
"name": "Python",
"bytes": "1386198"
},
{
"name": "Shell",
"bytes": "19321"
}
],
"symlink_target": ""
}
|
import argparse
import shutil
import os
import tempfile
import subprocess
DESCRIPTION = """caca - Copy And Convert Audio.
A very simple one-shot parallel audio converter that behaves a lot like cp"""
args = None
def detect_utils():
    """Ensure the external encoder/decoder binaries are on PATH.

    Raises FileNotFoundError naming the first required tool (flac, lame,
    oggenc -- checked in that order) that cannot be found.
    """
    required = ('flac', 'lame', 'oggenc')
    missing = [name for name in required if not shutil.which(name)]
    if missing:
        raise FileNotFoundError("'{}' was not found on your system but is required".format(missing[0]))
# Codec support modules; convert() below accesses their `extensions`,
# `direct_convert`, `decode` and `encode` attributes.
import flac, mp3
format_modules = [flac, mp3]
def get_extension(filename):
    """Return the lower-cased extension of *filename* without the dot.

    Returns '' when the name has no extension (dotfiles like '.hidden'
    count as having none, per os.path.splitext).
    """
    suffix = os.path.splitext(filename)[1].lower()
    return suffix[1:] if suffix else suffix
def first(fn, seq):
    """Return the first element of *seq* for which fn(element) is truthy.

    Returns None when no element matches.
    """
    return next((element for element in seq if fn(element)), None)
def shell_command(cmd, src, target):
    """Fill the {src}/{target} placeholders in *cmd* and run it via the shell.

    Returns True when the command exits with status 0.
    """
    # NOTE(review): only double quotes are escaped before interpolation;
    # other shell metacharacters ($, `, \) in file names would still be
    # interpreted by the shell.  Safe only for trusted paths.
    cmd = cmd.format(src=src.replace('"', '\\"'), target=target.replace('"', '\\"'))
    if args.verbose:
        print("--> " + cmd)
    return subprocess.call(cmd, shell=True) == 0
def convert(src, target):
    """Convert *src* into *target* based on their file extensions.

    Returns False when either extension is not handled by a format module;
    copies verbatim when the extensions match; otherwise uses the source
    format's direct conversion command if available, falling back to a
    decode-to-temp-file / re-encode pipeline.  Returns True on success.
    """
    src_ext = get_extension(src)
    src_format = first(lambda module: src_ext in module.extensions, format_modules)
    target_ext = get_extension(target)
    target_format = first(lambda module: target_ext in module.extensions, format_modules)
    if not src_format or not target_format:
        return False
    if src_ext == target_ext:
        return copy_raw(src, target)
    # check whether we can convert directly between those formats
    direct = src_format.direct_convert(target_ext, src, target)
    if direct:
        return shell_command(direct, src, target)
    # Fallback: decode src into a temporary file, then encode that into
    # target.  BUGFIX: the old code wrapped this in a local function and
    # returned the *function object* without calling it (always truthy, the
    # pipeline never ran), and passed the temp file as the decode source so
    # src was never read.
    with tempfile.NamedTemporaryFile() as intermediate:
        return (shell_command(src_format.decode, src, intermediate.name) and
                shell_command(target_format.encode, intermediate.name, target))
def copy_raw(src, target):
    """Copy *src* to *target* byte-for-byte; always returns True.

    (The return value lets convert()/handle_file() treat a plain copy like
    a successful conversion.)
    """
    shutil.copyfile(src, target)
    return True
def handle_file(src, target):
    """Convert (or copy) one source file into the target location.

    The output keeps the source's base name with the extension replaced by
    ``args.format``.  Honors the --skip-existing, --skip-unknown and
    --verbose flags from the parsed CLI namespace.
    """
    # create target directory
    target_dir = target if os.path.isdir(target) else os.path.dirname(target)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # build target filename: source stem + configured output extension
    # (removed the unused `ext`/`e` locals the old code computed)
    stem = os.path.splitext(os.path.basename(src))[0]
    target = os.path.join(target_dir, stem + "." + args.format)
    if os.path.exists(target) and args.skip_existing:
        if args.verbose:
            print("[existing] '{}' -> '{}'".format(src, target))
        return
    if convert(src, target):
        if args.verbose:
            print("[converted] '{}' -> '{}'".format(src, target))
    else:
        # conversion not possible (unknown format): copy verbatim unless
        # the user asked to skip unknown file types
        if args.skip_unknown:
            if args.verbose:
                print("[skip] '{}'".format(src))
        else:
            if args.verbose:
                print("[copy] '{}' -> '{}'".format(src, target))
            copy_raw(src, target)
def handle_path(src, target):
    """Process *src* recursively: descend into directories, mirroring the
    directory structure under *target*; hand files to handle_file()."""
    if not os.path.isdir(src):
        handle_file(src, target)
        return
    for entry in os.listdir(src):
        handle_path(os.path.join(src, entry), os.path.join(target, entry))
def main():
    """CLI entry point: parse arguments, validate source/target shapes and
    convert/copy every source path into the target.

    Raises RuntimeError for cp-style misuse (multiple sources into a
    non-directory target, or a directory source onto a file target).
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument("sources", metavar="SOURCE", nargs='+', help="input path")
    parser.add_argument("target", metavar="TARGET", help="output path")
    parser.add_argument("-v", "--verbose", help="print progress output", action="store_true")
    parser.add_argument("-R", "-r", "--recursive", help="copy directories recursively", action="store_true")
    parser.add_argument("-a", "--archive", help="same as --recursive but preserves attributes", action="store_true")
    parser.add_argument("-s", "--skip-unknown", help="do not copy unknown file types", action="store_true")
    parser.add_argument("-S", "--skip-existing", help="do not overwrite existing files", action="store_true")
    parser.add_argument("-f", "--format", help="target file extension (mp3/flac/ogg/wav)", default="mp3")
    # overwrite the module-level args namespace used by the helpers
    global args
    args = parser.parse_args()
    detect_utils()
    # Handle case where we have more than one sources but target is not a directory
    # BUGFIX: `RuntimeException` does not exist in Python (raising it was a
    # NameError); use the builtin RuntimeError.
    if len(args.sources) > 1 and os.path.isfile(args.target):
        raise RuntimeError("target '{}' is not a directory".format(args.target))
    # Handle case where the source is a directory but target is a file
    if os.path.isdir(args.sources[0]) and os.path.isfile(args.target):
        raise RuntimeError("cannot overwrite non-directory '{}' with directory '{}'".format(args.target, args.sources[0]))
    for src in args.sources:
        target = args.target
        # Checks if the source should be moved to a new folder based on file/dirname or directly into target folder
        if os.path.isdir(args.target) and len(args.sources) > 1 or args.target[-1] == "/":
            target = os.path.join(target, os.path.basename(src))
        handle_path(src, target)
if __name__ == '__main__':
    # script entry point
    main()
|
{
"content_hash": "7313a71d5a1dd16c78635585f1d41a4a",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 126,
"avg_line_length": 33.826388888888886,
"alnum_prop": 0.6364196263600903,
"repo_name": "Nukesor/caca",
"id": "e9549413e42255f8bc03e6e40afd976f2ebb777d",
"size": "4913",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "caca.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5444"
}
],
"symlink_target": ""
}
|
"""
Calculate the ratings matrix using linear win-difference ratio
Notation is used from the following paper:
http://www.phys.utk.edu/sorensen/ranking/Documentation/Sorensen_documentation_v1.pdf
"""
from pathlib import Path
import logging
from scipy.optimize import lsq_linear
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
from plotnine import ggplot, aes, geom_line, geom_label, theme_bw, labs, guides
import warnings
__author__ = 'Ryne Carbone'
logger = logging.getLogger(__name__)
def calc_n_g(df_schedule, week):
    """Select the completed games through the given week, i.e. Ng.

    Keeps rows with matchupPeriodId <= week whose winner is decided, and
    returns only the home/away id and total-points columns.

    :param df_schedule: data frame with each matchup
    :param week: current matchup period id
    :return: data frame of games with team ids and scores for home/away
    """
    played = ((df_schedule['matchupPeriodId'] <= week)
              & (df_schedule['winner'] != 'UNDECIDED'))
    score_cols = ['home_id', 'away_id', 'home_total_points', 'away_total_points']
    return df_schedule.loc[played, score_cols]
def calc_r_g(df_scores, dS_max=35., B_w=30., B_r=35.):
    """Compute the per-game result R_g from home/away scores.

    R_g = B_w*sign(mov) + dS_max*tanh(mov/dS_max) + B_r*mov/winning_score,
    where mov is the home margin of victory.  Intermediate columns
    (home_mov, home_mov_trunc, score_winner, home_bonus_sign, R_g) are
    added to df_scores as a side effect.

    :param df_scores: data frame with columns home_id, away_id,
        home_total_points, away_total_points
    :param dS_max: maximum score differential for truncating
    :param B_w: bonus for win
    :param B_r: bonus for score ratio
    :return: the R_g series
    """
    home_pts = df_scores.get('home_total_points')
    away_pts = df_scores.get('away_total_points')
    df_scores['home_mov'] = home_pts - away_pts
    df_scores['home_mov_trunc'] = dS_max * np.tanh(df_scores['home_mov'] / dS_max)
    # winning score: element-wise max of the two totals
    df_scores['score_winner'] = np.maximum(home_pts, away_pts)
    df_scores['home_bonus_sign'] = np.sign(df_scores['home_mov'])
    df_scores['R_g'] = (B_w * df_scores['home_bonus_sign']
                        + df_scores['home_mov_trunc']
                        + B_r * df_scores['home_mov'] / df_scores['score_winner'])
    return df_scores.get('R_g')
def calc_sig_g(N_g, df_ranks, beta_w):
    """Compute the per-game sigma (inverse square-root weight).

    Games between closely-ranked teams get weight w_g near 1 (small sigma);
    large rank gaps are down-weighted exponentially with scale alpha_w.
    As a side effect, home_rank/away_rank columns are added to N_g.

    :param N_g: data frame with home_id/away_id per game
    :param df_ranks: data frame with team_id and the previous ranks
    :param beta_w: controls the weighting scale alpha_w
    :return: sigma for each game
    """
    rank_spread = df_ranks.get('ranks').max(axis=0) - df_ranks.get('ranks').min(axis=0)
    alpha_w = rank_spread / np.log(beta_w * beta_w)
    def _rank_of(team):
        # look up a team's previous rank by id
        return df_ranks.loc[df_ranks.team_id == team, 'ranks'].values[0]
    N_g['home_rank'] = N_g.get('home_id').apply(_rank_of)
    N_g['away_rank'] = N_g.get('away_id').apply(_rank_of)
    rank_gap = np.fabs(N_g.get('home_rank') - N_g.get('away_rank'))
    # sigma = 1/sqrt(w_g) with w_g = exp(-gap/alpha_w)
    return 1 / np.sqrt(np.exp(-rank_gap / alpha_w))
def calc_ranks_lsq_iter(df_teams, N_g, R_g, prev_ranks=None, beta_w=2.2, initial_pass=False):
    """Calculates new rankings based on previous rankings using linear lsq algorithm

    :param df_teams: data frame with team ids
    :param N_g: data frame with rows for each game and home/away ids
    :param R_g: game results based on score differential
    :param prev_ranks: data frame with previous iteration rankings for each team
    :param beta_w: control weighting of alpha_w
    :param initial_pass: flag to indicate first iteration of algorithm
    :return: data frame with team_id and the new rankings
    """
    # First pass: weight every game equally; later passes weight by how
    # close the two teams' previous ranks were.
    if initial_pass:
        sig_g = np.ones(N_g.index.size)
    else:
        sig_g = calc_sig_g(N_g=N_g, df_ranks=prev_ranks, beta_w=beta_w)
    # Calculate the coefficient vector
    b = R_g/sig_g
    # Get dimensions for matrix
    max_id = max(N_g.get('away_id').max(),
                 N_g.get('home_id').max())
    n_games = N_g.get('home_id').size
    # Calculate the matrix using COO formatted matrix (n_games x n_teams dimensions)
    # Elements are +/- 1/sig_g if team is home/away else 0
    home_coo = coo_matrix((1/sig_g, (N_g.home_id.index, N_g.home_id.values)), shape=(n_games, max_id+1))
    away_coo = coo_matrix((-1/sig_g, (N_g.away_id.index, N_g.away_id.values)), shape=(n_games, max_id+1))
    A = home_coo + away_coo
    # Solve for the rankings, constrained to the interval [30, 130]
    res = lsq_linear(A=A, b=b, bounds=(30, 130))
    if not res.success:  # idiomatic truthiness test (was `== False`)
        logger.warning(f'WARNING: {res.message}')
    # Match the rankings to the teams and return a data frame
    new_ranks = pd.DataFrame(dict(team_id=df_teams.team_id, ranks = res.x[df_teams.team_id.values]))
    return new_ranks
def get_ranks_lsq(df_teams, df_schedule, year, week, B_w=30., B_r=35., dS_max=35., beta_w=2.2, show=False):
    """Calculate iterative LSQ rankings, and save plot

    :param df_teams: data frame with team_ids
    :param df_schedule: data frame with data for each matchup
    :param year: current year
    :param week: current week
    :param B_w: bonus for wins
    :param B_r: bonus for score ratio
    :param dS_max: max home mov for truncation
    :param beta_w: for measuring alpha_w
    :param show: flag for showing plot
    :return: data frame with team_id and rankings
    """
    logger.debug('Calculating ranks using LSQ method with 100 iterations')
    # Build the game list and per-game results for completed matchups.
    N_g = calc_n_g(df_schedule, week)
    R_g = calc_r_g(N_g, dS_max=dS_max, B_w=B_w, B_r=B_r)
    # Iteration 0: unweighted pass to seed the rankings.
    df_ranks = calc_ranks_lsq_iter(
        df_teams=df_teams,
        N_g=N_g,
        R_g=R_g,
        prev_ranks=None,
        beta_w=beta_w,
        initial_pass=True
    )
    prev_ranks = df_ranks
    # Iterate with previous ranks as input, recalculate weight; each
    # iteration's ranks are appended to df_ranks as a column named p.
    for p in range(1, 100):
        prev_ranks = calc_ranks_lsq_iter(
            df_teams=df_teams,
            N_g=N_g,
            R_g=R_g,
            prev_ranks=prev_ranks,
            beta_w=beta_w,
            initial_pass=False
        )
        df_ranks = pd.concat([df_ranks, prev_ranks.ranks.rename(p)], axis=1)
    # plot the iterations, save the figure, and reduce to final rankings
    df_final_ranks = plot_save_rank(
        df_ranks=df_ranks,
        df_teams=df_teams,
        year=year,
        week=week,
        show=show
    )
    return df_final_ranks
def plot_save_rank(df_ranks, df_teams, year, week, show=False):
    """Plot the ranking iterations for each team

    :param df_ranks: data frame with team_id, and rankings for each iteration
    :param df_teams: data frame with team_id and owner info
    :param year: year for data
    :param week: current week
    :param show: flag to display the plot
    :return: final summarised rankings data frame with columns for team_id and ranks
    """
    # Plot each iteration; attach owner first names for labeling
    df_ranks_lsq = pd.merge(df_teams[['team_id', 'firstName']], df_ranks, on='team_id')
    # Space out labels on x-axis according to final (iteration-99) rankings
    df_ranks_lsq['label_x_pos'] = df_ranks_lsq.get(99).rank() * 100 / df_ranks_lsq.get(99).size
    # Convert to long format (one row per team per iteration) for plotting ease
    df_ranks_lsq_long = (
        df_ranks_lsq
        .rename({'ranks': '0'}, axis='columns')
        .melt(id_vars=['team_id', 'firstName', 'label_x_pos'])
    )
    # Convert iteration variable to int
    df_ranks_lsq_long.variable = df_ranks_lsq_long.variable.astype(int)
    # Make the plot: one line per team, name labels on the last iteration
    p = (
        ggplot(aes(x='variable', y='value', color='factor(team_id)', group='team_id'),
               data=df_ranks_lsq_long) +
        geom_line() +
        geom_label(aes(label='firstName', x='label_x_pos', y='value', color='factor(team_id)'),
                   data=df_ranks_lsq_long[df_ranks_lsq_long.variable == 99],
                   size=10) +
        labs(x='Iteration', y='LSQ rank') +
        theme_bw() +
        guides(color=False)
    )
    # Save plot
    if show:
        p.draw()
    # make dir if it doesn't exist already
    out_dir = Path(f'output/{year}/week{week}')
    out_dir.mkdir(parents=True, exist_ok=True)
    out_name = out_dir / 'lsq_iter_rankings.png'
    # plotnine is throwing too many warnings; suppress only around save
    warnings.filterwarnings('ignore')
    p.save(out_name, width=9, height=6, dpi=300)
    warnings.filterwarnings('default')
    logger.info(f'Saved LSQ rankings plot to local file: {out_name.resolve()}')
    # Final rank: tanh of each team's mean over iterations 71-99 (the
    # post-convergence tail), scaled by 75
    df_final_ranks = (
        df_ranks_lsq_long
        .query('variable>70')
        .groupby(['team_id'])[['value']]
        .agg(lambda x: np.tanh(np.mean(x) / 75.))
        .reset_index()
        .rename({'value': 'lsq'}, axis=1)
    )
    # Normalize by max score
    df_final_ranks['lsq'] = df_final_ranks.get('lsq') / df_final_ranks.get('lsq').max()
    return df_final_ranks
|
{
"content_hash": "f0ad228a99a2cb0262e302b865db954c",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 114,
"avg_line_length": 36.93607305936073,
"alnum_prop": 0.6642353813821239,
"repo_name": "rynecarbone/power_ranker",
"id": "28ced4429ef5fa3d657ec42142b5ace45ccb649c",
"size": "8112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "power_ranker/lsq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "117088"
}
],
"symlink_target": ""
}
|
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseBadRequest, QueryDict
from django.views.decorators.csrf import csrf_protect
from django.core.exceptions import PermissionDenied
from django.template import RequestContext
from smartsnippets_inherit.models import InheritPageContent, OverwriteVariable
from smartsnippets.models import SmartSnippetPointer
from cms.utils.permissions import has_plugin_permission
@csrf_protect
def variables_edit_view(request, plugin_id):
    """Render (and on POST/DELETE, mutate) the overwrite variables of an
    InheritPageContent plugin.

    The target snippet plugin id comes from the request ('snippet_plugin'
    in the DELETE body, otherwise from the merged GET/POST data).
    POST: upsert an OverwriteVariable for each submitted "_<name>_" field.
    DELETE: remove every OverwriteVariable of this plugin for the snippet's
    variables.  Always responds with the variables widget template, where
    overwrite values shadow the snippet's own variables by name.

    Raises PermissionDenied unless the user may change this plugin type.
    """
    plugin = get_object_or_404(InheritPageContent, id=plugin_id)
    if not has_plugin_permission(request.user, plugin.plugin_type, "change"):
        raise PermissionDenied
    snippet_plugin_id = None
    if request.method == 'DELETE':
        snippet_plugin_id = QueryDict(request.body).get('snippet_plugin')
    # NOTE(review): request.REQUEST (merged GET/POST) was removed in
    # Django 1.9; this view targets an older Django.
    snippet_plugin_id = (snippet_plugin_id or
                         request.REQUEST.get('snippet_plugin'))
    if snippet_plugin_id is None:
        return HttpResponseBadRequest('Snippet plugin missing')
    snippet_plugin = get_object_or_404(
        SmartSnippetPointer, id=snippet_plugin_id
    )
    variables = snippet_plugin.variables.filter(
        snippet_variable__snippet=snippet_plugin.snippet
    )
    overwrite_variables = None
    if request.method == 'POST':
        variables = variables.select_related('snippet_variable')
        overwrite_variables = []
        for var in variables:
            # form fields are named "_<variable name>_"
            new_value = request.POST.get("_%s_" % var.snippet_variable.name)
            if new_value is None:
                continue
            # upsert: update the existing overwrite or create a new one
            try:
                existing_var = OverwriteVariable.objects.get(
                    plugin=plugin, variable=var)
                existing_var.value = new_value
                existing_var.save()
                overwrite_variables.append(existing_var)
            except (OverwriteVariable.DoesNotExist, ):
                new_var = OverwriteVariable.objects.create(
                    plugin=plugin, variable=var, value=new_value)
                overwrite_variables.append(new_var)
    if overwrite_variables is None:
        overwrite_variables = OverwriteVariable.objects.filter(
            plugin=plugin, variable__in=list(variables))
    if request.method == 'DELETE':
        overwrite_variables.delete()
        overwrite_variables = []
    # transform all into Variable instances; overwrites replace the
    # originals in the dict because they share the variable name key
    overwrite_as_vars = [v.to_variable() for v in overwrite_variables]
    vars_to_render = {
        var.snippet_variable.name: var
        for var in list(variables) + overwrite_as_vars
    }
    return render_to_response('smartsnippets/variables_widgets.html', {
        'plugin': plugin,
        'variables': sorted(vars_to_render.values(),
                            key=lambda var: var.snippet_variable.name)
    }, context_instance=RequestContext(request))
|
{
"content_hash": "e97f0c6b88626e0b070ebd042c751497",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 39.916666666666664,
"alnum_prop": 0.6670146137787056,
"repo_name": "pbs/django-cms-smartsnippets",
"id": "41750578c4fe407d99a5d59c2d364cc1e1b15659",
"size": "2874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartsnippets_inherit/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15195"
},
{
"name": "HTML",
"bytes": "32562"
},
{
"name": "JavaScript",
"bytes": "314695"
},
{
"name": "Python",
"bytes": "163661"
},
{
"name": "Ruby",
"bytes": "799"
}
],
"symlink_target": ""
}
|
import psycopg2
class py2psql:
    """Lightweight psycopg2 helper bound to one database (and optionally one table).

    Provides parameterised select/insert/update/delete against the bound
    table, raw SQL execution, and simple DDL helpers (create/alter/drop).
    Every operation records its outcome in an internal status dictionary
    retrievable via status():  { "state", "data", "info" }.

    NOTE(review): table and column names are interpolated into SQL text
    (values themselves are always passed as query parameters); callers must
    not feed untrusted identifiers into this class.
    """
    # private members
    # __host     : URL or IP
    # __port     : postgresql server port
    # __db       : database name as a string
    # __tb       : table name as a string
    # __user     : as a string
    # __pwd      : as a string
    # __columns  : cached table column names
    # __datatype : cached column data types { "col" : { "type" : "code", "null" : "True/False" } }
    # __retStatus: last operation status as a dictionary
    __host = ""
    __port = ""
    __db = ""
    __tb = ""
    __user = ""
    __pwd = ""
    __columns = []
    __datatype = {}
    __retStatus = { }
    #
    # desc : constructor
    # param@getTB : can be an empty string when only using execsql()
    #
    def __init__(self, getHost, getPort, getDB, getTB, getUser, getPwd):
        self.__host = getHost
        self.__port = getPort
        self.__db = getDB
        self.__tb = getTB
        self.__user = getUser
        self.__pwd = getPwd
        self.__columns = []
        self.__datatype = {}
        self.__retStatus = { "state" : 0, "data" : {}, "info" : "" }
        # fetch column information for the bound table, if any
        if len(getTB) > 0:
            self.__tableSchema()
    # ------------------------
    # private member
    # ------------------------
    #
    # desc : define server DSN
    # retn : string
    #
    def __serverDSN(self):
        conStr = ["host=" + self.__host, "port=" + self.__port, "dbname=" + self.__db, "user=" + self.__user , "password=" + self.__pwd]
        return ' '.join(conStr)
    #
    # desc : get table schema (caches column names in self.__columns)
    # retn : None
    #
    def __tableSchema(self):
        # Connect to an existing database
        conn = psycopg2.connect(self.__serverDSN())
        # Open a cursor to perform database operations
        cur = conn.cursor()
        # select sql; cursor.description carries the column metadata
        selectStr = "select * from " + self.__tb
        cur.execute(selectStr)
        # get columns
        self.__columns = [desc[0] for desc in cur.description]
        # close communication
        cur.close()
        conn.close()
    #
    # desc : get table column data type
    # retn : column data type in the table
    #
    def __tableColDatatype(self):
        # Connect to an existing database
        conn = psycopg2.connect(self.__serverDSN())
        # Open a cursor to perform database operations
        cur = conn.cursor()
        # select sql
        selectStr = "select * from " + self.__tb
        cur.execute(selectStr)
        # get column data type (description index 1 = type code, 6 = null_ok)
        for item in cur.description:
            self.__datatype.setdefault(item[0], { "type" : item[1] , "null" : item[6] })
        # close communication
        cur.close()
        conn.close()
        return self.__datatype
    #
    # desc : get col index in the column order
    # retn : -1 (not found) or index number
    #
    def __getColIndex(self, getColName):
        if getColName in self.__columns:
            return self.__columns.index(getColName)
        else:
            return -1
    #
    # desc : set returned status
    # retn : None
    #
    def __setStatus(self, getStatus, getInfo, getData):
        self.__retStatus["state"] = getStatus
        self.__retStatus["data"] = getData
        self.__retStatus["info"] = getInfo
    #
    # desc : get column description on the execution pointer
    # param@getCur   : a psycopg2 connect cursor
    # param@curIndex : index on the cursor description
    # retn : [] data type
    #
    def __getCurDesc(self, getCur, curIndex):
        curInfo = [desc[curIndex] for desc in getCur.description]
        return curInfo
    # ------------------------
    # public member
    # ------------------------
    #
    # desc : returned status
    # retn : return executing status
    #
    def status(self):
        return self.__retStatus
    #
    # desc : get table schema
    # param@getTable  : get desired table schema (None = use the bound table)
    # param@descIndex : description index of table schema, negative : means all
    # retn : status object
    #
    def getTableSchema(self, getTable=None, descIndex=0):
        if self.__tb == "" and getTable == None:
            self.__setStatus("failure","There is no table assigned.",{})
        elif self.__tb != "" and getTable == None:
            try:
                self.__tableSchema()
                self.__setStatus("success","Get the table schema.", self.__columns)
            except Exception:
                self.__setStatus("failure","Can not get the table schema.", self.__columns)
        elif getTable != None:
            # Connect to an existing database
            conn = psycopg2.connect(self.__serverDSN())
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # select sql
            selectStr = "select * from " + getTable
            try:
                cur.execute(selectStr)
                # get columns desc
                if descIndex < 0:
                    getColDesc = [desc for desc in cur.description]
                else:
                    getColDesc = [desc[descIndex] for desc in cur.description]
                self.__setStatus("success","Get the table schema.", getColDesc)
            except Exception:
                self.__setStatus("failure","Can not get the table schema.", {})
            # close communication
            cur.close()
            conn.close()
        return self.__retStatus
    #
    # desc : select operation
    # param@getConds  : {}, defined where SQL conditions
    # param@getParams : [], selected column names, empty : means all
    # param@asdict    : boolean, returned row as dictionary data type
    # retn : data as [] type
    # note : also support status object, use status()
    #
    def select(self, getConds, getParams, asdict=False):
        # filter the column value
        colSelected = "*"
        colList = []
        retdata = []
        dataTuple = ()
        # keep only columns that really exist in the bound table
        if len(getParams) > 0:
            for item in getParams:
                if self.__getColIndex(item) > -1:
                    colList.append(item)
        # set selected columns
        if len(colList) > 0:
            colSelected = ','.join(colList)
        try:
            # Connect to an existing database
            conn = psycopg2.connect(self.__serverDSN())
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # select sql; condition values are bound as parameters
            selectStr = "select " + colSelected + " from " + self.__tb
            if len(getConds.keys()) > 0:
                selectStr += " where "
                item = 0
                for key, value in getConds.items():
                    if item != 0:
                        selectStr += " and "
                    selectStr += str(key) + "= %s "
                    item += 1
                    dataTuple += (value,)
            selectStr += ";"
            # parameter-based select sql
            cur.execute(selectStr, dataTuple)
            # get all data
            rawdata = cur.fetchall()
            # modify data to customized type
            if asdict:
                if len(colList) > 0:
                    for pair in rawdata:
                        tmpDict = {}
                        for item in range(0,len(pair),1):
                            tmpDict.setdefault(colList[item],pair[item])
                        retdata.append(tmpDict)
                else:
                    for pair in rawdata:
                        tmpDict = {}
                        for item in range(0,len(pair),1):
                            tmpDict.setdefault(self.__columns[item],pair[item])
                        retdata.append(tmpDict)
            else:
                retdata = rawdata
            # close communication
            cur.close()
            conn.close()
            # set status
            self.__setStatus("success", "Select operation succeeded.", retdata)
        except Exception:
            self.__setStatus("failure", "Select operation executed failed.", retdata)
        return retdata
    #
    # desc : update operation
    # param@getParams : {}, set sql parameters
    # param@getConds  : {}, where sql conditions
    # retn : 0 : failure, 1 : success
    # note : also support status object, use status()
    #
    def update(self, getParams, getConds):
        # 0 : failure, 1 : success
        retStatus = 1
        # filter the column value
        paraKeys = getParams.keys()
        condKeys = getConds.keys()
        paraList = []
        condList = []
        dataTuple = ()
        # every SET column must exist in the table
        if len(paraKeys) > 0:
            for item in paraKeys:
                if self.__getColIndex(item) > -1:
                    paraList.append(item)
                else:
                    retStatus = 0
                    self.__setStatus("failure","Set SQL was checked in failure.",{})
                    return retStatus
        # every WHERE column must exist in the table
        if len(condKeys) > 0:
            for item in condKeys:
                if self.__getColIndex(item) > -1:
                    condList.append(item)
                else:
                    retStatus = 0
                    self.__setStatus("failure","Where SQL was checked in failure.",{})
                    return retStatus
        # update sql
        updateStr = "update " + self.__tb + " set "
        if len(paraList) > 0:
            paraListItem = []
            for item in paraList:
                paraListItem.append(item + "= %s ")
                dataTuple += (getParams[item],)
            updateStr += ' , '.join(paraListItem)
        else:
            retStatus = 0
            self.__setStatus("failure","Set SQL was checked in failure.",{})
            return retStatus
        updateStr += " where "
        if len(condList) > 0:
            condListItem = []
            for item in condList:
                condListItem.append(item + "= %s ")
                dataTuple += (getConds[item],)
            updateStr += ' and '.join(condListItem)
        else:
            retStatus = 0
            self.__setStatus("failure","Where SQL was checked in failure.",{})
            return retStatus
        updateStr += ";"
        try:
            # Connect to an existing database
            conn = psycopg2.connect(self.__serverDSN())
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # parameter-based update sql
            cur.execute(updateStr, dataTuple)
            # commit the transaction
            conn.commit()
            # close communication
            cur.close()
            conn.close()
            self.__setStatus("success","Update operation succeeded.",{})
        except Exception:
            retStatus = 0
            self.__setStatus("failure","Update operation was executed in failure.",{})
        return retStatus
    #
    # desc : insert operation
    # param@getParams : {}, value sql parameters
    # retn : 0 : failure, 1 : success
    # note : also support status object, use status()
    #
    def insert(self, getParams):
        # 0 : failure, 1 : success
        retStatus = 1
        # filter the column value
        paraKeys = getParams.keys()
        paraList = []
        insertedData = ()
        # every inserted column must exist in the table
        if len(paraKeys) > 0:
            for item in paraKeys:
                if self.__getColIndex(item) > -1:
                    paraList.append(item)
                else:
                    retStatus = 0
                    self.__setStatus("failure","Data parameter was empty.",{})
                    return retStatus
        # insert string
        insertStr = "insert into " + self.__tb + " ("
        for index in range(0,len(paraList),1):
            if index != 0:
                insertStr += ', '
            insertStr += paraList[index]
        insertStr += ') values ('
        for index in range(0,len(paraList),1):
            if index != 0:
                insertStr += ', '
            insertStr += "%s"
            insertedData += (getParams[paraList[index]],)
        insertStr += ')'
        try:
            # Connect to an existing database
            conn = psycopg2.connect(self.__serverDSN())
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # parameter-based insertion sql
            cur.execute(insertStr,insertedData)
            # commit the transaction
            conn.commit()
            # close communication
            cur.close()
            conn.close()
            self.__setStatus("success","Insert operation succeeded.",{})
        except Exception:
            self.__setStatus("failure","Insert operation was executed in failure.",{})
            retStatus = 0
        return retStatus
    #
    # desc : delete operation
    # param@getConds : {}, where sql conditions
    # retn : 0 : failure, 1 : success
    # note : also support status object, use status()
    #
    def delete(self, getConds):
        # 0 : failure, 1 : success
        retStatus = 1
        # filter the column value
        condKeys = getConds.keys()
        condList = []
        selectedTuple = ()
        # every WHERE column must exist in the table
        if len(condKeys) > 0:
            for item in condKeys:
                if self.__getColIndex(item) > -1:
                    condList.append(item)
                else:
                    retStatus = 0
                    self.__setStatus("failure","Where parameter was empty.",{})
                    return retStatus
        # refuse an unconditioned delete (would wipe the whole table)
        if len(condList) < 1:
            retStatus = 0
            self.__setStatus("failure","Value in where parameter was empty.",{})
            return retStatus
        # delete string
        deleteStr = "delete from " + self.__tb + " where "
        for index in range(0,len(condList),1):
            if index != 0:
                deleteStr += ' and '
            deleteStr += condList[index] + " = " + "%s"
            selectedTuple += (getConds[condList[index]],)
        # delete transaction
        try:
            # Connect to an existing database
            conn = psycopg2.connect(self.__serverDSN())
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # parameter-based sql
            cur.execute(deleteStr, selectedTuple)
            # commit the transaction
            conn.commit()
            # close communication
            cur.close()
            conn.close()
            self.__setStatus("success","Delete operation succeeded.",{})
        except Exception:
            self.__setStatus("failure","Delete operation was executed in failure.",{})
            retStatus = 0
        return retStatus
    #
    # desc : execute complex sql command
    # param@getSQL : parameter-based complex sql command
    #                e.g. "select * from public.user where name = %(name)s;"
    # param@hasRetValue : are there returned values ?
    # param@getParams : {}
    #                e.g. {'name' : "test114"}
    # param@asdict : only works when param@hasRetValue is true, returned value as dictionary data type
    # retn : None; call status() to get the result
    #
    def execsql(self, getSQL, hasRetValue, getParams, asdict=True):
        # save returned data as dictionary data type
        retData = []
        # check data type is allowed
        if not isinstance(getParams, dict):
            self.__setStatus("failure", "Parameters must be as dictionary type.", {})
            return
        try:
            # connect to db
            conn = psycopg2.connect(self.__serverDSN())
        except Exception:
            self.__setStatus("failure", "Can not connect to db.", {})
            return
        try:
            # Open a cursor to perform database operations
            cur = conn.cursor()
            # parameter-based select sql
            cur.execute(getSQL, getParams)
        except Exception:
            # BUGFIX: the connection used to leak on an execute failure
            conn.close()
            self.__setStatus("failure", "SQL was executed in failure.", {})
            return
        rawdata = {}
        try:
            if hasRetValue:
                # select
                # get columns
                execColumns = self.__getCurDesc(cur, 0)
                # get all data
                rawdata = cur.fetchall()
                if asdict:
                    # transform each tuple row into a dictionary keyed by column name
                    tmp = {}
                    for item in range(0, len(rawdata), 1):
                        tmp = {}
                        for col in range(0, len(execColumns), 1):
                            tmp.setdefault(execColumns[col], rawdata[item][col])
                        retData.append(tmp)
            else:
                # insert, delete, update
                conn.commit()
            # close communication
            cur.close()
            conn.close()
        except Exception:
            # BUGFIX: the cursor/connection used to leak on a fetch/commit failure
            try:
                cur.close()
                conn.close()
            except Exception:
                pass
            self.__setStatus("failure", "Data can not be queried or SQL command can not be executed.", {})
            return
        if hasRetValue:
            if asdict:
                self.__setStatus("success", "SQL command was executed.", retData)
            else:
                self.__setStatus("success", "SQL command was executed.", rawdata)
        else:
            self.__setStatus("success", "SQL command was executed.", {})
        return
    #
    # desc : create table based on schema
    # param@tableName   : name of the table for creation
    # param@tableSchema : { 'colName' : 'colSchema', '' : '' }
    # param@dropFirst   : whether to drop table first if it exists
    # retn : None, call status() to get status object
    #
    def createTable(self, tableName, tableSchema, dropFirst=False):
        if not (isinstance(tableSchema, dict)):
            self.__setStatus("failure", "Parameters are not correct.", {})
            return
        # check table status (whether it exists or not)
        try:
            self.execsql("select * from information_schema.tables where table_name = %(name)s;", True, {'name' : tableName})
        except Exception:
            self.__setStatus("failure", "Can not get the table list.", {})
            return
        if self.__retStatus["state"] != "success":
            self.__setStatus("failure", "Can not check table status.", {})
            return
        if len(self.__retStatus["data"]) > 0:
            # table already exists
            if dropFirst:
                # delete first
                self.execsql("drop table if exists " + tableName + ";", False, {})
                if self.__retStatus["state"] != "success":
                    # BUGFIX: previously subscripted the bound method
                    # (self.__setStatus["data"], a TypeError) and then fell
                    # through to the create step, masking the drop failure.
                    self.__setStatus("failure", self.__retStatus["info"] + " Can not drop the data table.", {})
                    return
            else:
                self.__setStatus("failure", "The table already exists, if it does not drop, the table can not be created.", {})
                return
        # create table
        tmpKey = list(tableSchema.keys())
        createTBCmd = "create table if not exists " + tableName + " ( "
        for colIndex in range(0, len(tmpKey), 1):
            if colIndex != 0:
                createTBCmd += ', '
            createTBCmd += tmpKey[colIndex] + " " + tableSchema[tmpKey[colIndex]]
        createTBCmd += " );"
        try:
            self.execsql(createTBCmd, False, {})
        except Exception:
            self.__setStatus("failure", "Unexcepted error on creating the data table.", {})
            return
        if self.__retStatus["state"] != "success":
            self.__setStatus("failure", "Can not create data table.", {})
            return
        else:
            self.__setStatus("success", "Create data table successfully.", {})
    #
    # desc : alter table schema
    # param@tableName   : table for altering
    # param@tableSchema : { 'colName' : 'new col schema' }
    # param@createTableFirstIfNotExisted : whether to create table first if table does not exist
    # param@addColIfNotExisted : whether to add column if it does not exist
    # param@theSameWithThisSchema : whether to fit the table with the input schema
    # retn : None, call status() to get status object
    # note : if addColIfNotExisted == False, the column for altering would be skipped
    #
    def alterTable(self, \
        tableName, \
        tableSchema, \
        createTableFirstIfNotExisted=True, \
        addColIfNotExisted=True,\
        theSameWithThisSchema=True):
        if not (\
            isinstance(tableName, str) and \
            isinstance(tableSchema, dict) and\
            isinstance(createTableFirstIfNotExisted, bool) and\
            isinstance(addColIfNotExisted, bool)
        ):
            self.__setStatus("failure", "Parameters are not correct.", {})
            return
        # check table status (whether it exists or not)
        try:
            self.execsql("select * from information_schema.tables where table_name = %(name)s;", True, {'name' : tableName})
        except Exception:
            self.__setStatus("failure", "Can not get the table list.", {})
            return
        if self.__retStatus["state"] != "success":
            self.__setStatus("failure", "Can not check table status.", {})
            return
        # table does not exist
        if len(self.__retStatus["data"]) < 1:
            if createTableFirstIfNotExisted:
                # create table first
                self.createTable(tableName, tableSchema, False)
                if self.__retStatus["state"] != "success":
                    self.__setStatus("failure", self.__retStatus["info"] + " Can not create the data table.", {})
                    return
            else:
                self.__setStatus("failure", "The table does not exist, if it does not be created, the alter operation would be stop.", {})
                return
        # table exists
        else:
            # get table column name
            crtColName = self.getTableSchema(tableName, 0)['data']
            warningFlag = 0
            warningMsg = ""
            for name, schema in tableSchema.items():
                if name in crtColName:
                    # the same column name : retype the existing column
                    self.execsql(\
                        "alter table " + tableName + " alter column " + name + " type " + schema + " ;",
                        False,
                        {},
                        False
                    )
                    if self.__retStatus["state"] != "success":
                        warningFlag = 1
                        warningMsg = warningMsg + ' [alter column failure]' + self.__retStatus["info"]
                    # remove the column from the list;
                    # the columns left over may be dropped below
                    crtColName.remove(name)
                else:
                    # there is no existing column
                    if addColIfNotExisted:
                        self.execsql(\
                            "alter table " + tableName + " add column " + name + " " + schema + " ;",
                            False,
                            {},
                            False
                        )
                        if self.__retStatus["state"] != "success":
                            warningFlag = 1
                            warningMsg = warningMsg + ' [add column failure]' + self.__retStatus["info"]
                    else:
                        warningFlag = 1
                        warningMsg = warningMsg + ' [not to add column] The column ' + name + ' does not exist and also not to create if it does not exist.'
            # drop every column not mentioned in the requested schema
            if theSameWithThisSchema:
                for colName in crtColName:
                    self.execsql(\
                        "alter table " + tableName + " drop column if exists " + colName + " ;",
                        False,
                        {},
                        False
                    )
                    if self.__retStatus["state"] != "success":
                        warningFlag = 1
                        warningMsg = warningMsg + ' [drop column failure] ' + colName + ' ' + self.__retStatus["info"]
            if warningFlag == 1:
                self.__setStatus("warning",warningMsg,{})
            else:
                self.__setStatus("success","Alter table completely.",{})
    #
    # desc : drop table
    # param@tableName : table for droping
    # retn : None, call status() to get status object
    #
    def dropTable(self, tableName):
        if not (isinstance(tableName, str)):
            self.__setStatus("failure", "Parameters are not correct.", {})
            return
        # check table status (whether it exists or not)
        try:
            self.execsql("select * from information_schema.tables where table_name = %(name)s;", True, {'name' : tableName})
        except Exception:
            self.__setStatus("failure", "Can not get the table list.", {})
            return
        if self.__retStatus["state"] != "success":
            self.__setStatus("failure", "Can not check table status.", {})
            return
        if len(self.__retStatus["data"]) > 0:
            # table exists
            self.execsql("drop table if exists " + tableName + ";", False, {}, False)
            if self.__retStatus["state"] == "success":
                self.__setStatus("success", "Drop table " + tableName + " successfully.", {})
            else:
                self.__setStatus("failure", "Can not drop table " + tableName + ".", {})
        else:
            # table does not exist
            self.__setStatus("success", "Table " + tableName + " does not exist.", {})
|
{
"content_hash": "74f9699cdcf48f7edb4ec73080f03232",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 156,
"avg_line_length": 33.86472819216182,
"alnum_prop": 0.48325680367342366,
"repo_name": "jiankaiwang/seed",
"id": "18a12003354f06e8b3f76650d6755794a4159a2c",
"size": "29363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/py2psql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "32941"
},
{
"name": "C++",
"bytes": "4940"
},
{
"name": "CSS",
"bytes": "458"
},
{
"name": "Java",
"bytes": "29638"
},
{
"name": "JavaScript",
"bytes": "26297"
},
{
"name": "PHP",
"bytes": "24620"
},
{
"name": "Perl",
"bytes": "507"
},
{
"name": "Python",
"bytes": "95079"
},
{
"name": "R",
"bytes": "9676"
},
{
"name": "TypeScript",
"bytes": "6378"
}
],
"symlink_target": ""
}
|
'''OpenGL extension ARB.point_sprite
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Canonical name of the extension this auto-generated module wraps.
EXTENSION_NAME = 'GL_ARB_point_sprite'
_DEPRECATED = False
# OpenGL enumerants exposed as PyOpenGL Constant objects.
GL_POINT_SPRITE_ARB = constant.Constant( 'GL_POINT_SPRITE_ARB', 0x8861 )
# Register with glget: glGet* on this token yields a single value (shape (1,)).
glget.addGLGetConstant( GL_POINT_SPRITE_ARB, (1,) )
GL_COORD_REPLACE_ARB = constant.Constant( 'GL_COORD_REPLACE_ARB', 0x8862 )
def glInitPointSpriteARB():
    '''Return boolean indicating whether this extension is available'''
    available = extensions.hasGLExtension(EXTENSION_NAME)
    return available
|
{
"content_hash": "e0ee6ff01e2252aa9e7e3bb445ea9df1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 36.5,
"alnum_prop": 0.7701674277016742,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "55b6067e274b3c8c57b1f2320be4e7013dfce00a",
"size": "657",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/ARB/point_sprite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import ftplib
import StringIO
import zipfile
import parser_libxml
import lxml
#from helper import DBConnector
import psycopg2
import datetime
import sys,os,traceback
from helper import DBConnector
def get_zip_data(filename, callback):
    """Stream remote *filename* from the zakupki FTP server into *callback*.

    callback receives successive chunks of the file's bytes.
    Returns the FTP transfer response line from retrbinary().
    """
    ftp = ftplib.FTP("ftp.zakupki.gov.ru", "free", "free")
    try:
        # Pass the callback straight through; the old lambda wrapper added
        # nothing.  (Also renamed the result: 'file' shadowed the builtin.)
        return ftp.retrbinary("RETR " + filename, callback)
    finally:
        # BUGFIX: the FTP connection used to leak on every call.
        ftp.close()
def parse_file_id(file_id,filename):
zipdata = StringIO.StringIO()
get_zip_data(filename,zipdata.write)
try:
myzipfile = zipfile.ZipFile(zipdata)
except:
'Not zip file:' + filename
return
finally:
for name in myzipfile.namelist():
print "Start work!"
content=myzipfile.open(name)
tree = lxml.etree.parse(content)
root = tree.xpath(".")
elements=root[0].xpath("./*")
parser=parser_libxml.ZakupkiXMLParser()
parser.file_id=str(file_id)
parser.writeToConsole()
parser.writeToDb()
#parser.setDbConn(dbConn_string)
data = []
for i in elements:
parser.handle_element('',i)
#data+='--Start--',i
#data+='BEGIN;'
parser.generate_insert_statements(parser.values)
#data+='commit;'
#data+= '--End--',i
#print data
parser.save_uids()
parser.values=dict()
return
def main():
db_connector = DBConnector('default')
curr=db_connector.getCursor()
conn=db_connector.getConn()
curr.execute("SELECT id,path FROM files_list where inserted=false and locked=false and pg_try_advisory_lock(tableoid::INTEGER,id) and path like '%contract%2014%' limit 1")
[(id,path)] = curr.fetchall()
results=[]
while id > 0:
try:
dt = datetime.datetime.now()
curr.execute("update files_list set locked=true ,lock_time=now() where id=%s",(id,))
conn.commit()
print "--Try get file" , id , path
print "--RAISE warning 'Start to read file:%s:%s';" % (id,path)
dt = datetime.datetime.now()
parse_file_id(id,path)
except Exception as e:
print "--Poblem!",e
conn.rollback()
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
traceback.print_exc()
finally:
curr.execute("update files_list set locked=false,inserted=true,insert_time=now() where id=%s; select pg_advisory_unlock(tableoid::INTEGER,id) from files_list where id = %s",(id,id))
conn.commit()
curr.execute("SELECT id,path FROM files_list where inserted=false and locked=false and pg_try_advisory_lock(tableoid::INTEGER,id) and path like '%contract%2014%' limit 1")
[(id,path)] = curr.fetchall()
print "Seems, there is no more to insert."
print results
# Script entry point: run the claim-and-parse worker loop only when executed
# directly, not when imported.
if __name__ == "__main__":
    main()
|
{
"content_hash": "948e7d3998c74f4f9602a5d4b73e6a64",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 193,
"avg_line_length": 35.08988764044944,
"alnum_prop": 0.579250720461095,
"repo_name": "Dixon3/zakpars",
"id": "d5cab1a82e6821a613f5be075f03b53f2dfd8e64",
"size": "3170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parallel_python/parseFileFromFtp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17264"
}
],
"symlink_target": ""
}
|
import speech_recognition as sr  # Speech-to-text input
import wordToNum  # To convert spoken number words into integers
#from eqSolver import eqSolver
from sympy import *  # Needed by the symbolic-math experiments sketched below

# Recognizer instance used to interpret the recorded audio.
r = sr.Recognizer()

# Record one utterance from the default microphone.
with sr.Microphone() as source:
    print("Say your equation:")
    audio = r.listen(source)

######################################
# FUTURE WORK: solve symbolic equations with sympy, e.g.
#   x = Symbol('x'); solve(equation, x)           # equation solving
#   limit(sin(x)/x, x, 0)                         # limits
#   integrate(exp(x)*sin(x) + exp(x)*cos(x), x)   # integrals
#   diff(sin(x)*exp(x), x)                        # derivatives
######################################

# keys.txt and results.txt are parallel line-for-line files: a spoken phrase
# and the mathematical text that should replace it.  (BUGFIX: the files were
# previously opened without ever being closed.)
with open('keys.txt') as keyfile:
    keys = [line.rstrip('\n') for line in keyfile]
with open('results.txt') as resultfile:
    results = [line.rstrip('\n') for line in resultfile]

try:
    # Lower-case the transcript so it matches the phrase dictionary.
    equation = r.recognize_google(audio).lower()
    print(equation)  # Show what the recognizer heard
    # Replace each spoken phrase with its mathematical equivalent.
    for phrase, replacement in zip(keys, results):
        equation = equation.replace(phrase, replacement)
    equation = equation.strip()
    try:
        # SECURITY NOTE: eval() executes arbitrary Python and the input comes
        # from speech recognition; replace with a real expression parser
        # before exposing this beyond personal use.
        ev = eval(equation)
        print(equation)
        print(ev)
    except Exception:
        # The transcript often omits a closing parenthesis; try appending one.
        # (BUGFIX: these were bare 'except:' clauses, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        try:
            equation = equation + ")"
            ev = eval(equation)
            print(equation)
            print(ev)
        except Exception:
            print("Unable to evaluate equation")
except sr.UnknownValueError:
    # Most common failure: the audio could not be transcribed at all.
    print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
|
{
"content_hash": "586ba131c3d6a829a4a2a3b0a9091caa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 342,
"avg_line_length": 60.564516129032256,
"alnum_prop": 0.7145139813581891,
"repo_name": "Alex-Gurung/MathSpeech",
"id": "58fca407fd96031b0f5fb02b213496e0d749b107",
"size": "3755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7088"
}
],
"symlink_target": ""
}
|
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
try:
import _ssl
except ImportError:
# no SSL support
pass
else:
def ssl(sock, keyfile=None, certfile=None):
# we do an internal import here because the ssl
# module imports the socket module
import ssl as _realssl
warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
DeprecationWarning, stacklevel=2)
return _realssl.sslwrap_simple(sock, keyfile, certfile)
# we need to import the same constants we used to...
from _ssl import SSLError as sslerror
from _ssl import \
RAND_add, \
RAND_egd, \
RAND_status, \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
import os, sys, warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import errno
except ImportError:
errno = None
# errno values with numeric fallbacks (the conventional POSIX numbers) for
# platforms whose errno module does not define them.
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)
# Public API: the names defined here plus everything _socket exports.
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
# Keep a handle on the C-level socket constructor; the _socketobject wrapper
# below delegates to it.
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
    # Map Winsock error numbers to human-readable descriptions.
    errorTab = {
        10004: "The operation was interrupted.",
        10009: "A bad file handle was passed.",
        10013: "Permission denied.",
        10014: "A fault occurred on the network??",  # WSAEFAULT
        10022: "An invalid operation was attempted.",
        10035: "The socket operation would block",
        10036: "A blocking operation is already in progress.",
        10048: "The network address is in use.",
        10054: "The connection has been reset.",
        10058: "The network has been shut down.",
        10060: "The operation timed out.",
        10061: "Connection refused.",
        10063: "The name is too long.",
        10064: "The host is down.",
        10065: "The host is unreachable.",
    }
    __all__.append("errorTab")
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.
    The hostname returned by gethostbyaddr() is checked first, then any
    aliases it reports.  If no fully qualified name is available, the
    plain hostname is returned.
    """
    name = name.strip()
    # Empty name (or the INADDR_ANY placeholder) means "this machine".
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Lookup failed: fall back to whatever the caller gave us.
        return name
    # Prefer the first name that looks fully qualified (contains a dot).
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
# Names of raw-socket methods that get thin delegating wrappers generated on
# _socketobject below (see the exec loop inside that class).  Note sendall,
# getsockopt, settimeout and friends are later overridden by explicit
# definitions where default arguments are needed.
_socketmethods = (
    'bind', 'connect', 'connect_ex', 'fileno', 'listen',
    'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
    'sendall', 'setblocking',
    'settimeout', 'gettimeout', 'shutdown')
# Platform-specific extras exposed by the underlying C socket type.
if os.name == "nt":
    _socketmethods = _socketmethods + ('ioctl',)
if sys.platform == "riscos":
    _socketmethods = _socketmethods + ('sleeptaskw',)
class _closedsocket(object):
    # Sentinel installed in place of the real socket by close(): every
    # subsequent socket operation raises EBADF, mimicking a closed OS-level
    # file descriptor.
    __slots__ = []
    def _dummy(*args):
        raise error(EBADF, 'Bad file descriptor')
    # All _delegate_methods must also be initialized here.
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
    __getattr__ = _dummy
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):
    # Python-level wrapper over the C-level `_realsocket` type.  Most methods
    # are thin delegates to self._sock; dup()/makefile() share the underlying
    # socket and use _io_refs reference counting to defer the real close.
    __doc__ = _realsocket.__doc__
    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        # _sock is an internal hook used by accept()/dup() to wrap an
        # already-open platform socket instead of creating a new one.
        if _sock is None:
            _sock = _realsocket(family, type, proto)
        self._sock = _sock
        self._io_refs = 0     # number of live makefile() file objects
        self._closed = False  # close() was requested on this wrapper
    def send(self, data, flags=0):
        return self._sock.send(data, flags=flags)
    send.__doc__ = _realsocket.send.__doc__
    def recv(self, buffersize, flags=0):
        return self._sock.recv(buffersize, flags=flags)
    recv.__doc__ = _realsocket.recv.__doc__
    def recv_into(self, buffer, nbytes=0, flags=0):
        return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags)
    recv_into.__doc__ = _realsocket.recv_into.__doc__
    def recvfrom(self, buffersize, flags=0):
        return self._sock.recvfrom(buffersize, flags=flags)
    recvfrom.__doc__ = _realsocket.recvfrom.__doc__
    def recvfrom_into(self, buffer, nbytes=0, flags=0):
        return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags)
    recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__
    # sendto(data, address) or sendto(data, flags, address); param2/param3
    # mirror the C calling convention rather than naming either form.
    def sendto(self, data, param2, param3=None):
        if param3 is None:
            return self._sock.sendto(data, param2)
        else:
            return self._sock.sendto(data, param2, param3)
    sendto.__doc__ = _realsocket.sendto.__doc__
    # NOTE(review): close() is defined twice in this class; this first
    # definition is shadowed by the refcount-aware one further down and is
    # therefore dead code — confirm before removing.
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._sock = _closedsocket()
    close.__doc__ = _realsocket.close.__doc__
    def accept(self):
        sock, addr = self._sock.accept()
        # Wrap the freshly accepted platform socket in a new wrapper object.
        return _socketobject(_sock=sock), addr
    accept.__doc__ = _realsocket.accept.__doc__
    def dup(self):
        """dup() -> socket object
        Return a new socket object connected to the same system resource."""
        return _socketobject(_sock=self._sock)
    def makefile(self, mode='r', bufsize=-1):
        """makefile([mode[, bufsize]]) -> file object
        Return a regular file object corresponding to the socket. The mode
        and bufsize arguments are as for the built-in open() function."""
        self._io_refs += 1
        return _fileobject(self, mode, bufsize)
    def _decref_socketios(self):
        # Called by _fileobject.close(); performs the deferred close once the
        # last file object is gone and close() was already requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()
    def _real_close(self):
        # This function should not reference any globals. See issue #808164.
        self._sock.close()
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()
    family = property(lambda self: self._sock.family, doc="the socket family")
    type = property(lambda self: self._sock.type, doc="the socket type")
    proto = property(lambda self: self._sock.proto, doc="the socket protocol")
    # Delegate many calls to the raw socket object.  Each name in
    # _socketmethods gets a generated positional-argument wrapper.
    _s = ("def %(name)s(self, %(args)s): return self._sock.%(name)s(%(args)s)\n\n"
          "%(name)s.__doc__ = _realsocket.%(name)s.__doc__\n")
    for _m in _socketmethods:
        # yupi! we're on pypy, all code objects have this interface
        argcount = getattr(_realsocket, _m).im_func.func_code.co_argcount - 1
        exec _s % {'name': _m, 'args': ', '.join('arg%d' % i for i in range(argcount))}
    del _m, _s, argcount
    # Delegation methods with default arguments, that the code above
    # cannot handle correctly
    def sendall(self, data, flags=0):
        self._sock.sendall(data, flags)
    sendall.__doc__ = _realsocket.sendall.__doc__
    def getsockopt(self, level, optname, buflen=None):
        if buflen is None:
            return self._sock.getsockopt(level, optname)
        return self._sock.getsockopt(level, optname, buflen)
    getsockopt.__doc__ = _realsocket.getsockopt.__doc__
# Public aliases: socket() is the class users actually instantiate.
socket = SocketType = _socketobject
class _fileobject(object):
    """Faux file object attached to a socket object."""
    default_bufsize = 8192
    name = "<socket>"
    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
                 "_close"]
    def __init__(self, sock, mode='rb', bufsize=-1, close=False):
        # close=True makes close() really close the socket instead of just
        # decrementing the owning socket's file-object refcount.
        self._sock = sock
        self.mode = mode # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        # _rbufsize is the suggested recv buffer size. It is *strictly*
        # obeyed within readline() for recv calls. If it is larger than
        # default_bufsize it will be used for recv calls within read().
        if bufsize == 0:
            self._rbufsize = 1
        elif bufsize == 1:
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        # We use StringIO for the read buffer to avoid holding a list
        # of variously sized string objects which have been known to
        # fragment the heap due to how they are malloc()ed and often
        # realloc()ed down much smaller than their original allocation.
        self._rbuf = StringIO()
        self._wbuf = [] # A list of strings
        self._wbuf_len = 0
        self._close = close
    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")
    def close(self):
        # Flush pending writes, then either close the socket (close=True)
        # or just drop this file object's reference to it.
        try:
            if self._sock:
                self.flush()
        finally:
            if self._sock:
                if self._close:
                    self._sock.close()
                else:
                    self._sock._decref_socketios()
                self._sock = None
    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass
    def flush(self):
        # Send the whole write buffer in sendall() chunks; on error the
        # unsent tail is put back into the buffer so no data is lost.
        if self._wbuf:
            data = "".join(self._wbuf)
            self._wbuf = []
            self._wbuf_len = 0
            buffer_size = max(self._rbufsize, self.default_bufsize)
            data_size = len(data)
            write_offset = 0
            view = memoryview(data)
            try:
                while write_offset < data_size:
                    self._sock.sendall(view[write_offset:write_offset+buffer_size])
                    write_offset += buffer_size
            finally:
                if write_offset < data_size:
                    remainder = data[write_offset:]
                    del view, data # explicit free
                    self._wbuf.append(remainder)
                    self._wbuf_len = len(remainder)
    def fileno(self):
        return self._sock.fileno()
    def write(self, data):
        # Buffer the data; flush immediately when unbuffered (_wbufsize==0),
        # on newline when line-buffered (==1), or when the buffer is full.
        data = str(data) # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        self._wbuf_len += len(data)
        if (self._wbufsize == 0 or
            self._wbufsize == 1 and '\n' in data or
            self._wbuf_len >= self._wbufsize):
            self.flush()
    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        lines = filter(None, map(str, list))
        self._wbuf_len += sum(map(len, lines))
        self._wbuf.extend(lines)
        if (self._wbufsize <= 1 or
            self._wbuf_len >= self._wbufsize):
            self.flush()
    def read(self, size=-1):
        """Read up to *size* bytes (all remaining data when size < 0)."""
        # Use max, disallow tiny reads in a loop as they are very inefficient.
        # We never leave read() with any leftover data from a new recv() call
        # in our internal buffer.
        rbufsize = max(self._rbufsize, self.default_bufsize)
        # Our use of StringIO rather than lists of string objects returned by
        # recv() minimizes memory usage and fragmentation that occurs when
        # rbufsize is large compared to the typical return value of recv().
        buf = self._rbuf
        buf.seek(0, 2) # seek end
        if size < 0:
            # Read until EOF
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(rbufsize)
                except error, e:
                    # Retry interrupted system calls transparently.
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = buf.tell()
            if buf_len >= size:
                # Already have size bytes in our buffer? Extract and return.
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                left = size - buf_len
                # recv() will malloc the amount of memory given as its
                # parameter even though it often returns much less data
                # than that. The returned data string is short lived
                # as we copy it into a StringIO and free it. This avoids
                # fragmentation issues on many platforms.
                try:
                    data = self._sock.recv(left)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid buffer data copies when:
                    # - We have no data in our buffer.
                    # AND
                    # - Our call to recv returned exactly the
                    # number of bytes we were asked to read.
                    return data
                if n == left:
                    buf.write(data)
                    del data # explicit free
                    break
                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                buf.write(data)
                buf_len += n
                del data # explicit free
                #assert buf_len == buf.tell()
            return buf.getvalue()
    def readline(self, size=-1):
        """Read one line, up to *size* bytes when size >= 0.

        Serves from the internal buffer first; only then calls recv().
        """
        buf = self._rbuf
        buf.seek(0, 2) # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except error, e:
                        # The try..except to catch EINTR was moved outside the
                        # recv loop to avoid the per byte overhead.
                        if e.args[0] == EINTR:
                            continue
                        raise
                    break
                return "".join(buffers)
            buf.seek(0, 2) # seek end
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    buf.write(data[:nl])
                    # Excess past the newline stays buffered for next call.
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2) # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut. Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()
    def readlines(self, sizehint=0):
        """Read lines until EOF (or until *sizehint* bytes are collected)."""
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
    # Iterator protocols
    def __iter__(self):
        return self
    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
# Sentinel meaning "caller did not pass a timeout; use the global default".
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.
    Convenience wrapper around getaddrinfo()/connect().  *address* is a
    (host, port) 2-tuple.  When *timeout* is omitted, the global default
    timeout from getdefaulttimeout() applies.  When *source_address* is
    given it must be a (host, port) tuple for the socket to bind to as the
    source address before connecting; a host of '' or port 0 tells the OS
    to use its defaults.
    """
    host, port = address
    last_error = None
    # Try each address getaddrinfo offers until one connects.
    for family, socktype, proto, _canonname, sockaddr in getaddrinfo(
            host, port, 0, SOCK_STREAM):
        sock = None
        try:
            sock = socket(family, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sockaddr)
            return sock
        except error as exc:
            # Remember the most recent failure and clean up before retrying.
            last_error = exc
            if sock is not None:
                sock.close()
    if last_error is not None:
        raise last_error
    else:
        raise error("getaddrinfo returns an empty list")
|
{
"content_hash": "dd141be8c2e839ca27ca5033cd8d1bac",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 87,
"avg_line_length": 36.00650406504065,
"alnum_prop": 0.5475523843930635,
"repo_name": "ojii/sandlib",
"id": "f2febfeea50b1928a371c0fcb222269ace7b787b",
"size": "22237",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/lib-python/2.7/socket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "82564"
},
{
"name": "Perl",
"bytes": "169"
},
{
"name": "Python",
"bytes": "19181595"
},
{
"name": "Shell",
"bytes": "2922"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import numpy as np
import binvox_rw
import numba
import mcubes
@numba.jit(forceobj=True)
def get_voxel_resolution(pc, patch_size):
    """
    Return the edge length (in meters) of a single voxel such that
    *patch_size* voxels span the pointcloud's largest bbox dimension.

    For example with patch_size=40 the result might be 0.01, meaning
    each voxel is a 1 cm cube.

    :type pc: numpy.ndarray
    :param pc: nx3 numpy array representing a pointcloud
    :type patch_size: int
    :param patch_size: number of voxels along a grid dimension
    :rtype: float
    """
    if pc.shape[1] != 3:
        raise Exception("Invalid pointcloud size, should be nx3, but is {}".format(pc.shape))
    # Extent of the cloud's axis-aligned bounding box along each axis.
    spans = pc.max(axis=0) - pc.min(axis=0)
    max_dim = max(spans[0], spans[1], spans[2])
    return (1.0 * max_dim) / patch_size
@numba.jit(forceobj=True)
def get_bbox_center(pc):
    """
    Return the (x, y, z) center of the axis-aligned bounding box that
    contains the pointcloud.

    :type pc: numpy.ndarray
    :param pc: nx3 numpy array representing a pointcloud
    :rtype: numpy.ndarray of shape (3,)
    """
    if pc.shape[1] != 3:
        raise Exception("Invalid pointcloud size, should be nx3, but is {}".format(pc.shape))
    mins = pc.min(axis=0)
    maxs = pc.max(axis=0)
    # Midpoint of the bbox along each axis.
    return np.array([mins[0] + (maxs[0] - mins[0]) / 2.0,
                     mins[1] + (maxs[1] - mins[1]) / 2.0,
                     mins[2] + (maxs[2] - mins[2]) / 2.0])
@numba.jit(forceobj=True)
def voxelize_points(points, pc_bbox_center, voxel_resolution, num_voxels_per_dim, pc_center_in_voxel_grid):
    """
    Convert a pointcloud into a boolean occupancy voxel grid.

    :type points: numpy.ndarray
    :param points: nx3 numpy array representing a pointcloud (meters)
    :type pc_bbox_center: numpy.ndarray
    :param pc_bbox_center: shape (3,), center of the bbox containing points
    :type voxel_resolution: float
    :param voxel_resolution: edge length of a single voxel in meters,
        e.g. 0.01 for a 1cm^3 voxel
    :type num_voxels_per_dim: int
    :param num_voxels_per_dim: voxels along each dimension, e.g. 40 for 40x40x40
    :type pc_center_in_voxel_grid: tuple
    :param pc_center_in_voxel_grid: (x, y, z) voxel coords at which to anchor
        the cloud center; e.g. (20, 20, 18) leaves extra room in the back of
        the grid for shape completion.
    :rtype: numpy.ndarray of bool, shape (n, n, n)
    """
    # BUGFIX: the deprecated np.bool alias was removed in NumPy 1.24; the
    # builtin `bool` is the documented replacement and yields the same dtype.
    voxel_grid = np.zeros((num_voxels_per_dim,
                           num_voxels_per_dim,
                           num_voxels_per_dim), dtype=bool)
    # Map points from meters into (floored) voxel indices, anchored so the
    # bbox center lands at pc_center_in_voxel_grid.
    centered_scaled_points = np.floor(
        (points - np.array(pc_bbox_center) + np.array(
            pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)
    # Drop points beyond the upper bound of the grid.
    mask = centered_scaled_points.max(axis=1) < num_voxels_per_dim
    centered_scaled_points = centered_scaled_points[mask]
    # If no points remain inside the grid, return the empty grid.
    if centered_scaled_points.shape[0] == 0:
        return voxel_grid
    # Drop points below the lower bound.  NOTE(review): `> 0` also discards
    # points that fall into index-0 voxels — confirm that is intended.
    mask = centered_scaled_points.min(axis=1) > 0
    centered_scaled_points = centered_scaled_points[mask]
    if centered_scaled_points.shape[0] == 0:
        return voxel_grid
    # Remaining coordinates are valid integer voxel indices; mark occupied.
    csp_int = centered_scaled_points.astype(int)
    mask = (csp_int[:, 0], csp_int[:, 1], csp_int[:, 2])
    voxel_grid[mask] = 1
    return voxel_grid
def pc_to_binvox(points, **kwargs):
    """
    Build a binvox voxel grid from a pointcloud.

    The cloud's bbox center is placed slightly toward the front of the grid
    (percent_offset z < 0.5 by default) so a shape-completion model has room
    to fill in the occluded back half.

    :type points: numpy.ndarray
    :param points: nx3 numpy array representing a pointcloud
    :rtype: tuple (binvox_rw.Voxels, ndarray, float, tuple)

    :Keyword Arguments:
        * *patch_size* (``int``) --
          voxels along a single grid dimension; default 40 gives 40^3
        * *percent_patch_size* (``float``) --
          fraction of the grid the cloud should span; < 1 leaves edge
          padding; default 0.8
        * *percent_offset* (``tuple``) --
          where (as fractions of patch_size) to anchor the cloud center
          inside the grid; default (0.5, 0.5, 0.45)
    """
    patch_size = kwargs.get("patch_size", 40)
    percent_offset = kwargs.get("percent_offset", (0.5, 0.5, 0.45))
    percent_patch_size = kwargs.get("percent_patch_size", 0.8)
    if points.shape[1] != 3:
        raise Exception("Invalid pointcloud size, should be nx3, but is {}".format(points.shape))
    if len(percent_offset) != 3:
        raise Exception("Percent offset should be a tuple of size 3, instead got {}".format(percent_offset))
    percent_x, percent_y, percent_z = percent_offset
    # Center of the cloud's bbox, in meters.
    voxel_center = get_bbox_center(points)
    # Meters per voxel, leaving percent_patch_size padding at the sides.
    voxel_resolution = get_voxel_resolution(points, percent_patch_size * patch_size)
    # Voxel-space anchor for the cloud center, e.g. (20, 20, 18).
    center_point_in_voxel_grid = (patch_size * percent_x,
                                  patch_size * percent_y,
                                  patch_size * percent_z)
    vox_np = voxelize_points(
        points=points[:, 0:3],
        pc_bbox_center=voxel_center,
        voxel_resolution=voxel_resolution,
        num_voxels_per_dim=patch_size,
        pc_center_in_voxel_grid=center_point_in_voxel_grid)
    # World-space location of the grid's minimum corner.
    offset = np.array(voxel_center) - np.array(center_point_in_voxel_grid) * voxel_resolution
    voxel_grid = binvox_rw.Voxels(vox_np, vox_np.shape, tuple(offset),
                                  voxel_resolution * patch_size, "xyz")
    return voxel_grid, voxel_center, voxel_resolution, center_point_in_voxel_grid
@numba.jit(forceobj=True)
def get_ternary_voxel_grid(binary_voxel_grid):
    """
    Expand a binary surface-occupancy voxel grid into a ternary
    visibility grid.

    :param binary_voxel_grid: binvox_rw.Voxels or numpy.ndarray (3D) where
        "1" marks voxels occupied by the visible surface and "0" marks
        everything else.
    :return: numpy.ndarray where 0 = visibly known to be unoccupied,
        1 = occupied by the visible surface, 2 = visually occluded.
    """
    # FIX: the old docstring documented a `method` parameter that does not
    # exist; this function only implements the "simple" strategy below.
    # Normalize the input: accept a plain ndarray directly (without touching
    # binvox_rw), or unwrap a Voxels object to its raw data array.
    if isinstance(binary_voxel_grid, np.ndarray):
        occupancy = binary_voxel_grid
    elif isinstance(binary_voxel_grid, binvox_rw.Voxels):
        occupancy = binary_voxel_grid.data
    else:
        raise ValueError("binary_voxel_grid must be Voxels or ndarray")
    voxel_grid_shape = occupancy.shape
    assert len(voxel_grid_shape) == 3
    # Initialize all ternary grid values to 0 (known free space).
    ternary_voxel_grid = np.zeros(voxel_grid_shape)
    # Simple model: assume the camera is infinitely far away looking down +z,
    # so for each fixed (x, y) column everything behind the first occupied
    # voxel is occluded.  Perspective is not taken into account.
    for i in range(voxel_grid_shape[0]):
        for j in range(voxel_grid_shape[1]):
            for k in range(voxel_grid_shape[2]):
                if occupancy[i, j, k] > 0:
                    # Surface found: mark it 1 and everything behind it 2.
                    ternary_voxel_grid[i, j, k] = 1
                    ternary_voxel_grid[i, j, k + 1:voxel_grid_shape[2]] = 2
                    break
    return ternary_voxel_grid
@numba.jit(forceobj=True)
def rescale_mesh(vertices, patch_center, voxel_resolution, pc_center_in_voxel_grid):
    """Map mesh vertices from voxel-grid coordinates back to world (meter)
    coordinates, undoing the anchoring applied during voxelization."""
    scaled = vertices * voxel_resolution
    anchor_shift = np.array(pc_center_in_voxel_grid) * voxel_resolution
    return scaled - anchor_shift + np.array(patch_center)
@numba.jit(forceobj=True)
def create_voxel_grid_around_point_scaled(
        points,
        patch_center,
        voxel_resolution,
        num_voxels_per_dim,
        pc_center_in_voxel_grid
):
    """Voxelize *points* into a float32 occupancy grid of shape
    (n, n, n, 1), anchored so *patch_center* maps to
    *pc_center_in_voxel_grid* (same mapping as voxelize_points, but with a
    trailing channel axis)."""
    grid = np.zeros((num_voxels_per_dim,) * 3 + (1,), dtype=np.float32)
    # Meters -> (floored) voxel indices.
    voxel_coords = np.floor(
        (points - np.array(patch_center) + np.array(
            pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)
    # Keep only coordinates strictly inside the grid bounds.
    in_bounds = ((voxel_coords.max(axis=1) < num_voxels_per_dim) &
                 (voxel_coords.min(axis=1) > 0))
    voxel_coords = voxel_coords[in_bounds]
    if voxel_coords.shape[0] == 0:
        return grid
    idx = voxel_coords.astype(int)
    grid[idx[:, 0], idx[:, 1], idx[:, 2],
         np.zeros(idx.shape[0], dtype=int)] = 1
    return grid
def pc_to_binvox_for_shape_completion(points,
                                      patch_size):
    """
    Build a binvox voxel grid from a pointcloud for shape completion.

    The grid is slightly off center from the cloud's bbox center so the
    back of the grid has more room for the completed shape.

    :type points: numpy.ndarray
    :param points: nx3 numpy array representing a pointcloud
    :type patch_size: int
    :param patch_size: voxels along a single grid dimension,
        e.g. patch_size=40 gives a 40^3 grid
    :rtype: binvox_rw.Voxels
    """
    if points.shape[1] != 3:
        raise Exception("Invalid pointcloud size, should be nx3, but is {}".format(points.shape))
    # Fraction of the grid the cloud should occupy; < 1 leaves edge padding.
    PERCENT_PATCH_SIZE = (4.0 / 5.0)
    # Anchor the cloud center toward the grid front (z < 0.5) so the
    # occluded back half has room to be filled in.
    PERCENT_X = 0.5
    PERCENT_Y = 0.5
    PERCENT_Z = 0.45
    # Bbox center of the cloud, in meters.
    center = get_bbox_center(points)
    # Meters per voxel, with PERCENT_PATCH_SIZE padding factored in.
    voxel_resolution = get_voxel_resolution(points, PERCENT_PATCH_SIZE * patch_size)
    # Voxel-space anchor for the cloud center, e.g. (20, 20, 18).
    pc_center_in_voxel_grid = (patch_size * PERCENT_X,
                               patch_size * PERCENT_Y,
                               patch_size * PERCENT_Z)
    vox_np = voxelize_points(
        points=points[:, 0:3],
        pc_bbox_center=center,
        voxel_resolution=voxel_resolution,
        num_voxels_per_dim=patch_size,
        pc_center_in_voxel_grid=pc_center_in_voxel_grid)
    # World-space location of the grid's minimum corner.
    offset = np.array(center) - np.array(pc_center_in_voxel_grid) * voxel_resolution
    return binvox_rw.Voxels(vox_np, vox_np.shape, tuple(offset),
                            voxel_resolution * patch_size, "xyz")
@numba.jit(forceobj=True)
def voxel_grid_jaccard_similarity(a, b):
    '''
    Jaccard similarity between two voxel grids: the per-sample ratio of
    intersection to union voxel counts, averaged over the batch.
    The inputs are expected to be numpy 5D ndarrays in BZCXY format.
    '''
    intersection = np.sum(a * b, axis=1)
    union = np.sum((a + b) - a * b, axis=1)
    return np.mean(intersection / union)
|
{
"content_hash": "bc2d789ad7bf7e8e31f107c9e86f48fb",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 120,
"avg_line_length": 39.25,
"alnum_prop": 0.6589906908378246,
"repo_name": "CRLab/curvox",
"id": "253f31442daffdbb2a0345446c3248a2e27ccec2",
"size": "14287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/curvox/pc_vox_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76823"
}
],
"symlink_target": ""
}
|
"""
The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
ftp://ftp.cs.cmu.edu/project/speech/dict/
Copyright 1998 Carnegie Mellon University
File Format: Each line consists of an uppercased word, a counter
(for alternative pronunciations), and a transcription. Vowels are
marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
NATURAL 1 N AE1 CH ER0 AH0 L
The dictionary contains 127069 entries. Of these, 119400 words are assigned
a unique pronunciation, 6830 words have two pronunciations, and 839 words have
three or more pronunciations. Many of these are fast-speech variants.
Phonemes: There are 39 phonemes, as shown below:
Phoneme Example Translation Phoneme Example Translation
------- ------- ----------- ------- ------- -----------
AA odd AA D AE at AE T
AH hut HH AH T AO ought AO T
AW cow K AW AY hide HH AY D
B be B IY CH cheese CH IY Z
D dee D IY DH thee DH IY
EH Ed EH D ER hurt HH ER T
EY ate EY T F fee F IY
G green G R IY N HH he HH IY
IH it IH T IY eat IY T
JH gee JH IY K key K IY
L lee L IY M me M IY
N knee N IY NG ping P IH NG
OW oat OW T OY toy T OY
P pee P IY R read R IY D
S sea S IY SH she SH IY
T tea T IY TH theta TH EY T AH
UH hood HH UH D UW two T UW
V vee V IY W we W IY
Y yield Y IY L D Z zee Z IY
ZH seizure S IY ZH ER
"""
import codecs
from nltk import compat
from nltk.util import Index
from .util import *
from .api import *
class CMUDictCorpusReader(CorpusReader):
    def entries(self):
        """
        :return: the cmudict lexicon as a list of (word, transcriptions)
            entry tuples.
        """
        views = [StreamBackedCorpusView(fileid, read_cmudict_block,
                                        encoding=enc)
                 for fileid, enc in self.abspaths(None, True)]
        return concat(views)
    def raw(self):
        """
        :return: the cmudict lexicon as a raw string.
        """
        fileids = self._fileids
        # Normalize a single fileid to a one-element list.
        if isinstance(fileids, compat.string_types):
            fileids = [fileids]
        contents = [self.open(f).read() for f in fileids]
        return concat(contents)
    def words(self):
        """
        :return: a list of all words defined in the cmudict lexicon,
            lowercased.
        """
        return [word.lower() for (word, _) in self.entries()]
    def dict(self):
        """
        :return: the cmudict lexicon as a dictionary mapping lowercase
            words to lists of pronunciations.
        """
        return dict(Index(self.entries()))
def read_cmudict_block(stream):
    """Read up to 100 cmudict entries from *stream*.

    Each input line has the form "WORD COUNTER PHONEME..."; the counter
    (alternative-pronunciation index) is dropped, yielding
    (lowercased word, phoneme list) tuples.  Returns early at end of file.
    """
    entries = []
    while len(entries) < 100: # Read 100 at a time.
        line = stream.readline()
        if line == '': return entries # end of file.
        pieces = line.split()
        # FIX: blank or whitespace-only lines used to raise IndexError on
        # pieces[0]; skip them instead.
        if not pieces:
            continue
        entries.append( (pieces[0].lower(), pieces[2:]) )
    return entries
|
{
"content_hash": "db831621ff05286b781d517726715582",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 37.44318181818182,
"alnum_prop": 0.5332321699544765,
"repo_name": "haya14busa/alc-etm-searcher",
"id": "ea8c770e7eb43ddd4401e7885ca73be86bc4f46a",
"size": "3533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nltk-3.0a3/build/lib/nltk/corpus/reader/cmudict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11448"
},
{
"name": "Java",
"bytes": "30518"
},
{
"name": "Python",
"bytes": "6856183"
}
],
"symlink_target": ""
}
|
import json
import logging
from datetime import datetime
from enum import Enum, unique
from typing import Dict, List, Optional, Tuple
import click
import yaml
import ray._private.services as services
from ray._private.thirdparty.tabulate.tabulate import tabulate
from ray.experimental.state.api import (
StateApiClient,
get_log,
list_logs,
summarize_actors,
summarize_objects,
summarize_tasks,
)
from ray.experimental.state.common import (
DEFAULT_LIMIT,
DEFAULT_LOG_LIMIT,
DEFAULT_RPC_TIMEOUT,
GetApiOptions,
ListApiOptions,
PredicateType,
StateResource,
StateSchema,
SupportedFilterType,
resource_to_schema,
)
from ray.experimental.state.exception import RayStateApiException
from ray.util.annotations import PublicAPI
logger = logging.getLogger(__name__)
@unique
class AvailableFormat(Enum):
    """Output renderings supported by the state CLI.
    The member values double as the user-facing format option strings
    (see _get_available_formats below).
    """
    DEFAULT = "default"
    JSON = "json"
    YAML = "yaml"
    TABLE = "table"
def _parse_filter(filter: str) -> Tuple[str, PredicateType, SupportedFilterType]:
"""Parse the filter string to a tuple of key, preciate, and value."""
# The function assumes there's going to be no key that includes "="" or "!=".
# Since key is controlled by us, it should be trivial to keep the invariant.
predicate = None
# Tuple of [predicate_start, predicate_end).
predicate_index = None
# Find the first predicate match. This logic works because we assume the
# key doesn't contain = or !=.
for i in range(len(filter)):
char = filter[i]
if char == "=":
predicate = "="
predicate_index = (i, i + 1)
break
elif char == "!":
if len(filter) <= i + 1:
continue
next_char = filter[i + 1]
if next_char == "=":
predicate = "!="
predicate_index = (i, i + 2)
break
if not predicate or not predicate_index:
raise ValueError(
f"The format of a given filter {filter} is invalid: "
"Cannot find the predicate. "
"Please provide key=val or key!=val format string."
)
key, predicate, value = (
filter[: predicate_index[0]],
filter[predicate_index[0] : predicate_index[1]],
filter[predicate_index[1] :],
)
assert predicate == "=" or predicate == "!="
if len(key) == 0 or len(value) == 0:
raise ValueError(
f"The format of a given filter {filter} is invalid: "
f"Cannot identify key {key} or value, {value}. "
"Please provide key=val or key!=val format string."
)
return (key, predicate, value)
def _get_available_formats() -> List[str]:
    """Return every supported output format name as a list of strings."""
    return [member.value for member in AvailableFormat]
def _get_available_resources(
    excluded: Optional[List[StateResource]] = None,
) -> List[str]:
    """Return the user-facing names of queryable resources.

    Resource names are stored with "_" internally but exposed with "-"
    on the command line, so the replacement happens here.

    Args:
        excluded: List of resources that should be excluded
    """
    skip = excluded if excluded is not None else []
    names = []
    for resource in StateResource:
        if resource in skip:
            continue
        names.append(resource.value.replace("_", "-"))
    return names
def get_table_output(state_data: List, schema: StateSchema) -> str:
    """Render a list of state entries as a plain-text table.

    Column order follows the field order declared on the ``StateSchema``
    dataclass (``schema.list_columns()``); columns not present in a row's
    keys are dropped for that row. Nested dict values are rendered inline
    as YAML.

    Args:
        state_data: A list of state data.
        schema: The schema for the corresponding resource.

    Returns:
        The table formatted string.
    """
    banner = "=" * 8 + f" List: {datetime.now()} " + "=" * 8
    columns = schema.list_columns()
    headers = []
    rows = []
    for entry in state_data:
        # Render nested dicts as YAML so they stay readable inside a cell.
        # NOTE: this mutates the caller-provided dicts in place.
        for key, value in entry.items():
            if isinstance(value, dict):
                entry[key] = yaml.dump(value, indent=2)
        present = set(entry.keys())
        headers = [col.upper() for col in columns if col in present]
        rows.append([entry[name.lower()] for name in headers])
    return f"""
{banner}
Stats:
------------------------------
Total: {len(state_data)}
Table:
------------------------------
{tabulate(rows, headers=headers, showindex=True, tablefmt="plain", floatfmt=".3f")}
"""
def output_with_format(
    state_data: List,
    *,
    schema: Optional[StateSchema],
    format: AvailableFormat = AvailableFormat.DEFAULT,
) -> str:
    """Serialize state data in the requested output format.

    DEFAULT and TABLE both render the plain-text table.

    Raises:
        ValueError: if the format is not one of ``AvailableFormat``.
    """
    if format in (AvailableFormat.DEFAULT, AvailableFormat.TABLE):
        return get_table_output(state_data, schema)
    if format == AvailableFormat.YAML:
        return yaml.dump(state_data, indent=4, explicit_start=True)
    if format == AvailableFormat.JSON:
        return json.dumps(state_data)
    raise ValueError(
        f"Unexpected format: {format}. "
        f"Supported formatting: {_get_available_formats()}"
    )
def format_summary_output(state_data: Dict, *, resource: StateResource) -> str:
    """Render a summary API response as human-readable text.

    Pops the summary payload out of ``state_data["cluster"]`` (mutating it),
    renders the remaining cluster info as YAML, and builds one table row per
    summary group.
    """
    if not state_data:
        return "No resource in the cluster"
    cluster_data = state_data["cluster"]
    summaries = cluster_data.pop("summary")
    summary_by = cluster_data.pop("summary_by")
    cluster_info_table = yaml.dump(cluster_data, indent=2)
    headers = []
    rows = []
    for summary in summaries.values():
        # Nested dicts are rendered as YAML for readability (in place).
        for key, value in summary.items():
            if isinstance(value, dict):
                summary[key] = yaml.dump(value, indent=2)
        headers = sorted(key.upper() for key in summary)
        rows.append([summary[name.lower()] for name in headers])
    summary_table = tabulate(
        rows, headers=headers, showindex=True, tablefmt="plain", numalign="left"
    )
    banner = (
        "=" * 8 + f" {resource.value.capitalize()} Summary: {datetime.now()} " + "=" * 8
    )
    return f"""
{banner}
Stats:
------------------------------------
{cluster_info_table}
Table (group by {summary_by}):
------------------------------------
{summary_table}
"""
def format_object_summary_output(state_data: Dict) -> str:
    """Render an object-summary API response, one table per callsite.

    Pops the summary payload out of ``state_data["cluster"]`` (mutating it)
    and renders the remaining cluster info as YAML above the tables.
    """
    if not state_data:
        return "No resource in the cluster"
    cluster_data = state_data["cluster"]
    summaries = cluster_data.pop("summary")
    summary_by = cluster_data.pop("summary_by")
    cluster_info_table = yaml.dump(cluster_data, indent=2)
    tables = []
    for callsite, summary in summaries.items():
        # Nested dicts are rendered as YAML for readability (in place).
        for key, value in summary.items():
            if isinstance(value, dict):
                summary[key] = yaml.dump(value, indent=2)
        headers = sorted(key.upper() for key in summary)
        rows = [[summary[name.lower()] for name in headers]]
        callsite_table = tabulate(
            rows, headers=headers, showindex=True, numalign="left"
        )
        # "|" separates frames in a Ray callsite; break them onto new lines.
        tables.append(callsite.replace("|", "\n|") + "\n" + callsite_table)
    banner = "=" * 8 + f" Object Summary: {datetime.now()} " + "=" * 8
    table_string = "\n\n\n\n".join(tables)
    return f"""
{banner}
Stats:
------------------------------------
{cluster_info_table}
Table (group by {summary_by})
------------------------------------
{table_string}
"""
def format_get_api_output(
    state_data: Optional[Dict],
    id: str,
    *,
    schema: StateSchema,
    format: AvailableFormat = AvailableFormat.YAML,
) -> str:
    """Format the result of a get-by-id query, or a not-found message."""
    # Both None and an empty payload mean the id did not match anything.
    if not state_data:
        return f"Resource with id={id} not found in the cluster."
    return output_with_format(state_data, schema=schema, format=format)
def format_list_api_output(
    state_data: List[Dict],
    *,
    schema: StateSchema,
    format: AvailableFormat = AvailableFormat.DEFAULT,
) -> str:
    """Format the result of a list query, or a placeholder when empty."""
    if len(state_data) == 0:
        return "No resource in the cluster"
    return output_with_format(state_data, schema=schema, format=format)
def _should_explain(format: AvailableFormat) -> bool:
    """Whether to attach API warnings/stats to the output.

    Machine-readable formats (json/yaml) must stay clean, so explanations
    are only printed for the human-readable table formats.
    """
    return format in (AvailableFormat.DEFAULT, AvailableFormat.TABLE)
"""
Common Options for State API commands
"""
timeout_option = click.option(
"--timeout",
default=DEFAULT_RPC_TIMEOUT,
help=f"Timeout in seconds for the API requests. Default is {DEFAULT_RPC_TIMEOUT}",
)
address_option = click.option(
"--address",
default=None,
help=(
"The address of Ray API server. If not provided, it will be configured "
"automatically from querying the GCS server."
),
)
@click.command()
@click.argument(
    "resource",
    # NOTE(rickyyx): We are not allowing query job with id, and runtime envs
    type=click.Choice(
        _get_available_resources(
            excluded=[StateResource.JOBS, StateResource.RUNTIME_ENVS]
        )
    ),
)
@click.argument(
    "id",
    type=str,
)
@address_option
@timeout_option
@PublicAPI(stability="alpha")
def ray_get(
    resource: str,
    id: str,
    address: Optional[str],
    timeout: float,
):
    """Get a state of a given resource by ID.

    We currently DO NOT support get by id for jobs and runtime-envs

    The output schema is defined at :ref:`State API Schema section. <state-api-schema>`
    For example, the output schema of `ray get tasks <task-id>` is
    :ref:`ray.experimental.state.common.TaskState <state-api-schema-task>`.

    Usage:
        Get an actor with actor id <actor-id>
        ```
        ray get actors <actor-id>
        ```
        Get a placement group information with <placement-group-id>
        ```
        ray get placement-groups <placement-group-id>
        ```

    The API queries one or more components from the cluster to obtain the data.
    The returned state snapshot could be stale, and it is not guaranteed to return
    the live data.

    Args:
        resource: The type of the resource to query.
        id: The id of the resource.

    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
    """
    # All resource names use '_' rather than '-'. But users options have '-'
    resource = StateResource(resource.replace("-", "_"))

    # Create the State API server and put it into context
    logger.debug(f"Create StateApiClient to ray instance at: {address}...")
    client = StateApiClient(address=address)
    options = GetApiOptions(timeout=timeout)

    # If errors occur, exceptions will be thrown.
    try:
        data = client.get(
            resource=resource,
            id=id,
            options=options,
            # `get` always prints YAML, so _should_explain() is False here and
            # no extra warnings/stats are attached to the output.
            _explain=_should_explain(AvailableFormat.YAML),
        )
    except RayStateApiException as e:
        # Surface API failures as a clean CLI usage error (no stack trace).
        raise click.UsageError(str(e))

    # Print data to console.
    print(
        format_get_api_output(
            state_data=data,
            id=id,
            schema=resource_to_schema(resource),
            format=AvailableFormat.YAML,
        )
    )
@click.command()
@click.argument(
    "resource",
    type=click.Choice(_get_available_resources()),
)
@click.option(
    "--format", default="default", type=click.Choice(_get_available_formats())
)
@click.option(
    "-f",
    "--filter",
    help=(
        "A key, predicate, and value to filter the result. "
        "E.g., --filter 'key=value' or --filter 'key!=value'. "
        "You can specify multiple --filter options. In this case all predicates "
        "are concatenated as AND. For example, --filter key=value --filter key2=value "
        "means (key==val) AND (key2==val2)"
    ),
    multiple=True,
)
@click.option(
    "--limit",
    default=DEFAULT_LIMIT,
    type=int,
    help=("Maximum number of entries to return. 100 by default."),
)
@click.option(
    "--detail",
    help=(
        "If the flag is set, the output will contain data in more details. "
        "Note that the API could query more sources "
        "to obtain information in a greater detail."
    ),
    is_flag=True,
    default=False,
)
@timeout_option
@address_option
@PublicAPI(stability="alpha")
def ray_list(
    resource: str,
    format: str,
    filter: List[str],
    limit: int,
    detail: bool,
    timeout: float,
    address: str,
):
    """List all states of a given resource.

    Normally, summary APIs are recommended before listing all resources.

    The output schema is defined at :ref:`State API Schema section. <state-api-schema>`
    For example, the output schema of `ray list tasks` is
    :ref:`ray.experimental.state.common.TaskState <state-api-schema-task>`.

    Usage:
        List all actor information from the cluster.
        ```
        ray list actors
        ```
        List 50 actors from the cluster. The sorting order cannot be controlled.
        ```
        ray list actors --limit 50
        ```
        List 10 actors with state PENDING.
        ```
        ray list actors --limit 10 --filter "state=PENDING"
        ```
        List actors with yaml format.
        ```
        ray list actors --format yaml
        ```
        List actors with details. When --detail is specified, it might query
        more data sources to obtain data in details.
        ```
        ray list actors --detail
        ```

    The API queries one or more components from the cluster to obtain the data.
    The returned state snapshot could be stale, and it is not guaranteed to return
    the live data.

    The API can return partial or missing output upon the following scenarios.
    - When the API queries more than 1 component, if some of them fail,
      the API will return the partial result (with a suppressible warning).
    - When the API returns too many entries, the API
      will truncate the output. Currently, truncated data cannot be
      selected by users.

    Args:
        resource: The type of the resource to query.

    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
    """
    # All resource names use '_' rather than '-'. But users options have '-'
    resource = StateResource(resource.replace("-", "_"))
    # Validate the requested output format early (raises on unknown values).
    format = AvailableFormat(format)

    # Create the State API server and put it into context
    client = StateApiClient(address=address)
    # Each --filter string becomes a (key, predicate, value) tuple.
    filter = [_parse_filter(f) for f in filter]

    options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filter,
        detail=detail,
    )

    # If errors occur, exceptions will be thrown. Empty data indicate successful query.
    try:
        data = client.list(
            resource,
            options=options,
            raise_on_missing_output=False,
            _explain=_should_explain(format),
        )
    except RayStateApiException as e:
        raise click.UsageError(str(e))

    # If --detail is given, the default formatting is yaml.
    # NOTE: this upgrade intentionally happens after the query above, so that
    # _should_explain() was evaluated with the user-provided format.
    if detail and format == AvailableFormat.DEFAULT:
        format = AvailableFormat.YAML

    # Print data to console.
    print(
        format_list_api_output(
            state_data=data,
            schema=resource_to_schema(resource),
            format=format,
        )
    )
@click.group("summary")
@click.pass_context
@PublicAPI(stability="alpha")
def summary_state_cli_group(ctx):
"""Return the summarized information of a given resource."""
pass
@summary_state_cli_group.command(name="tasks")
@timeout_option
@address_option
@click.pass_context
@PublicAPI(stability="alpha")
def task_summary(ctx, timeout: float, address: str):
    """Summarize the task state of the cluster.
    By default, the output contains the information grouped by
    task function names.
    The output schema is
    :ref:`ray.experimental.state.common.TaskSummaries <state-api-schema-task-summary>`.
    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
    """
    # Fetch the summary first, then render it, so each step is easy to debug.
    summary = summarize_tasks(
        address=address,
        timeout=timeout,
        raise_on_missing_output=False,
        _explain=True,
    )
    print(format_summary_output(summary, resource=StateResource.TASKS))
@summary_state_cli_group.command(name="actors")
@timeout_option
@address_option
@click.pass_context
@PublicAPI(stability="alpha")
def actor_summary(ctx, timeout: float, address: str):
    """Summarize the actor state of the cluster.
    By default, the output contains the information grouped by
    actor class names.
    The output schema is
    :ref:`ray.experimental.state.common.ActorSummaries
    <state-api-schema-actor-summary>`.
    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
    """
    # Fetch the summary first, then render it, so each step is easy to debug.
    summary = summarize_actors(
        address=address,
        timeout=timeout,
        raise_on_missing_output=False,
        _explain=True,
    )
    print(format_summary_output(summary, resource=StateResource.ACTORS))
@summary_state_cli_group.command(name="objects")
@timeout_option
@address_option
@click.pass_context
@PublicAPI(stability="alpha")
def object_summary(ctx, timeout: float, address: str):
    """Summarize the object state of the cluster.
    The API is recommended when debugging memory leaks.
    See :ref:`Debugging with Ray Memory <debug-with-ray-memory>` for more details.
    (Note that this command is almost equivalent to `ray memory`, but it returns
    easier-to-understand output).
    By default, the output contains the information grouped by
    object callsite. Note that the callsite is not collected and
    all data will be aggregated as "disable" callsite if the env var
    `RAY_record_ref_creation_sites` is not configured. To enable the
    callsite collection, set the following environment variable when
    starting Ray.
    Example:
        ```
        RAY_record_ref_creation_sites=1 ray start --head
        ```
        ```
        RAY_record_ref_creation_sites=1 ray_script.py
        ```
    The output schema is
    :ref:`ray.experimental.state.common.ObjectSummaries
    <state-api-schema-object-summary>`.
    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
    """
    # Fetch the summary first, then render it, so each step is easy to debug.
    summary = summarize_objects(
        address=address,
        timeout=timeout,
        raise_on_missing_output=False,
        _explain=True,
    )
    print(format_object_summary_output(summary))
# Shared options for the `ray logs` subcommands.

# --follow/-f: stream the file as it grows instead of a one-shot tail.
log_follow_option = click.option(
    "--follow",
    "-f",
    required=False,
    type=bool,
    is_flag=True,
    help="Streams the log file as it is updated instead of just tailing.",
)
# --tail: number of trailing lines to fetch; -1 fetches the whole file.
log_tail_option = click.option(
    "--tail",
    required=False,
    type=int,
    default=DEFAULT_LOG_LIMIT,
    help="Number of lines to tail from log. -1 indicates fetching the whole file.",
)
# --interval (hidden): polling interval in seconds used with --follow.
log_interval_option = click.option(
    "--interval",
    required=False,
    type=float,
    default=None,
    help="The interval in secs to print new logs when `--follow` is specified.",
    hidden=True,
)
# --timeout: RPC deadline; ignored while following, since follow is open-ended.
log_timeout_option = click.option(
    "--timeout",
    default=DEFAULT_RPC_TIMEOUT,
    help=(
        "Timeout in seconds for the API requests. "
        f"Default is {DEFAULT_RPC_TIMEOUT}. If --follow is specified, "
        "this option will be ignored."
    ),
)
# -ip/--node-ip: select the source node by IP address.
log_node_ip_option = click.option(
    "-ip",
    "--node-ip",
    required=False,
    type=str,
    default=None,
    help="Filters the logs by this ip address",
)
# --node-id/-id: select the source node by NodeID.
log_node_id_option = click.option(
    "--node-id",
    "-id",
    required=False,
    type=str,
    default=None,
    help="Filters the logs by this NodeID",
)
# --suffix: choose stdout ("out") vs stderr ("err") log files.
log_suffix_option = click.option(
    "--suffix",
    required=False,
    default="out",
    type=click.Choice(["out", "err"], case_sensitive=False),
    help=(
        "The suffix of the log file that denotes the log type, where out refers "
        "to logs from stdout, and err for logs from stderr "
    ),
)
def _get_head_node_ip(address: Optional[str] = None):
    """Resolve the head node IP from a Ray cluster address.

    Args:
        address: ray cluster address, e.g. "auto", "localhost:6379"

    Raises:
        click.UsageError if node ip could not be resolved
    """
    try:
        bootstrap = services.canonicalize_bootstrap_address_or_die(address)
    except (ConnectionError, ValueError) as e:
        # Hide all the stack trace
        raise click.UsageError(str(e))
    # The canonical address is "<ip>:<port>"; keep only the ip part.
    return bootstrap.partition(":")[0]
def _print_log(
    address: Optional[str] = None,
    node_id: Optional[str] = None,
    node_ip: Optional[str] = None,
    filename: Optional[str] = None,
    actor_id: Optional[str] = None,
    pid: Optional[int] = None,
    follow: bool = False,
    tail: int = DEFAULT_LOG_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    interval: Optional[float] = None,
    suffix: Optional[str] = None,
) -> None:
    """Wrapper around `get_log()` that prints the preamble and the log lines"""
    # NOTE(review): `log_actor`/`log_worker` pass `pid` through as a str
    # (their click options use type=str) despite the int annotation here;
    # presumably get_log() accepts both — confirm before tightening types.
    if tail > 0:
        print(
            f"--- Log has been truncated to last {tail} lines."
            " Use `--tail` flag to toggle. Set to -1 for getting the entire file. ---\n"
        )
    if node_id is None and node_ip is None:
        # Auto detect node ip from the ray address when address neither is given
        node_ip = _get_head_node_ip(address)
    # get_log() yields file content in chunks; print each as it arrives so
    # `--follow` streams without buffering.
    for chunk in get_log(
        address=address,
        node_id=node_id,
        node_ip=node_ip,
        filename=filename,
        actor_id=actor_id,
        tail=tail,
        pid=pid,
        follow=follow,
        _interval=interval,
        timeout=timeout,
        suffix=suffix,
    ):
        print(chunk, end="", flush=True)
LOG_CLI_HELP_MSG = """
Get logs based on filename (cluster) or resource identifiers (actor)
Example:
Get all the log files available on a node (ray address could be
obtained from `ray start --head` or `ray.init()`).
```
ray logs --address="localhost:6379"
```
[ray logs cluster] Print the last 500 lines of raylet.out on a head node.
```
ray logs cluster raylet.out --tail 500
```
Or simply, using `ray logs` as an alias for `ray logs cluster`:
```
ray logs raylet.out --tail 500
```
Print the last 500 lines of raylet.out on a worker node id A.
```
ray logs raylet.out --tail 500 —-node-id A
```
[ray logs actor] Follow the log file with an actor id ABC.
```
ray logs actor --id ABC --follow
```
"""
class LogCommandGroup(click.Group):
    """`click.Group` that forwards unknown subcommands to `cluster`."""

    def resolve_command(self, ctx, args):
        """Resolve `ray logs <glob>` as an alias for `ray logs cluster <glob>`.

        This overrides the default `resolve_command` for the parent class:
        when the first token is not a known subcommand, retry with `cluster`
        prepended instead of failing.
        """
        # Keep click from aborting on the (expected) failed first resolution.
        ctx.resilient_parsing = True
        cmd_name, cmd, parsed_args = super().resolve_command(ctx, args)
        if cmd is not None:
            return cmd_name, cmd, parsed_args
        # It could have been `ray logs ...`; forward to `ray logs cluster ...`
        return super().resolve_command(ctx, ["cluster"] + args)
# The `ray logs` group; LogCommandGroup makes `ray logs <glob>` behave like
# `ray logs cluster <glob>`.
logs_state_cli_group = LogCommandGroup(help=LOG_CLI_HELP_MSG)
@logs_state_cli_group.command(name="cluster")
@click.argument(
    "glob_filter",
    required=False,
    default="*",
)
@address_option
@log_node_id_option
@log_node_ip_option
@log_follow_option
@log_tail_option
@log_interval_option
@log_timeout_option
@click.pass_context
@PublicAPI(stability="alpha")
def log_cluster(
    ctx,
    glob_filter: str,
    address: Optional[str],
    node_id: Optional[str],
    node_ip: Optional[str],
    follow: bool,
    tail: int,
    interval: float,
    timeout: int,
):
    """Get/List logs that matches the GLOB_FILTER in the cluster.

    By default, it prints a list of log files that match the filter.
    By default, it prints the head node logs.
    If there's only 1 match, it will print the log file.

    Example:
        Print the last 500 lines of raylet.out on a head node.
        ```
        ray logs [cluster] raylet.out --tail 500
        ```
        Print the last 500 lines of raylet.out on a worker node id A.
        ```
        ray logs [cluster] raylet.out --tail 500 --node-id A
        ```
        Download the gcs_server.txt file to the local machine.
        ```
        ray logs [cluster] gcs_server.out --tail -1 > gcs_server.txt
        ```
        Follow the log files from the last 100 lines.
        ```
        ray logs [cluster] raylet.out --tail 100 -f
        ```

    Raises:
        :ref:`RayStateApiException <state-api-exceptions>` if the CLI
        is failed to query the data.
    """
    if node_id is None and node_ip is None:
        # Neither node selector given: default to the head node.
        node_ip = _get_head_node_ip(address)
    logs = list_logs(
        address=address,
        node_id=node_id,
        node_ip=node_ip,
        glob_filter=glob_filter,
        timeout=timeout,
    )
    # Flatten the {category: [files]} mapping to count total matches.
    log_files_found = []
    for _, log_files in logs.items():
        for log_file in log_files:
            log_files_found.append(log_file)

    if len(log_files_found) != 1:
        # Print the list of log files found if no unique log found
        if node_id:
            print(f"Node ID: {node_id}")
        elif node_ip:
            print(f"Node IP: {node_ip}")
        print(output_with_format(logs, schema=None, format=AvailableFormat.YAML))
        return

    # If there's only 1 file, that means there's a unique match.
    filename = log_files_found[0]
    _print_log(
        address=address,
        node_id=node_id,
        node_ip=node_ip,
        filename=filename,
        tail=tail,
        follow=follow,
        interval=interval,
        timeout=timeout,
    )
@logs_state_cli_group.command(name="actor")
@click.option(
    "--id",
    "-a",
    required=False,
    type=str,
    default=None,
    help="Retrieves the logs corresponding to this ActorID.",
)
@click.option(
    "--pid",
    "-pid",
    required=False,
    type=str,
    default=None,
    help="Retrieves the logs from the actor with this pid.",
)
@address_option
@log_node_id_option
@log_node_ip_option
@log_follow_option
@log_tail_option
@log_interval_option
@log_timeout_option
@log_suffix_option
@click.pass_context
@PublicAPI(stability="alpha")
def log_actor(
    ctx,
    id: Optional[str],
    pid: Optional[str],
    address: Optional[str],
    node_id: Optional[str],
    node_ip: Optional[str],
    follow: bool,
    tail: int,
    interval: float,
    timeout: int,
    suffix: str,
):
    """Get/List logs associated with an actor.

    Example:
        Follow the log file with an actor id ABC.
        ```
        ray logs actor --id ABC --follow
        ```
        Get the actor log from pid 123, ip ABC.
        Note that this goes well with the driver log of Ray which prints
        (ip=ABC, pid=123, class_name) logs.
        ```
        ray logs actor --pid=123 -ip=ABC
        ```
        Get the actor err log file.
        ```
        ray logs actor --id ABC --suffix err
        ```

    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
        MissingParameter if inputs are missing.
    """
    # An actor can only be located by its id or its worker pid.
    if pid is None and id is None:
        raise click.MissingParameter(
            message="At least one of `--pid` and `--id` has to be set",
            param_type="option",
        )
    # NOTE(review): `pid` is forwarded as a str (option type=str) although
    # _print_log annotates it Optional[int]; presumably get_log() coerces.
    _print_log(
        address=address,
        node_id=node_id,
        node_ip=node_ip,
        pid=pid,
        actor_id=id,
        tail=tail,
        follow=follow,
        interval=interval,
        timeout=timeout,
        suffix=suffix,
    )
@logs_state_cli_group.command(name="worker")
@click.option(
    "--pid",
    "-pid",
    # The only identifier supported for now, TODO(rickyx): add worker id support
    required=True,
    type=str,
    help="Retrieves the logs from the worker with this pid.",
)
@address_option
@log_node_id_option
@log_node_ip_option
@log_follow_option
@log_tail_option
@log_interval_option
@log_timeout_option
@log_suffix_option
@click.pass_context
@PublicAPI(stability="alpha")
def log_worker(
    ctx,
    pid: Optional[str],
    address: Optional[str],
    node_id: Optional[str],
    node_ip: Optional[str],
    follow: bool,
    tail: int,
    interval: float,
    timeout: int,
    suffix: str,
):
    """Get/List logs associated with a worker process.

    Example:
        Follow the log file from a worker process with pid=ABC.
        ```
        ray logs worker --pid ABC --follow
        ```
        Get the stderr logs from a worker process.
        ```
        ray logs worker --pid ABC --suffix err
        ```

    Raises:
        :ref:`RayStateApiException <state-api-exceptions>`
        if the CLI is failed to query the data.
        MissingParameter if inputs are missing.
    """
    # --pid is required by click, so no manual validation is needed here.
    _print_log(
        address=address,
        node_id=node_id,
        node_ip=node_ip,
        pid=pid,
        tail=tail,
        follow=follow,
        interval=interval,
        timeout=timeout,
        suffix=suffix,
    )
|
{
"content_hash": "d081d35f4522bcad54e7b9ba3b53124b",
"timestamp": "",
"source": "github",
"line_count": 1120,
"max_line_length": 88,
"avg_line_length": 26.738392857142856,
"alnum_prop": 0.6101779810999433,
"repo_name": "ray-project/ray",
"id": "3040fe8df7b51393bcaf9fa394665e49003371bf",
"size": "29953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/state/state_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``GoogleMap`` CMS plugin model.

    Auto-generated; the field list mirrors the GoogleMap model (map
    position/zoom, route planner, plugin dimensions, and UI-control toggles).
    """

    dependencies = [
        # Requires the django-cms plugin tables (CMSPlugin base model).
        ('cms', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='GoogleMap',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
                ('title', models.CharField(verbose_name='map title', blank=True, null=True, max_length=100)),
                ('address', models.CharField(verbose_name='address', max_length=150)),
                ('zipcode', models.CharField(verbose_name='zip code', max_length=30)),
                ('city', models.CharField(verbose_name='city', max_length=100)),
                ('content', models.CharField(help_text='Displayed under address in the bubble.', blank=True, max_length=255, verbose_name='additional content')),
                ('zoom', models.PositiveSmallIntegerField(verbose_name='zoom level', default=13, choices=[(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'), (13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'), (19, '19'), (20, '20'), (21, '21')])),
                ('lat', models.DecimalField(help_text='Use latitude & longitude to fine tune the map position.', blank=True, max_digits=10, verbose_name='latitude', null=True, decimal_places=6)),
                ('lng', models.DecimalField(max_digits=10, verbose_name='longitude', blank=True, null=True, decimal_places=6)),
                ('route_planer_title', models.CharField(verbose_name='route planer title', blank=True, null=True, max_length=150, default='Calculate your fastest way to here')),
                ('route_planer', models.BooleanField(verbose_name='route planer', default=False)),
                ('width', models.CharField(help_text='Plugin width (in pixels or percent).', default='100%', max_length=6, verbose_name='width')),
                ('height', models.CharField(help_text='Plugin height (in pixels).', default='400px', max_length=6, verbose_name='height')),
                ('info_window', models.BooleanField(help_text='Show textbox over marker', default=True, verbose_name='info window')),
                ('scrollwheel', models.BooleanField(help_text='Enable scrollwheel zooming on the map', default=True, verbose_name='scrollwheel')),
                ('double_click_zoom', models.BooleanField(verbose_name='double click zoom', default=True)),
                ('draggable', models.BooleanField(verbose_name='draggable', default=True)),
                ('keyboard_shortcuts', models.BooleanField(verbose_name='keyboard shortcuts', default=True)),
                ('pan_control', models.BooleanField(verbose_name='Pan control', default=True)),
                ('zoom_control', models.BooleanField(verbose_name='zoom control', default=True)),
                ('street_view_control', models.BooleanField(verbose_name='Street View control', default=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
{
"content_hash": "40e43f6a5b7e4d12f15278c0502bd6df",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 352,
"avg_line_length": 75.34883720930233,
"alnum_prop": 0.6006172839506173,
"repo_name": "macs03/demo-cms",
"id": "790020a6fea96543b32a88bfb06cd00b72445702",
"size": "3264",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cms/lib/python2.7/site-packages/djangocms_googlemap/migrations_django/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "196163"
},
{
"name": "Gettext Catalog",
"bytes": "12979149"
},
{
"name": "Groff",
"bytes": "80"
},
{
"name": "HTML",
"bytes": "349717"
},
{
"name": "JavaScript",
"bytes": "533130"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "11972819"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "3771"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert ``product_catalogue.category`` from a FK to a ManyToMany.

    Django cannot alter FK -> M2M in place, so the old column is removed and
    the field is re-added as a ManyToManyField.
    NOTE(review): RemoveField drops existing category assignments without a
    data migration — confirm this was intended.
    """

    dependencies = [
        ('categories', '0001_initial'),
        ('products_catalogue', '0003_product_catalogue_restaurant'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product_catalogue',
            name='category',
        ),
        migrations.AddField(
            model_name='product_catalogue',
            name='category',
            field=models.ManyToManyField(blank=True, related_name='products_category', to='categories.Category'),
        ),
    ]
|
{
"content_hash": "2bb8af5270afe4aa70ba1060fcbf54f2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 113,
"avg_line_length": 27.347826086956523,
"alnum_prop": 0.6089030206677265,
"repo_name": "jojoriveraa/titulacion-NFCOW",
"id": "94e547beecc92d666ab8b52fde1d4c18dd70fe0d",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NFCow/products_catalogue/migrations/0004_auto_20160101_0212.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31227"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "112019"
},
{
"name": "Shell",
"bytes": "3829"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class CommonConfig(AppConfig):
    """Django application configuration for the ``saywiti.common`` app."""

    name = 'saywiti.common'
|
{
"content_hash": "35060e8597049c299f9d893a76be2660",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 19,
"alnum_prop": 0.7578947368421053,
"repo_name": "erickgnavar/saywiti",
"id": "e2bf787a2d0dcb68c9842425e505b995daa5173c",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saywiti/common/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7576"
},
{
"name": "JavaScript",
"bytes": "4115"
},
{
"name": "Python",
"bytes": "37534"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kobra'
copyright = u'2017, Almog Yalinewich'
author = u'Almog Yalinewich'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'kobradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'kobra.tex', u'kobra Documentation',
u'Almog Yalinewich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kobra', u'kobra Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'kobra', u'kobra Documentation',
author, 'kobra', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "b1e217f83721002e228f45b19eaf895b",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 79,
"avg_line_length": 32.13090909090909,
"alnum_prop": 0.7041647804436396,
"repo_name": "bolverk/kobra",
"id": "da353bf3a0626a0a3761886414ad7dcd8b0bc331",
"size": "9254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60771"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_vdom_netflow
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture: patch the module's Connection class.

    Ensures no test in this file ever opens a real connection.
    """
    return mocker.patch(
        'ansible.modules.network.fortios.fortios_system_vdom_netflow.Connection'
    )
fos_instance = FortiOSHandler(connection_mock)
def test_system_vdom_netflow_creation(mocker):
    """Happy path: a successful POST reports changed=True and no error."""
    mock_schema = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    args = {
        'username': 'admin',
        'state': 'present',
        'system_vdom_netflow': {
            'collector_ip': 'test_value_3',
            'collector_port': '4',
            'source_ip': '84.230.14.5',
            'vdom_netflow': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_vdom_netflow.fortios_system(args, fos_instance)

    # Underscored playbook keys must be translated to FortiOS' hyphenated names.
    mock_set.assert_called_with(
        'system', 'vdom-netflow',
        data={
            'collector-ip': 'test_value_3',
            'collector-port': '4',
            'source-ip': '84.230.14.5',
            'vdom-netflow': 'enable'
        },
        vdom='root')
    mock_schema.assert_not_called()

    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_vdom_netflow_creation_fails(mocker):
    """A failed POST (HTTP 500) must surface is_error=True and changed=False."""
    mock_schema = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    args = {
        'username': 'admin',
        'state': 'present',
        'system_vdom_netflow': {
            'collector_ip': 'test_value_3',
            'collector_port': '4',
            'source_ip': '84.230.14.5',
            'vdom_netflow': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_vdom_netflow.fortios_system(args, fos_instance)

    mock_set.assert_called_with(
        'system', 'vdom-netflow',
        data={
            'collector-ip': 'test_value_3',
            'collector-port': '4',
            'source-ip': '84.230.14.5',
            'vdom-netflow': 'enable'
        },
        vdom='root')
    mock_schema.assert_not_called()

    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_vdom_netflow_idempotent(mocker):
    """A 404 on the underlying call means 'already in desired state':
    neither an error nor a change is reported."""
    mock_schema = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    args = {
        'username': 'admin',
        'state': 'present',
        'system_vdom_netflow': {
            'collector_ip': 'test_value_3',
            'collector_port': '4',
            'source_ip': '84.230.14.5',
            'vdom_netflow': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_vdom_netflow.fortios_system(args, fos_instance)

    mock_set.assert_called_with(
        'system', 'vdom-netflow',
        data={
            'collector-ip': 'test_value_3',
            'collector-port': '4',
            'source-ip': '84.230.14.5',
            'vdom-netflow': 'enable'
        },
        vdom='root')
    mock_schema.assert_not_called()

    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_vdom_netflow_filter_foreign_attributes(mocker):
    """Attributes not in the module schema must be dropped before the API call."""
    mock_schema = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    args = {
        'username': 'admin',
        'state': 'present',
        'system_vdom_netflow': {
            'random_attribute_not_valid': 'tag',
            'collector_ip': 'test_value_3',
            'collector_port': '4',
            'source_ip': '84.230.14.5',
            'vdom_netflow': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_vdom_netflow.fortios_system(args, fos_instance)

    # 'random_attribute_not_valid' must not appear in the payload.
    mock_set.assert_called_with(
        'system', 'vdom-netflow',
        data={
            'collector-ip': 'test_value_3',
            'collector-port': '4',
            'source-ip': '84.230.14.5',
            'vdom-netflow': 'enable'
        },
        vdom='root')
    mock_schema.assert_not_called()

    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
|
{
"content_hash": "923b9bd7011091e1bc8c83bff833429a",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 133,
"avg_line_length": 35.459119496855344,
"alnum_prop": 0.6369279886484569,
"repo_name": "thaim/ansible",
"id": "5d776847edb24386c08e6c4a7459b5dd56b2cc0a",
"size": "6334",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_system_vdom_netflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
# Auto-generated driver: build and check one pyaf model combination on the
# ozone dataset (Difference transform, MovingMedian trend, per-minute
# seasonality, AR cycle component).
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingMedian'] , ['Seasonal_Minute'] , ['AR'] );
|
{
"content_hash": "9651747aeec332c11a025fe26d0a5610",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 88,
"avg_line_length": 40.25,
"alnum_prop": 0.7142857142857143,
"repo_name": "antoinecarme/pyaf",
"id": "0d6bc06f90d18e603550680a67fd74d8a998eb7d",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_Minute_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
import IECoreGL
import Gaffer
import GafferUI
import GafferUITest
class StandardStyleTest( GafferUITest.TestCase ) :
	"""Exercises the colour and font accessors of GafferUI.StandardStyle."""

	def testColorAccessors( self ) :
		"""setColor()/getColor() must round-trip a distinct colour per enum value."""
		s = GafferUI.StandardStyle()

		# Give every colour slot a unique value so a cross-slot mixup would fail.
		i = 0
		for n in GafferUI.StandardStyle.Color.names :
			if n=="LastColor" :
				continue
			c = IECore.Color3f( i )
			v = getattr( GafferUI.StandardStyle.Color, n )
			s.setColor( v, c )
			self.assertEqual( s.getColor( v ), c )
			i += 1

	def testFontAccessors( self ) :
		"""setFont()/getFont() must return the same font object for every text type."""
		s = GafferUI.StandardStyle()
		f = IECoreGL.FontLoader.defaultFontLoader().load( "VeraMono.ttf" )
		for n in GafferUI.Style.TextType.names :
			if n=="LastText" :
				continue
			v = getattr( GafferUI.Style.TextType, n )
			s.setFont( v, f )
			# assertTrue replaces failUnless, deprecated since Python 2.7/3.1
			# and removed in 3.12.
			self.assertTrue( s.getFont( v ).isSame( f ) )
# Allow running this test file directly.
if __name__ == "__main__":
	unittest.main()
|
{
"content_hash": "31ab39b40e4e7eead83efd8473f74da6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 68,
"avg_line_length": 19.288888888888888,
"alnum_prop": 0.6463133640552995,
"repo_name": "DoubleNegativeVisualEffects/gaffer",
"id": "e7e3f6bb156271ba5081e8476e88b1b2194d0c2b",
"size": "2685",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/GafferUITest/StandardStyleTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import ProjectOption
from sentry.testutils import APITestCase
class ReleaseTokenGetTest(APITestCase):
    """Tests for the project release-token endpoint."""

    def _token_url(self, project):
        # Every test hits the same endpoint; build the URL in one place.
        return reverse(
            'sentry-api-0-project-releases-token',
            kwargs={
                'organization_slug': project.organization.slug,
                'project_slug': project.slug,
            }
        )

    def test_simple(self):
        """GET returns the token previously stored on the project."""
        project = self.create_project(name='foo')
        ProjectOption.objects.set_value(
            project, 'sentry:release-token', 'abcdefghijklmnop')

        self.login_as(user=self.user)
        response = self.client.get(self._token_url(project))

        assert response.status_code == 200, response.content
        assert response.data['token'] == 'abcdefghijklmnop'

    def test_generates_token(self):
        """GET lazily creates and persists a token when none exists yet."""
        project = self.create_project(name='foo')

        self.login_as(user=self.user)
        response = self.client.get(self._token_url(project))

        assert response.status_code == 200, response.content
        assert response.data['token'] is not None
        assert ProjectOption.objects.get_value(project, 'sentry:release-token') is not None

    def test_regenerates_token(self):
        """POST replaces any existing token with a freshly generated one."""
        project = self.create_project(name='foo')
        ProjectOption.objects.set_value(
            project, 'sentry:release-token', 'abcdefghijklmnop')

        self.login_as(user=self.user)
        response = self.client.post(self._token_url(project), {'project': project.slug})

        assert response.status_code == 200, response.content
        assert response.data['token'] is not None
        assert response.data['token'] != 'abcdefghijklmnop'
|
{
"content_hash": "a20e7799f308cf308d833f8ecc9ee8f7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 91,
"avg_line_length": 29.19736842105263,
"alnum_prop": 0.5912573231185219,
"repo_name": "jean/sentry",
"id": "8387857cf537d75437b7a4feffd27b6deabc27ad",
"size": "2244",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_project_release_token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
}
|
from win32com.shell import shell, shellcon
import win32api
import os
def testSHFileOperation(file_cnt):
    """Move `file_cnt` temp files via SHFileOperation, check, then delete them."""
    temp_dir = os.environ['temp']
    sources = [win32api.GetTempFileName(temp_dir, 'sfo')[0] for _ in range(file_cnt)]
    targets = [
        os.path.join(temp_dir, 'copy of ' + os.path.split(src)[1])
        for src in sources
    ]
    # NUL-separated lists are how SHFileOperation takes multiple paths.
    move_flags = shellcon.FOF_MULTIDESTFILES | shellcon.FOF_NOCONFIRMATION
    shell.SHFileOperation(
        (0, shellcon.FO_MOVE, '\0'.join(sources), '\0'.join(targets), move_flags))

    for src in sources:
        assert not os.path.isfile(src)
    for dst in targets:
        assert os.path.isfile(dst)
        shell.SHFileOperation(
            (0, shellcon.FO_DELETE, dst, None,
             shellcon.FOF_NOCONFIRMATION | shellcon.FOF_NOERRORUI))
def testSHNAMEMAPPINGS(file_cnt):
    """Move files onto names that already exist and check the rename mappings.

    FOF_RENAMEONCOLLISION makes the shell invent fresh names; together with
    FOF_WANTMAPPINGHANDLE those renames come back as a sequence of
    (old, new) 2-tuples built from the SHNAMEMAPPINGS handle.
    """
    temp_dir = os.environ['temp']
    sources = [win32api.GetTempFileName(temp_dir, 'sfo')[0] for _ in range(file_cnt)]
    targets = [win32api.GetTempFileName(temp_dir, 'sfo')[0] for _ in range(file_cnt)]
    flags = (shellcon.FOF_MULTIDESTFILES | shellcon.FOF_NOCONFIRMATION |
             shellcon.FOF_RENAMEONCOLLISION | shellcon.FOF_WANTMAPPINGHANDLE)
    rc, banyaborted, NameMappings = shell.SHFileOperation(
        (0, shellcon.FO_MOVE, '\0'.join(sources), '\0'.join(targets), flags))
    for old_fname, new_fname in NameMappings:
        print('Old:', old_fname, 'New:', new_fname)
    assert len(NameMappings) == file_cnt
# Exercise the helpers: bulk move, single-file move, and collision renaming.
testSHFileOperation(10)
testSHFileOperation(1)
testSHNAMEMAPPINGS(5)
|
{
"content_hash": "7d00d07a9d5f0917c01f513af744643c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 127,
"avg_line_length": 45.648648648648646,
"alnum_prop": 0.7199526346950859,
"repo_name": "ArcherSys/ArcherSys",
"id": "e4b53034dd4946287d9e8e458c50ec0fab6db194",
"size": "1689",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Lib/site-packages/win32comext/shell/test/testSHFileOperation.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import os
import tempfile
import six
import unittest
# Make the project root importable so `auth` resolves wherever tests run from.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, BASE_DIR)
from auth import create_app
from auth.models import db, users, Role
from flask_security.utils import encrypt_password
from lxml import etree
# Load the pristine fixture database once at import time; setUp() copies it
# into a fresh temp file per test, so tests never share database state.
with open(BASE_DIR + '/tests/tests.db', 'rb') as local_db:
    content = local_db.read()
class TestCase(unittest.TestCase):
    """Base test case: boots the app against a throwaway copy of the fixture DB.

    Provides assertion helpers for redirects, permission errors and the
    login flow shared by the concrete test modules.
    """

    def setUp(self):
        # Each test gets its own on-disk copy of the pristine database so
        # tests cannot leak state into one another.
        fd, self.filename = tempfile.mkstemp()
        os.write(fd, content)
        os.close(fd)

        config = {
            'SQLALCHEMY_DATABASE_URI': 'sqlite:///' + self.filename,
        }

        self.app = create_app(config)
        self.client = self.app.test_client()

    def tearDown(self):
        os.unlink(self.filename)

    def get_html(self, response):
        """Parse a response body into an lxml tree for XPath assertions."""
        data = response.get_data()
        # NOTE(review): on Py3 `data` is bytes, so str() yields "b'...'";
        # the recovering HTML parser still finds the elements -- verify intended.
        parser = etree.HTMLParser()
        return etree.parse(six.StringIO(str(data)), parser)

    def assertOk(self, url):
        """Assert a 200 response for `url` and return the parsed HTML."""
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        return self.get_html(response)

    def assertRegex(self, *args, **kwargs):
        # Python 2's unittest spells this assertRegexpMatches.
        if six.PY3:
            return super(TestCase, self).assertRegex(*args, **kwargs)
        else:
            return self.assertRegexpMatches(*args, **kwargs)

    def assertTitle(self, html, title):
        """Assert the page <title> starts with `title` followed by ' -'."""
        self.assertRegex(html.find('//title').text, r'^%s -' % title)

    def urlencode(self, value):
        """Encode a query-string dict portably across Python 2 and 3."""
        if six.PY3:
            import urllib.parse
            return urllib.parse.urlencode(value)
        else:
            import urllib
            return urllib.urlencode(value)

    def assertRedirect(self, url, location='http://localhost/'):
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(location, response.headers['Location'])

    def assertRedirectPost(self, url, data=None, location='http://localhost/'):
        # `data=None` instead of a mutable `{}` default (shared-state pitfall).
        response = self.client.post(url, data={} if data is None else data)
        self.assertEqual(302, response.status_code)
        self.assertEqual(location, response.headers['Location'])

    def assertForbidden(self, url):
        """Assert the request ends on a 'no permission' flash message."""
        response = self.client.get(url, follow_redirects=True)
        html = self.get_html(response)
        li = html.find('//ul[@class="flashes"]/li[@class="error"]')
        self.assertEqual('You do not have permission to view this resource.', li.text)

    def assertForbiddenPost(self, url, data=None):
        response = self.client.post(url, data={} if data is None else data,
                                    follow_redirects=True)
        html = self.get_html(response)
        li = html.find('//ul[@class="flashes"]/li[@class="error"]')
        self.assertEqual('You do not have permission to view this resource.', li.text)

    def assertLogin(self, url):
        """Assert an anonymous GET of `url` redirects to the login page."""
        location = 'http://localhost/login'
        if url != '/':
            location += '?' + self.urlencode({'next': url})
        self.assertRedirect(url, location)

    def assertLoginPost(self, url, data=None):
        """Assert an anonymous POST to `url` redirects to the login page."""
        location = 'http://localhost/login'
        if url != '/':
            location += '?' + self.urlencode({'next': url})
        self.assertRedirectPost(url, data, location)

    def create_user(self, email, password, role, network_id=None, gateway_id=None):
        """Create and persist a user with the given role inside an app context."""
        with self.app.test_request_context():
            user = users.create_user(email=email, password=encrypt_password(password))
            user.network_id = network_id
            user.gateway_id = gateway_id
            role = Role.query.filter_by(name=role).first_or_404()
            user.roles.append(role)
            db.session.commit()

    def login(self, email, password):
        return self.client.post('/login', data=dict(email=email, password=password))

    def logout(self):
        return self.client.get('/logout')
|
{
"content_hash": "12e914e97301f76e6fa6d8e32efb2941",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 86,
"avg_line_length": 34.39823008849557,
"alnum_prop": 0.6207872395163365,
"repo_name": "datashaman/wifidog-auth-flask",
"id": "0e0a0698cafeaba8aae192f92a8fb92b7912c159",
"size": "3887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7069"
},
{
"name": "HTML",
"bytes": "49450"
},
{
"name": "JavaScript",
"bytes": "4567"
},
{
"name": "Makefile",
"bytes": "2000"
},
{
"name": "Python",
"bytes": "99304"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse
from django.db import connection, transaction
from django.shortcuts import get_object_or_404
from helpers.shortcuts import set_message
from forms import FortuneForm
from models import Fortune
import strings
def index(request, template='fortunes/index.html'):
    """Main view of fortunes app: submission form plus a random fortune."""
    form = FortuneForm()
    fortune = None

    if request.method == 'POST':
        form = FortuneForm(data=request.POST)
        if form.is_valid():
            fortune = form.save(remote_addr=request.META.get('REMOTE_ADDR', None))
            set_message(strings.FORTUNE_CREATED_MSG, request)
            return HttpResponseRedirect(reverse('homepage'))

    # On GET, or an invalid POST, fall through and show the form again.
    context = {
        'form': form,
        'random_fortune': Fortune.objects.random(exclude=fortune),
    }
    return direct_to_template(request, template, context)
def fortune_detail(request, url_id, format='text', template='fortunes/fortune_detail.html'):
    """Render a single accepted, moderated fortune, as plain text or HTML."""
    fortune = get_object_or_404(Fortune, url_id=url_id, accepted=True, moderated=True)
    if format != 'text':
        return direct_to_template(request, template, {'fortune': fortune})
    return HttpResponse(fortune.as_text(), content_type='text/plain; charset="utf-8"')
def fortunes_as_text(request):
    """Render every accepted fortune as one plain-text document."""
    body = '\n'.join(fortune.as_text() for fortune in Fortune.objects.accepted())
    return HttpResponse(body, content_type='text/plain; charset="utf-8"')
def fortunes_as_html(request, template='fortunes/fortunes.html'):
    """Render every accepted fortune through the HTML template."""
    context = {'fortunes': Fortune.objects.accepted()}
    return direct_to_template(request, template, context)
|
{
"content_hash": "010fec41faceeadeff9f690feec21bb8",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 92,
"avg_line_length": 35.14754098360656,
"alnum_prop": 0.6240671641791045,
"repo_name": "gnrfan/chichafortunes",
"id": "042e6a71cf63e39da719076548954fba9bb7b856",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fortunes/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "42427"
},
{
"name": "Python",
"bytes": "21465"
}
],
"symlink_target": ""
}
|
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ShellExampleTests(PyFlinkTestCase):
    """
    Executable copies of the example snippets embedded in shell.py.

    If these tests fail, fix the example code here and copy the block between
    the "example begin" marker and the verification section back to shell.py.
    The bodies are intentionally kept byte-for-byte identical to shell.py.
    """

    def test_batch_case(self):
        from pyflink.shell import b_env, bt_env, FileSystem, OldCsv, DataTypes, Schema
        # example begin
        import tempfile
        import os
        import shutil
        sink_path = tempfile.gettempdir() + '/batch.csv'
        if os.path.exists(sink_path):
            if os.path.isfile(sink_path):
                os.remove(sink_path)
            else:
                shutil.rmtree(sink_path)
        b_env.set_parallelism(1)
        t = bt_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
        bt_env.connect(FileSystem().path(sink_path))\
            .with_format(OldCsv()
                         .field_delimiter(',')
                         .field("a", DataTypes.BIGINT())
                         .field("b", DataTypes.STRING())
                         .field("c", DataTypes.STRING()))\
            .with_schema(Schema()
                         .field("a", DataTypes.BIGINT())
                         .field("b", DataTypes.STRING())
                         .field("c", DataTypes.STRING()))\
            .create_temporary_table("batch_sink")
        t.select("a + 1, b, c").insert_into("batch_sink")
        bt_env.execute("batch_job")
        # verification only -- do not copy the lines below into shell.py
        with open(sink_path, 'r') as f:
            lines = f.read()
            self.assertEqual(lines, '2,hi,hello\n' + '3,hi,hello\n')

    def test_stream_case(self):
        from pyflink.shell import s_env, st_env, FileSystem, OldCsv, DataTypes, Schema
        # example begin
        import tempfile
        import os
        import shutil
        sink_path = tempfile.gettempdir() + '/streaming.csv'
        if os.path.exists(sink_path):
            if os.path.isfile(sink_path):
                os.remove(sink_path)
            else:
                shutil.rmtree(sink_path)
        s_env.set_parallelism(1)
        t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
        st_env.connect(FileSystem().path(sink_path))\
            .with_format(OldCsv()
                         .field_delimiter(',')
                         .field("a", DataTypes.BIGINT())
                         .field("b", DataTypes.STRING())
                         .field("c", DataTypes.STRING()))\
            .with_schema(Schema()
                         .field("a", DataTypes.BIGINT())
                         .field("b", DataTypes.STRING())
                         .field("c", DataTypes.STRING()))\
            .create_temporary_table("stream_sink")
        t.select("a + 1, b, c").insert_into("stream_sink")
        st_env.execute("stream_job")
        # verification only -- do not copy the lines below into shell.py
        with open(sink_path, 'r') as f:
            lines = f.read()
            self.assertEqual(lines, '2,hi,hello\n' + '3,hi,hello\n')
|
{
"content_hash": "e5b4c675a6577ec77a43d49e78f90bef",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 91,
"avg_line_length": 38.734177215189874,
"alnum_prop": 0.5049019607843137,
"repo_name": "kaibozhou/flink",
"id": "eaee9d83d50f4b9f046d6acaa240d1c5f407286a",
"size": "4018",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/table/tests/test_shell_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4722"
},
{
"name": "CSS",
"bytes": "58149"
},
{
"name": "Clojure",
"bytes": "93247"
},
{
"name": "Dockerfile",
"bytes": "12142"
},
{
"name": "FreeMarker",
"bytes": "28662"
},
{
"name": "HTML",
"bytes": "108850"
},
{
"name": "Java",
"bytes": "53661126"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "1129763"
},
{
"name": "Scala",
"bytes": "13885013"
},
{
"name": "Shell",
"bytes": "533455"
},
{
"name": "TSQL",
"bytes": "123113"
},
{
"name": "TypeScript",
"bytes": "249103"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from typing import Dict, Type
from .base import DatasetServiceTransport
from .grpc import DatasetServiceGrpcTransport
from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
# Registry mapping transport names to their implementation classes; consumed
# by the client factory when selecting a transport.
_transport_registry = OrderedDict()  # type: Dict[str, Type[DatasetServiceTransport]]
_transport_registry["grpc"] = DatasetServiceGrpcTransport
_transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
# Public re-exports of this package.
__all__ = (
    "DatasetServiceTransport",
    "DatasetServiceGrpcTransport",
    "DatasetServiceGrpcAsyncIOTransport",
)
|
{
"content_hash": "822e544b9c01289e18cc8e3ec15a8f4a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 33,
"alnum_prop": 0.8080808080808081,
"repo_name": "googleapis/python-aiplatform",
"id": "07bc11c0c89728531bb6f151d4b7274cecdabe3b",
"size": "1194",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
from parse_folder import ParseFolderTask
from create_db import CreateDbTask
from analyze_db import AnalyzeDbTask
|
{
"content_hash": "2ff6aec56eed9280379c4efee2bb1ad5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 40,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.8672566371681416,
"repo_name": "crohkohl/DIGITS",
"id": "636a7dd465c10fd77c83b426010469306fd7820c",
"size": "183",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "digits/dataset/tasks/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "710"
},
{
"name": "HTML",
"bytes": "158019"
},
{
"name": "JavaScript",
"bytes": "103357"
},
{
"name": "Python",
"bytes": "430506"
},
{
"name": "Shell",
"bytes": "1377"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import struct
import sys
from uproot3._util import _tobytes
class Cursor(object):
    """Tracks a byte position while writing structured data to a sink.

    Naming convention, visible throughout the class:
      * ``update_*`` -- write at the current index, do NOT advance it;
      * ``write_*``  -- write at the current index AND advance past it;
      * ``put_*``    -- return the serialized bytes and advance (for callers
        that assemble the output themselves).
    """

    def __init__(self, index):
        self.index = index

    def skip(self, numbytes):
        """Advance the cursor without writing anything."""
        self.index += numbytes

    def update_fields(self, sink, format, *args):
        """Write a packed struct at the current index; do not advance."""
        sink.write(format.pack(*args), self.index)

    def write_fields(self, sink, format, *args):
        """Write a packed struct and advance past it."""
        self.update_fields(sink, format, *args)
        self.index += format.size

    def put_fields(self, format, *args):
        """Return the packed struct bytes and advance past them."""
        self.index += format.size
        return format.pack(*args)

    @staticmethod
    def length_string(string):
        """Serialized size of a length-prefixed string.

        Short strings carry a 1-byte length; strings of 255 bytes or more
        carry a 0xFF marker plus a 4-byte length (hence +5).
        """
        if len(string) < 255:
            return len(string) + 1
        else:
            return len(string) + 5

    @staticmethod
    def length_strings(strings):
        """Total serialized size of a sequence of strings."""
        return sum(Cursor.length_string(x) for x in strings)

    _format_byte = struct.Struct("B")
    _format_byteint = struct.Struct(">Bi")

    def update_string(self, sink, data):
        """Write a length-prefixed string at the current index; do not advance."""
        if len(data) < 255:
            sink.write(self._format_byte.pack(len(data)), self.index)
            sink.write(data, self.index + 1)
        else:
            sink.write(self._format_byteint.pack(255, len(data)), self.index)
            sink.write(data, self.index + 5)

    def write_string(self, sink, data):
        self.update_string(sink, data)
        self.index += self.length_string(data)

    def put_string(self, data):
        self.index += self.length_string(data)
        if len(data) < 255:
            return self._format_byte.pack(len(data)) + data
        else:
            return self._format_byteint.pack(255, len(data)) + data

    def update_cstring(self, sink, data):
        """Write a NUL-terminated string at the current index; do not advance."""
        sink.write(data, self.index)
        # Fix: the terminator must land directly after the payload. It was
        # previously written without an explicit position, unlike every other
        # write in this class.
        sink.write(b"\x00", self.index + len(data))

    def write_cstring(self, sink, data):
        self.update_cstring(sink, data)
        self.index += len(data) + 1

    def put_cstring(self, data):
        # NOTE(review): unlike write_cstring (which passes ``data`` through
        # unencoded), this encodes -- so it expects str while the write_* path
        # expects bytes; confirm callers rely on this asymmetry.
        self.index += len(data) + 1
        return data.encode("utf-8") + b"\x00"

    def update_data(self, sink, data):
        sink.write(data, self.index)

    def write_data(self, sink, data):
        self.update_data(sink, data)
        self.index += len(data)

    def put_data(self, data):
        self.index += len(data)
        return data

    def put_array(self, data):
        self.index += data.nbytes
        return _tobytes(data)

    def update_array(self, sink, data):
        sink.write(_tobytes(data), self.index)

    def write_array(self, sink, data):
        self.update_array(sink, data)
        self.index += data.nbytes
|
{
"content_hash": "511c1bd8a521d92f36b853e107424707",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 77,
"avg_line_length": 28.344444444444445,
"alnum_prop": 0.5907487259898079,
"repo_name": "scikit-hep/uproot",
"id": "4d2110da33be00fc155513e06207954dffe14345",
"size": "2661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uproot3/write/sink/cursor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15573"
},
{
"name": "C++",
"bytes": "100"
},
{
"name": "Jupyter Notebook",
"bytes": "270594"
},
{
"name": "Python",
"bytes": "813073"
},
{
"name": "Shell",
"bytes": "566"
}
],
"symlink_target": ""
}
|
"""
L{twisted.words} support for Instance Messenger.
"""
from twisted.internet import defer
from twisted.internet import error
from twisted.python import log
from twisted.python.failure import Failure
from twisted.spread import pb
from twisted.words.im.locals import ONLINE, OFFLINE, AWAY
from twisted.words.im import basesupport, interfaces
from zope.interface import implements
class TwistedWordsPerson(basesupport.AbstractPerson):
    """I am a facade for a person you can talk to through a twisted.words service.
    """
    def __init__(self, name, wordsAccount):
        basesupport.AbstractPerson.__init__(self, name, wordsAccount)
        self.status = OFFLINE
    def isOnline(self):
        # AWAY still counts as reachable for messaging purposes.
        return ((self.status == ONLINE) or
                (self.status == AWAY))
    def getStatus(self):
        return self.status
    def sendMessage(self, text, metadata):
        """Send a direct message; returns a Deferred firing on completion.

        If metadata is given, try the richer directMessage signature first
        and fall back to plain text (prefixed with '* ') if the server
        rejects it.
        """
        if metadata:
            d=self.account.client.perspective.directMessage(self.name,
                                                            text, metadata)
            d.addErrback(self.metadataFailed, "* "+text)
            return d
        else:
            return self.account.client.perspective.callRemote('directMessage',self.name, text)
    def metadataFailed(self, result, text):
        # Fallback path: the server did not accept metadata, resend as
        # plain text (already '* '-prefixed by sendMessage).
        print "result:",result,"text:",text
        return self.account.client.perspective.directMessage(self.name, text)
    def setStatus(self, status):
        # NOTE(review): self.chat is presumably assigned by
        # basesupport.AbstractPerson -- confirm there.
        self.status = status
        self.chat.getContactsList().setContactStatus(self)
class TwistedWordsGroup(basesupport.AbstractGroup):
    """A chat group (channel) reached through a twisted.words service."""
    implements(interfaces.IGroup)
    def __init__(self, name, wordsClient):
        basesupport.AbstractGroup.__init__(self, name, wordsClient)
        # 1 while we are joined to this group on the server, else 0.
        self.joined = 0
    def sendGroupMessage(self, text, metadata=None):
        """Send a message to the group; returns a Deferred.

        With metadata, attempt the richer remote signature and fall back to
        plain text (prefixed with '* ') on failure.
        """
        #for backwards compatibility with older twisted.words servers.
        if metadata:
            d=self.account.client.perspective.callRemote(
                'groupMessage', self.name, text, metadata)
            d.addErrback(self.metadataFailed, "* "+text)
            return d
        else:
            return self.account.client.perspective.callRemote('groupMessage',
                                                              self.name, text)
    def setTopic(self, text):
        # NOTE(review): self.client is not assigned anywhere in this class;
        # this looks like it should be self.account.client.name -- verify.
        self.account.client.perspective.callRemote(
            'setGroupMetadata',
            {'topic': text, 'topic_author': self.client.name},
            self.name)
    def metadataFailed(self, result, text):
        # Metadata unsupported by the server: resend as plain text.
        print "result:",result,"text:",text
        return self.account.client.perspective.callRemote('groupMessage',
                                                          self.name, text)
    def joining(self):
        # Local bookkeeping only; the actual join is done by the client.
        self.joined = 1
    def leaving(self):
        self.joined = 0
    def leave(self):
        return self.account.client.perspective.callRemote('leaveGroup',
                                                          self.name)
class TwistedWordsClient(pb.Referenceable, basesupport.AbstractClientMixin):
    """In some cases, this acts as an Account, since it is a source of text
    messages (multiple Words instances may be on a single PB connection).

    The remote_* methods are entry points invoked by the server over
    Perspective Broker.
    """
    def __init__(self, acct, serviceName, perspectiveName, chatui,
                 _logonDeferred=None):
        self.accountName = "%s (%s:%s)" % (acct.accountName, serviceName, perspectiveName)
        self.name = perspectiveName
        print "HELLO I AM A PB SERVICE", serviceName, perspectiveName
        self.chat = chatui
        self.account = acct
        # Fired with self once the remote 'attach' completes (see connected).
        self._logonDeferred = _logonDeferred
    def getPerson(self, name):
        return self.chat.getPerson(name, self)
    def getGroup(self, name):
        return self.chat.getGroup(name, self)
    def getGroupConversation(self, name):
        return self.chat.getGroupConversation(self.getGroup(name))
    def addContact(self, name):
        self.perspective.callRemote('addContact', name)
    def remote_receiveGroupMembers(self, names, group):
        # Server push: full member list for a group we asked about.
        print 'received group members:', names, group
        self.getGroupConversation(group).setGroupMembers(names)
    def remote_receiveGroupMessage(self, sender, group, message, metadata=None):
        print 'received a group message', sender, group, message, metadata
        self.getGroupConversation(group).showGroupMessage(sender, message, metadata)
    def remote_memberJoined(self, member, group):
        print 'member joined', member, group
        self.getGroupConversation(group).memberJoined(member)
    def remote_memberLeft(self, member, group):
        print 'member left'
        self.getGroupConversation(group).memberLeft(member)
    def remote_notifyStatusChanged(self, name, status):
        self.chat.getPerson(name, self).setStatus(status)
    def remote_receiveDirectMessage(self, name, message, metadata=None):
        self.chat.getConversation(self.chat.getPerson(name, self)).showMessage(message, metadata)
    def remote_receiveContactList(self, clist):
        # clist is a sequence of (name, status) pairs.
        for name, status in clist:
            self.chat.getPerson(name, self).setStatus(status)
    def remote_setGroupMetadata(self, dict_, groupName):
        if dict_.has_key("topic"):
            self.getGroupConversation(groupName).setTopic(dict_["topic"], dict_.get("topic_author", None))
    def joinGroup(self, name):
        self.getGroup(name).joining()
        return self.perspective.callRemote('joinGroup', name).addCallback(self._cbGroupJoined, name)
    def leaveGroup(self, name):
        self.getGroup(name).leaving()
        return self.perspective.callRemote('leaveGroup', name).addCallback(self._cbGroupLeft, name)
    def _cbGroupJoined(self, result, name):
        groupConv = self.chat.getGroupConversation(self.getGroup(name))
        groupConv.showGroupMessage("sys", "you joined")
        # Member list arrives asynchronously via remote_receiveGroupMembers.
        self.perspective.callRemote('getGroupMembers', name)
    def _cbGroupLeft(self, result, name):
        print 'left',name
        groupConv = self.chat.getGroupConversation(self.getGroup(name), 1)
        groupConv.showGroupMessage("sys", "you left")
    def connected(self, perspective):
        # Called when the remote 'attach' completes; fires the logon Deferred.
        print 'Connected Words Client!', perspective
        if self._logonDeferred is not None:
            self._logonDeferred.callback(self)
        self.perspective = perspective
        self.chat.getContactsList()
# Maps PB service-type names to their local client factories;
# twisted.reality has no frontend implemented here.
pbFrontEnds = {
    "twisted.words": TwistedWordsClient,
    "twisted.reality": None
    }
class PBAccount(basesupport.AbstractAccount):
    """An account on a Perspective Broker server, possibly spanning several
    services (Words instances) on a single connection."""
    implements(interfaces.IAccount)
    gatewayType = "PB"
    _groupFactory = TwistedWordsGroup
    _personFactory = TwistedWordsPerson
    def __init__(self, accountName, autoLogin, username, password, host, port,
                 services=None):
        """
        @param username: The name of your PB Identity.
        @type username: string
        """
        basesupport.AbstractAccount.__init__(self, accountName, autoLogin,
                                             username, password, host, port)
        # Each entry: [frontend factory, service name, perspective name].
        self.services = []
        if not services:
            services = [('twisted.words', 'twisted.words', username)]
        for serviceType, serviceName, perspectiveName in services:
            self.services.append([pbFrontEnds[serviceType], serviceName,
                                  perspectiveName])
    def logOn(self, chatui):
        """
        @returns: this breaks with L{interfaces.IAccount}
        @returntype: DeferredList of L{interfaces.IClient}s
        """
        # Overriding basesupport's implementation on account of the
        # fact that _startLogOn tends to return a deferredList rather
        # than a simple Deferred, and we need to do registerAccountClient.
        if (not self._isConnecting) and (not self._isOnline):
            self._isConnecting = 1
            d = self._startLogOn(chatui)
            d.addErrback(self._loginFailed)
            def registerMany(results):
                # Register every successfully attached service client;
                # log (but do not abort on) individual failures.
                for success, result in results:
                    if success:
                        chatui.registerAccountClient(result)
                        self._cb_logOn(result)
                    else:
                        log.err(result)
            d.addCallback(registerMany)
            return d
        else:
            raise error.ConnectionError("Connection in progress")
    def _startLogOn(self, chatui):
        # Step 1: open the PB connection to the server's root object.
        print 'Connecting...',
        d = pb.getObjectAt(self.host, self.port)
        d.addCallbacks(self._cbConnected, self._ebConnected,
                       callbackArgs=(chatui,))
        return d
    def _cbConnected(self, root, chatui):
        # Step 2: authenticate our identity with the server.
        print 'Connected!'
        print 'Identifying...',
        d = pb.authIdentity(root, self.username, self.password)
        d.addCallbacks(self._cbIdent, self._ebConnected,
                       callbackArgs=(chatui,))
        return d
    def _cbIdent(self, ident, chatui):
        # Step 3: attach a client handler to each configured service.
        if not ident:
            print 'falsely identified.'
            return self._ebConnected(Failure(Exception("username or password incorrect")))
        print 'Identified!'
        dl = []
        for handlerClass, sname, pname in self.services:
            d = defer.Deferred()
            dl.append(d)
            handler = handlerClass(self, sname, pname, chatui, d)
            ident.callRemote('attach', sname, pname, handler).addCallback(handler.connected)
        return defer.DeferredList(dl)
    def _ebConnected(self, error):
        # Connection/auth failure: propagate the Failure to the caller.
        print 'Not connected.'
        return error
|
{
"content_hash": "1744c5c4927c09c668ec556e074819ed",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 106,
"avg_line_length": 37.26953125,
"alnum_prop": 0.6269783041609894,
"repo_name": "berendkleinhaneveld/VTK",
"id": "04d14e997d333c79164c5d8f76207578ebf3127f",
"size": "9615",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "ThirdParty/Twisted/twisted/words/im/pbsupport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "53279549"
},
{
"name": "C++",
"bytes": "64123408"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29068"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "IDL",
"bytes": "4406"
},
{
"name": "Java",
"bytes": "197110"
},
{
"name": "JavaScript",
"bytes": "1283723"
},
{
"name": "Objective-C",
"bytes": "125350"
},
{
"name": "Objective-C++",
"bytes": "192609"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "177007"
},
{
"name": "Python",
"bytes": "15901722"
},
{
"name": "Shell",
"bytes": "49248"
},
{
"name": "Tcl",
"bytes": "1892632"
}
],
"symlink_target": ""
}
|
""" A WebIDL parser. """
from ply import lex, yacc
import re
import os
import traceback
import math
# Machinery
def parseInt(literal):
    """Parse a WebIDL integer literal (decimal, octal, or hex), honoring
    an optional leading minus sign."""
    text = literal
    negative = text[0] == '-'
    if negative:
        text = text[1:]

    # A leading zero selects hex ("0x"/"0X") or octal; a lone "0" is decimal.
    if text[0] == '0' and len(text) > 1:
        if text[1] in ('x', 'X'):
            radix = 16
            text = text[2:]
        else:
            radix = 8
            text = text[1:]
    else:
        radix = 10

    magnitude = int(text, radix)
    return -magnitude if negative else magnitude
# Magic for creating enums
def M_add_class_attribs(attribs, start):
    """Return a metaclass-style factory that assigns each name in *attribs*
    a sequential integer value starting at *start*, plus a 'length'
    attribute set to one past the final value."""
    def make_class(name, bases, dict_):
        for offset, attrib in enumerate(attribs):
            dict_[attrib] = start + offset
        assert 'length' not in dict_
        dict_['length'] = start + len(attribs)
        return type(name, bases, dict_)
    return make_class
def enum(*names, **kw):
    """Create a read-only enum object whose attributes are *names* mapped
    to sequential integers.  Pass ``base=<existing enum instance>`` to
    continue numbering from that enum's 'length'.

    NOTE(review): relies on the Python 2 ``__metaclass__`` class attribute;
    under Python 3 the metaclass would be silently ignored.
    """
    if len(kw) == 1:
        # Extend an existing enum: start numbering where it left off.
        base = kw['base'].__class__
        start = base.length
    else:
        assert len(kw) == 0
        base = object
        start = 0
    class Foo(base):
        __metaclass__ = M_add_class_attribs(names, start)
        def __setattr__(self, name, value): # this makes it read-only
            raise NotImplementedError
    return Foo()
class WebIDLError(Exception):
    """An error or warning produced while processing WebIDL, carrying the
    source locations involved."""

    def __init__(self, message, locations, warning=False):
        self.message = message
        # Stringify eagerly so locations render even after parser state is gone.
        self.locations = [str(loc) for loc in locations]
        self.warning = warning

    def __str__(self):
        severity = 'warning' if self.warning else 'error'
        separator = ", " if self.locations else ""
        return "%s: %s%s%s" % (severity, self.message, separator,
                               "\n".join(self.locations))
class Location(object):
    """A position in a WebIDL source file, resolved lazily (line text,
    column, and adjusted line number) from the lexer's raw data."""

    def __init__(self, lexer, lineno, lexpos, filename):
        self._line = None
        self._lineno = lineno
        self._lexpos = lexpos
        self._lexdata = lexer.lexdata
        self._file = filename if filename else "<unknown>"

    def __eq__(self, other):
        return (self._lexpos == other._lexpos and
                self._file == other._file)

    def filename(self):
        return self._file

    def resolve(self):
        """Compute line text, column, and final line number (idempotent)."""
        if self._line:
            return

        start = self._lexdata.rfind('\n', 0, self._lexpos) + 1
        # Only look a bounded distance ahead for the end of the line.
        end = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
        if end != -1:
            self._line = self._lexdata[start:end]
        else:
            self._line = self._lexdata[start:]
        self._colno = self._lexpos - start

        # Our line number seems to point to the start of self._lexdata
        self._lineno += self._lexdata.count('\n', 0, start)

    def get(self):
        self.resolve()
        return "%s line %s:%s" % (self._file, self._lineno, self._colno)

    def _pointerline(self):
        return " " * self._colno + "^"

    def __str__(self):
        self.resolve()
        return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno,
                                          self._colno, self._line,
                                          self._pointerline())
class BuiltinLocation(object):
    """Location placeholder for objects defined by the implementation
    itself rather than by any .webidl file."""

    def __init__(self, text):
        self.msg = text + "\n"

    def __eq__(self, other):
        return isinstance(other, BuiltinLocation) and self.msg == other.msg

    def filename(self):
        return '<builtin>'

    def resolve(self):
        # Nothing to resolve; the message is fixed at construction.
        pass

    def get(self):
        return self.msg

    def __str__(self):
        return self.get()
# Data Model
class IDLObject(object):
    """Base class for everything the parser produces: tracks a source
    location, per-object user data, and file-dependency computation."""

    def __init__(self, location):
        self.location = location
        self.userData = dict()

    def filename(self):
        return self.location.filename()

    def isInterface(self):
        return False

    def isEnum(self):
        return False

    def isCallback(self):
        return False

    def isType(self):
        return False

    def isDictionary(self):
        return False

    def isUnion(self):
        return False

    def getUserData(self, key, default):
        return self.userData.get(key, default)

    def setUserData(self, key, value):
        self.userData[key] = value

    def addExtendedAttributes(self, attrs):
        assert False  # Override me!

    def handleExtendedAttribute(self, attr):
        assert False  # Override me!

    def _getDependentObjects(self):
        assert False  # Override me!

    def getDeps(self, visited=None):
        """Return the set of filenames this object depends on.

        *visited* is the set of objects already seen; it guards against
        infinite recursion through dependency cycles.
        """
        # NB: we can't use visited=set() as a default because defaults are
        # evaluated once at definition time and would be shared by all calls.
        if visited is None:
            visited = set()
        if self in visited:
            return set()
        visited.add(self)

        deps = set()
        if self.filename() != "<builtin>":
            deps.add(self.filename())
        for dependency in self._getDependentObjects():
            deps = deps.union(dependency.getDeps(visited))
        return deps
class IDLScope(IDLObject):
    """A naming scope: maps identifier names to the objects declared in it
    and resolves name collisions."""
    def __init__(self, location, parentScope, identifier):
        IDLObject.__init__(self, location)
        self.parentScope = parentScope
        if identifier:
            assert isinstance(identifier, IDLIdentifier)
            self._name = identifier
        else:
            self._name = None
        # name -> declared object for everything in this scope.
        self._dict = {}
    def __str__(self):
        return self.QName()
    def QName(self):
        # Qualified names always end in '::' so children can append theirs.
        if self._name:
            return self._name.QName() + "::"
        return "::"
    def ensureUnique(self, identifier, object):
        """
        Ensure that there is at most one 'identifier' in scope ('self').
        Note that object can be None.  This occurs if we end up here for an
        interface type we haven't seen yet.
        """
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        assert not object or isinstance(object, IDLObjectWithIdentifier)
        assert not object or object.identifier == identifier
        if identifier.name in self._dict:
            if not object:
                return
            # ensureUnique twice with the same object is not allowed
            assert id(object) != id(self._dict[identifier.name])
            replacement = self.resolveIdentifierConflict(self, identifier,
                                                         self._dict[identifier.name],
                                                         object)
            self._dict[identifier.name] = replacement
            return
        assert object
        self._dict[identifier.name] = object
    def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
        """Decide which object wins when two declarations share a name;
        raises WebIDLError when the conflict cannot be resolved."""
        if isinstance(originalObject, IDLExternalInterface) and \
           isinstance(newObject, IDLExternalInterface) and \
           originalObject.identifier.name == newObject.identifier.name:
            # Duplicate external declarations of the same interface are fine.
            return originalObject
        if (isinstance(originalObject, IDLExternalInterface) or
            isinstance(newObject, IDLExternalInterface)):
            raise WebIDLError(
                "Name collision between "
                "interface declarations for identifier '%s' at '%s' and '%s'"
                % (identifier.name,
                    originalObject.location, newObject.location), [])
        # We do the merging of overloads here as opposed to in IDLInterface
        # because we need to merge overloads of NamedConstructors and we need to
        # detect conflicts in those across interfaces. See also the comment in
        # IDLInterface.addExtendedAttributes for "NamedConstructor".
        if originalObject.tag == IDLInterfaceMember.Tags.Method and \
           newObject.tag == IDLInterfaceMember.Tags.Method:
            return originalObject.addOverload(newObject)
        # Default to throwing, derived classes can override.
        conflictdesc = "\n\t%s at %s\n\t%s at %s" % \
          (originalObject, originalObject.location, newObject, newObject.location)
        raise WebIDLError(
            "Multiple unresolvable definitions of identifier '%s' in scope '%s%s"
            % (identifier.name, str(self), conflictdesc), [])
    def _lookupIdentifier(self, identifier):
        # Raises KeyError for unknown names; callers are expected to catch it.
        return self._dict[identifier.name]
    def lookupIdentifier(self, identifier):
        assert isinstance(identifier, IDLIdentifier)
        assert identifier.scope == self
        return self._lookupIdentifier(identifier)
class IDLIdentifier(IDLObject):
    """A name that has been resolved into a particular IDLScope."""

    def __init__(self, location, scope, name):
        IDLObject.__init__(self, location)

        self.name = name
        assert isinstance(scope, IDLScope)
        self.scope = scope

    def __str__(self):
        return self.QName()

    def QName(self):
        """Fully qualified name: the scope's qualified name plus our own."""
        return self.scope.QName() + self.name

    def __hash__(self):
        # Hash and equality are both defined on the qualified name, so
        # equal identifiers hash equally.
        return hash(self.QName())

    def __eq__(self, other):
        return self.QName() == other.QName()

    def object(self):
        """Look up the object this identifier names within its scope."""
        return self.scope.lookupIdentifier(self)
class IDLUnresolvedIdentifier(IDLObject):
    """An identifier as written in the IDL, not yet bound to any scope.

    Enforces WebIDL naming rules (reserved '__' prefix, forbidden names)
    at construction time.
    """

    def __init__(self, location, name, allowDoubleUnderscore = False,
                 allowForbidden = False):
        IDLObject.__init__(self, location)

        assert len(name) > 0

        if (name[:2] == "__" and name != "__content" and
            name != "___noSuchMethod__" and not allowDoubleUnderscore):
            raise WebIDLError("Identifiers beginning with __ are reserved",
                              [location])
        if name[0] == '_' and not allowDoubleUnderscore:
            # A single leading underscore only escapes a keyword; strip it.
            name = name[1:]
        # TODO: Bug 872377, Restore "toJSON" to below list.
        # We sometimes need custom serialization, so allow toJSON for now.
        if name in ["constructor", "toString"] and not allowForbidden:
            raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
                              [location])

        self.name = name

    def __str__(self):
        return self.QName()

    def QName(self):
        return "<unresolved scope>::" + self.name

    def resolve(self, scope, object):
        """Bind this identifier into *scope*, producing an IDLIdentifier and
        attaching it to *object* (which may be None)."""
        assert isinstance(scope, IDLScope)
        assert not object or isinstance(object, IDLObjectWithIdentifier)
        assert not object or object.identifier == self

        scope.ensureUnique(self, object)

        identifier = IDLIdentifier(self.location, scope, self.name)
        if object:
            object.identifier = identifier
        return identifier

    def finish(self):
        assert False  # Should replace with a resolved identifier first.
class IDLObjectWithIdentifier(IDLObject):
    """An IDLObject that carries an identifier and can resolve it into a
    parent scope."""
    def __init__(self, location, parentScope, identifier):
        IDLObject.__init__(self, location)

        assert isinstance(identifier, IDLUnresolvedIdentifier)

        self.identifier = identifier

        if parentScope:
            self.resolve(parentScope)

        # Null-handling mode for DOMString values; may be overridden by
        # a [TreatNullAs] extended attribute below.
        self.treatNullAs = "Default"

    def resolve(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(self.identifier, IDLUnresolvedIdentifier)
        self.identifier.resolve(parentScope, self)

    def checkForStringHandlingExtendedAttributes(self, attrs,
                                                 isDictionaryMember=False,
                                                 isOptional=False):
        """
        A helper function to deal with TreatNullAs.  Returns the list
        of attrs it didn't handle itself.
        """
        assert isinstance(self, IDLArgument) or isinstance(self, IDLAttribute)
        unhandledAttrs = list()
        for attr in attrs:
            if not attr.hasValue():
                # Valueless attributes are never string-handling ones.
                unhandledAttrs.append(attr)
                continue

            identifier = attr.identifier()
            value = attr.value()
            if identifier == "TreatNullAs":
                # [TreatNullAs] is only valid on non-nullable DOMString
                # values, outside dictionaries, and only as 'EmptyString'.
                if not self.type.isDOMString() or self.type.nullable():
                    raise WebIDLError("[TreatNullAs] is only allowed on "
                                      "arguments or attributes whose type is "
                                      "DOMString",
                                      [self.location])
                if isDictionaryMember:
                    raise WebIDLError("[TreatNullAs] is not allowed for "
                                      "dictionary members", [self.location])
                if value != 'EmptyString':
                    raise WebIDLError("[TreatNullAs] must take the identifier "
                                      "'EmptyString', not '%s'" % value,
                                      [self.location])
                self.treatNullAs = value
            else:
                unhandledAttrs.append(attr)

        return unhandledAttrs
class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
    """An identified object that is itself a naming scope (e.g. an
    interface or dictionary)."""

    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)

        # Resolve our identifier into the parent first, then initialize
        # ourselves as a scope under that resolved identifier.
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLScope.__init__(self, location, parentScope, self.identifier)
class IDLIdentifierPlaceholder(IDLObjectWithIdentifier):
    """A forward reference to a type name, resolved against a scope later."""

    def __init__(self, location, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)

    def finish(self, scope):
        """Resolve this placeholder in *scope*; raises WebIDLError if the
        named type was never defined."""
        try:
            scope._lookupIdentifier(self.identifier)
        except KeyError:
            # BUG FIX: narrowed from a bare ``except:`` so unrelated errors
            # (including KeyboardInterrupt/SystemExit) are no longer
            # swallowed; _lookupIdentifier is a dict lookup and raises
            # KeyError for unknown names.
            raise WebIDLError("Unresolved type '%s'." % self.identifier,
                              [self.location])

        obj = self.identifier.resolve(scope, None)
        return scope.lookupIdentifier(obj)
class IDLExternalInterface(IDLObjectWithIdentifier):
    """An interface declared here but defined in some other WebIDL file.

    Most of the interface protocol is answered with fixed values, since
    nothing about the real definition is known locally.
    """

    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        assert isinstance(parentScope, IDLScope)
        self.parent = None
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLObjectWithIdentifier.resolve(self, parentScope)

    def finish(self, scope):
        # Nothing to finish; the definition lives elsewhere.
        pass

    def validate(self):
        pass

    def isExternal(self):
        return True

    def isInterface(self):
        return True

    def isConsequential(self):
        return False

    def addExtendedAttributes(self, attrs):
        # External interfaces accept no extended attributes.
        assert len(attrs) == 0

    def resolve(self, parentScope):
        # Already resolved in __init__; later calls are no-ops.
        pass

    def getJSImplementation(self):
        return None

    def isJSImplemented(self):
        return False

    def getNavigatorProperty(self):
        return None

    def _getDependentObjects(self):
        return set()
class IDLInterface(IDLObjectWithScope):
def __init__(self, location, parentScope, name, parent, members,
isPartial):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
assert not isPartial or not parent
self.parent = None
self._callback = False
self._finished = False
self.members = []
# namedConstructors needs deterministic ordering because bindings code
# outputs the constructs in the order that namedConstructors enumerates
# them.
self.namedConstructors = list()
self.implementedInterfaces = set()
self._consequential = False
self._isPartial = True
# self.interfacesBasedOnSelf is the set of interfaces that inherit from
# self or have self as a consequential interface, including self itself.
# Used for distinguishability checking.
self.interfacesBasedOnSelf = set([self])
# self.interfacesImplementingSelf is the set of interfaces that directly
# have self as a consequential interface
self.interfacesImplementingSelf = set()
self._hasChildInterfaces = False
self._isOnGlobalProtoChain = False
# Tracking of the number of reserved slots we need for our
# members and those of ancestor interfaces.
self.totalMembersInSlots = 0
# Tracking of the number of own own members we have in slots
self._ownMembersInSlots = 0
IDLObjectWithScope.__init__(self, location, parentScope, name)
if not isPartial:
self.setNonPartial(location, parent, members)
else:
# Just remember our members for now
self.members = members
def __str__(self):
return "Interface '%s'" % self.identifier.name
def ctor(self):
identifier = IDLUnresolvedIdentifier(self.location, "constructor",
allowForbidden=True)
try:
return self._lookupIdentifier(identifier)
except:
return None
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
assert isinstance(scope, IDLScope)
assert isinstance(originalObject, IDLInterfaceMember)
assert isinstance(newObject, IDLInterfaceMember)
retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
originalObject, newObject)
# Might be a ctor, which isn't in self.members
if newObject in self.members:
self.members.remove(newObject)
return retval
def finish(self, scope):
if self._finished:
return
self._finished = True
if self._isPartial:
raise WebIDLError("Interface %s does not have a non-partial "
"declaration" % self.identifier.name,
[self.location])
assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
parent = self.parent.finish(scope) if self.parent else None
if parent and isinstance(parent, IDLExternalInterface):
raise WebIDLError("%s inherits from %s which does not have "
"a definition" %
(self.identifier.name,
self.parent.identifier.name),
[self.location])
assert not parent or isinstance(parent, IDLInterface)
self.parent = parent
assert iter(self.members)
if self.parent:
self.parent.finish(scope)
self.parent._hasChildInterfaces = True
self.totalMembersInSlots = self.parent.totalMembersInSlots
# Interfaces with [Global] must not have anything inherit from them
if self.parent.getExtendedAttribute("Global"):
# Note: This is not a self.parent.isOnGlobalProtoChain() check
# because ancestors of a [Global] interface can have other
# descendants.
raise WebIDLError("[Global] interface has another interface "
"inheriting from it",
[self.location, self.parent.location])
# Callbacks must not inherit from non-callbacks or inherit from
# anything that has consequential interfaces.
# XXXbz Can non-callbacks inherit from callbacks? Spec issue pending.
# XXXbz Can callbacks have consequential interfaces? Spec issue pending
if self.isCallback():
if not self.parent.isCallback():
raise WebIDLError("Callback interface %s inheriting from "
"non-callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
elif self.parent.isCallback():
raise WebIDLError("Non-callback interface %s inheriting from "
"callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
for iface in self.implementedInterfaces:
iface.finish(scope)
cycleInGraph = self.findInterfaceLoopPoint(self)
if cycleInGraph:
raise WebIDLError("Interface %s has itself as ancestor or "
"implemented interface" % self.identifier.name,
[self.location, cycleInGraph.location])
if self.isCallback():
# "implements" should have made sure we have no
# consequential interfaces.
assert len(self.getConsequentialInterfaces()) == 0
# And that we're not consequential.
assert not self.isConsequential()
# Now resolve() and finish() our members before importing the
# ones from our implemented interfaces.
# resolve() will modify self.members, so we need to iterate
# over a copy of the member list here.
for member in list(self.members):
member.resolve(self)
for member in self.members:
member.finish(scope)
ctor = self.ctor()
if ctor is not None:
ctor.finish(scope)
for ctor in self.namedConstructors:
ctor.finish(scope)
# Make a copy of our member list, so things that implement us
# can get those without all the stuff we implement ourselves
# admixed.
self.originalMembers = list(self.members)
# Import everything from our consequential interfaces into
# self.members. Sort our consequential interfaces by name
# just so we have a consistent order.
for iface in sorted(self.getConsequentialInterfaces(),
cmp=cmp,
key=lambda x: x.identifier.name):
# Flag the interface as being someone's consequential interface
iface.setIsConsequentialInterfaceOf(self)
additionalMembers = iface.originalMembers;
for additionalMember in additionalMembers[:]:
for member in self.members:
if additionalMember.identifier.name == member.identifier.name:
# XXX emscripten: allow such name collisions, ignore parent
additionalMembers.remove(additionalMember)
#raise WebIDLError(
# "Multiple definitions of %s on %s coming from 'implements' statements" %
# (member.identifier.name, self),
# [additionalMember.location, member.location])
self.members.extend(additionalMembers)
iface.interfacesImplementingSelf.add(self)
for ancestor in self.getInheritedInterfaces():
ancestor.interfacesBasedOnSelf.add(self)
for ancestorConsequential in ancestor.getConsequentialInterfaces():
ancestorConsequential.interfacesBasedOnSelf.add(self)
for member in self.members:
if (member.isAttr() and member.isUnforgeable() and
not hasattr(member, "originatingInterface")):
member.originatingInterface = self
# Compute slot indices for our members before we pull in
# unforgeable members from our parent.
for member in self.members:
if (member.isAttr() and
(member.getExtendedAttribute("StoreInSlot") or
member.getExtendedAttribute("Cached"))):
member.slotIndex = self.totalMembersInSlots
self.totalMembersInSlots += 1
if member.getExtendedAttribute("StoreInSlot"):
self._ownMembersInSlots += 1
if self.parent:
# Make sure we don't shadow any of the [Unforgeable] attributes on
# our ancestor interfaces. We don't have to worry about
# consequential interfaces here, because those have already been
# imported into the relevant .members lists. And we don't have to
# worry about anything other than our parent, because it has already
# imported its ancestors unforgeable attributes into its member
# list.
for unforgeableAttr in (attr for attr in self.parent.members if
attr.isAttr() and not attr.isStatic() and
attr.isUnforgeable()):
shadows = [ m for m in self.members if
(m.isAttr() or m.isMethod()) and
not m.isStatic() and
m.identifier.name == unforgeableAttr.identifier.name ]
if len(shadows) != 0:
locs = [unforgeableAttr.location] + [ s.location for s
in shadows ]
raise WebIDLError("Interface %s shadows [Unforgeable] "
"members of %s" %
(self.identifier.name,
ancestor.identifier.name),
locs)
# And now just stick it in our members, since we won't be
# inheriting this down the proto chain. If we really cared we
# could try to do something where we set up the unforgeable
# attributes of ancestor interfaces, with their corresponding
# getters, on our interface, but that gets pretty complicated
# and seems unnecessary.
self.members.append(unforgeableAttr)
# Ensure that there's at most one of each {named,indexed}
# {getter,setter,creator,deleter}, at most one stringifier,
# and at most one legacycaller. Note that this last is not
# quite per spec, but in practice no one overloads
# legacycallers.
specialMembersSeen = {}
for member in self.members:
if not member.isMethod():
continue
if member.isGetter():
memberType = "getters"
elif member.isSetter():
memberType = "setters"
elif member.isCreator():
memberType = "creators"
elif member.isDeleter():
memberType = "deleters"
elif member.isStringifier():
memberType = "stringifiers"
elif member.isJsonifier():
memberType = "jsonifiers"
elif member.isLegacycaller():
memberType = "legacycallers"
else:
continue
if (memberType != "stringifiers" and memberType != "legacycallers" and
memberType != "jsonifiers"):
if member.isNamed():
memberType = "named " + memberType
else:
assert member.isIndexed()
memberType = "indexed " + memberType
if memberType in specialMembersSeen:
raise WebIDLError("Multiple " + memberType + " on %s" % (self),
[self.location,
specialMembersSeen[memberType].location,
member.location])
specialMembersSeen[memberType] = member
if self._isOnGlobalProtoChain:
# Make sure we have no named setters, creators, or deleters
for memberType in ["setter", "creator", "deleter"]:
memberId = "named " + memberType + "s"
if memberId in specialMembersSeen:
raise WebIDLError("Interface with [Global] has a named %s" %
memberType,
[self.location,
specialMembersSeen[memberId].location])
# Make sure we're not [OverrideBuiltins]
if self.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] also has "
"[OverrideBuiltins]",
[self.location])
# Mark all of our ancestors as being on the global's proto chain too
parent = self.parent
while parent:
# Must not inherit from an interface with [OverrideBuiltins]
if parent.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] inherits from "
"interface with [OverrideBuiltins]",
[self.location, parent.location])
parent._isOnGlobalProtoChain = True
parent = parent.parent
def validate(self):
for member in self.members:
member.validate()
# Check that PutForwards refers to another attribute and that no
# cycles exist in forwarded assignments.
if member.isAttr():
iface = self
attr = member
putForwards = attr.getExtendedAttribute("PutForwards")
if putForwards and self.isCallback():
raise WebIDLError("[PutForwards] used on an attribute "
"on interface %s which is a callback "
"interface" % self.identifier.name,
[self.location, member.location])
while putForwards is not None:
forwardIface = attr.type.unroll().inner
fowardAttr = None
for forwardedMember in forwardIface.members:
if (not forwardedMember.isAttr() or
forwardedMember.identifier.name != putForwards[0]):
continue
if forwardedMember == member:
raise WebIDLError("Cycle detected in forwarded "
"assignments for attribute %s on "
"%s" %
(member.identifier.name, self),
[member.location])
fowardAttr = forwardedMember
break
if fowardAttr is None:
raise WebIDLError("Attribute %s on %s forwards to "
"missing attribute %s" %
(attr.identifier.name, iface, putForwards),
[attr.location])
iface = forwardIface
attr = fowardAttr
putForwards = attr.getExtendedAttribute("PutForwards")
def isInterface(self):
return True
def isExternal(self):
return False
def setIsConsequentialInterfaceOf(self, other):
self._consequential = True
self.interfacesBasedOnSelf.add(other)
def isConsequential(self):
return self._consequential
def setCallback(self, value):
self._callback = value
def isCallback(self):
return self._callback
    def isSingleOperationInterface(self):
        """
        Whether this callback interface can be represented by a single
        operation: no parent, no consequential interfaces, no attributes,
        and all non-static methods share one identifier.  Only meaningful
        for callback or JS-implemented interfaces.
        """
        assert self.isCallback() or self.isJSImplemented()
        return (
            # JS-implemented things should never need the
            # this-handling weirdness of single-operation interfaces.
            not self.isJSImplemented() and
            # Not inheriting from another interface
            not self.parent and
            # No consequential interfaces
            len(self.getConsequentialInterfaces()) == 0 and
            # No attributes of any kinds
            not any(m.isAttr() for m in self.members) and
            # There is at least one regular operation, and all regular
            # operations have the same identifier
            len(set(m.identifier.name for m in self.members if
                    m.isMethod() and not m.isStatic())) == 1)
def inheritanceDepth(self):
depth = 0
parent = self.parent
while parent:
depth = depth + 1
parent = parent.parent
return depth
def hasConstants(self):
return any(m.isConst() for m in self.members)
def hasInterfaceObject(self):
if self.isCallback():
return self.hasConstants()
return not hasattr(self, "_noInterfaceObject")
def hasInterfacePrototypeObject(self):
return not self.isCallback() and self.getUserData('hasConcreteDescendant', False)
    def addExtendedAttributes(self, attrs):
        """
        Validate and record the extended attributes for this interface,
        synthesizing static constructor methods for [Constructor],
        [NamedConstructor] and [ChromeConstructor].  Unknown attributes
        raise WebIDLError.  All accepted attributes end up in
        self._extendedAttrDict (value list, or True when value-less).
        """
        self._extendedAttrDict = {}
        for attr in attrs:
            identifier = attr.identifier()
            # Special cased attrs
            # NOTE(review): this first check is a standalone `if` (it always
            # raises), so the `elif` chain below starts at the
            # TreatNonObjectAsNull check.
            if identifier == "TreatNonCallableAsNull":
                raise WebIDLError("TreatNonCallableAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            if identifier == "TreatNonObjectAsNull":
                raise WebIDLError("TreatNonObjectAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            elif identifier == "NoInterfaceObject":
                if not attr.noArguments():
                    raise WebIDLError("[NoInterfaceObject] must take no arguments",
                                      [attr.location])
                if self.ctor():
                    raise WebIDLError("Constructor and NoInterfaceObject are incompatible",
                                      [self.location])
                self._noInterfaceObject = True
            elif identifier == "Constructor" or identifier == "NamedConstructor" or identifier == "ChromeConstructor":
                if identifier == "Constructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                if identifier == "NamedConstructor" and not attr.hasValue():
                    raise WebIDLError("NamedConstructor must either take an identifier or take a named argument list",
                                      [attr.location])
                if identifier == "ChromeConstructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                args = attr.args() if attr.hasArgs() else []
                # The constructor returns an instance of this interface.
                retType = IDLWrapperType(self.location, self)
                if identifier == "Constructor" or identifier == "ChromeConstructor":
                    name = "constructor"
                    allowForbidden = True
                else:
                    name = attr.value()
                    allowForbidden = False
                methodIdentifier = IDLUnresolvedIdentifier(self.location, name,
                                                           allowForbidden=allowForbidden)
                method = IDLMethod(self.location, methodIdentifier, retType,
                                   args, static=True)
                # Constructors are always NewObject and are always
                # assumed to be able to throw (since there's no way to
                # indicate otherwise) and never have any other
                # extended attributes.
                method.addExtendedAttributes(
                    [IDLExtendedAttribute(self.location, ("NewObject",)),
                     IDLExtendedAttribute(self.location, ("Throws",))])
                if identifier == "ChromeConstructor":
                    method.addExtendedAttributes(
                        [IDLExtendedAttribute(self.location, ("ChromeOnly",))])
                if identifier == "Constructor" or identifier == "ChromeConstructor":
                    method.resolve(self)
                else:
                    # We need to detect conflicts for NamedConstructors across
                    # interfaces. We first call resolve on the parentScope,
                    # which will merge all NamedConstructors with the same
                    # identifier accross interfaces as overloads.
                    method.resolve(self.parentScope)
                    # Then we look up the identifier on the parentScope. If the
                    # result is the same as the method we're adding then it
                    # hasn't been added as an overload and it's the first time
                    # we've encountered a NamedConstructor with that identifier.
                    # If the result is not the same as the method we're adding
                    # then it has been added as an overload and we need to check
                    # whether the result is actually one of our existing
                    # NamedConstructors.
                    newMethod = self.parentScope.lookupIdentifier(method.identifier)
                    if newMethod == method:
                        self.namedConstructors.append(method)
                    elif not newMethod in self.namedConstructors:
                        raise WebIDLError("NamedConstructor conflicts with a NamedConstructor of a different interface",
                                          [method.location, newMethod.location])
            elif (identifier == "ArrayClass"):
                if not attr.noArguments():
                    raise WebIDLError("[ArrayClass] must take no arguments",
                                      [attr.location])
                if self.parent:
                    raise WebIDLError("[ArrayClass] must not be specified on "
                                      "an interface with inherited interfaces",
                                      [attr.location, self.location])
            elif identifier == "Global":
                if not attr.noArguments():
                    raise WebIDLError("[Global] must take no arguments",
                                      [attr.location])
                self._isOnGlobalProtoChain = True
            elif (identifier == "NeedNewResolve" or
                  identifier == "OverrideBuiltins" or
                  identifier == "NoDelete" or
                  identifier == "ChromeOnly"):
                # Known extended attributes that do not take values
                if not attr.noArguments():
                    raise WebIDLError("[%s] must take no arguments" % identifier,
                                      [attr.location])
            elif (identifier == "Pref" or
                  identifier == "JSImplementation" or
                  identifier == "HeaderFile" or
                  identifier == "NavigatorProperty" or
                  identifier == "AvailableIn" or
                  identifier == "Prefix" or
                  identifier == "Func"):
                # Known extended attributes that take a string value
                if not attr.hasValue():
                    raise WebIDLError("[%s] must have a value" % identifier,
                                      [attr.location])
            else:
                raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
                                  [attr.location])
            attrlist = attr.listValue()
            self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def addImplementedInterface(self, implementedInterface):
assert(isinstance(implementedInterface, IDLInterface))
self.implementedInterfaces.add(implementedInterface)
def getInheritedInterfaces(self):
"""
Returns a list of the interfaces this interface inherits from
(not including this interface itself). The list is in order
from most derived to least derived.
"""
assert(self._finished)
if not self.parent:
return []
parentInterfaces = self.parent.getInheritedInterfaces()
parentInterfaces.insert(0, self.parent)
return parentInterfaces
def getConsequentialInterfaces(self):
assert(self._finished)
# The interfaces we implement directly
consequentialInterfaces = set(self.implementedInterfaces)
# And their inherited interfaces
for iface in self.implementedInterfaces:
consequentialInterfaces |= set(iface.getInheritedInterfaces())
# And now collect up the consequential interfaces of all of those
temp = set()
for iface in consequentialInterfaces:
temp |= iface.getConsequentialInterfaces()
return consequentialInterfaces | temp
def findInterfaceLoopPoint(self, otherInterface):
"""
Finds an interface, amongst our ancestors and consequential interfaces,
that inherits from otherInterface or implements otherInterface
directly. If there is no such interface, returns None.
"""
if self.parent:
if self.parent == otherInterface:
return self
loopPoint = self.parent.findInterfaceLoopPoint(otherInterface)
if loopPoint:
return loopPoint
if otherInterface in self.implementedInterfaces:
return self
for iface in self.implementedInterfaces:
loopPoint = iface.findInterfaceLoopPoint(otherInterface)
if loopPoint:
return loopPoint
return None
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
    def setNonPartial(self, location, parent, members):
        """
        Flesh out a previously partial interface with its non-partial
        definition's location, parent and members.  Raises WebIDLError if
        a non-partial definition was already seen.
        """
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        if not self._isPartial:
            raise WebIDLError("Two non-partial definitions for the "
                              "same interface",
                              [location, self.location])
        self._isPartial = False
        # Now make it look like we were parsed at this new location, since
        # that's the place where the interface is "really" defined
        self.location = location
        assert not self.parent
        self.parent = parent
        # Put the new members at the beginning
        self.members = members + self.members
def getJSImplementation(self):
classId = self.getExtendedAttribute("JSImplementation")
if not classId:
return classId
assert isinstance(classId, list)
assert len(classId) == 1
return classId[0]
def isJSImplemented(self):
return bool(self.getJSImplementation())
def getNavigatorProperty(self):
naviProp = self.getExtendedAttribute("NavigatorProperty")
if not naviProp:
return None
assert len(naviProp) == 1
assert isinstance(naviProp, list)
assert len(naviProp[0]) != 0
return naviProp[0]
def hasChildInterfaces(self):
return self._hasChildInterfaces
def isOnGlobalProtoChain(self):
return self._isOnGlobalProtoChain
def _getDependentObjects(self):
deps = set(self.members)
deps.union(self.implementedInterfaces)
if self.parent:
deps.add(self.parent)
return deps
def hasMembersInSlots(self):
return self._ownMembersInSlots != 0
class IDLDictionary(IDLObjectWithScope):
    """
    An IDL dictionary: a lexicographically-sorted list of members,
    optionally inheriting from a parent dictionary.  finish() resolves
    parent/member types; validate() rejects self-containing dictionaries.
    """
    def __init__(self, location, parentScope, name, parent, members):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        self.parent = parent
        self._finished = False
        self.members = list(members)
        IDLObjectWithScope.__init__(self, location, parentScope, name)
    def __str__(self):
        return "Dictionary '%s'" % self.identifier.name
    def isDictionary(self):
        return True;
    def finish(self, scope):
        """
        Resolve the parent placeholder and member types, sort members,
        and reject inheritance cycles and duplicate member names.
        """
        if self._finished:
            return
        self._finished = True
        if self.parent:
            assert isinstance(self.parent, IDLIdentifierPlaceholder)
            oldParent = self.parent
            self.parent = self.parent.finish(scope)
            if not isinstance(self.parent, IDLDictionary):
                raise WebIDLError("Dictionary %s has parent that is not a dictionary" %
                                  self.identifier.name,
                                  [oldParent.location, self.parent.location])
            # Make sure the parent resolves all its members before we start
            # looking at them.
            self.parent.finish(scope)
        for member in self.members:
            member.resolve(self)
            if not member.isComplete():
                member.complete(scope)
                assert member.type.isComplete()
        # Members of a dictionary are sorted in lexicographic order
        self.members.sort(cmp=cmp, key=lambda x: x.identifier.name)
        inheritedMembers = []
        ancestor = self.parent
        while ancestor:
            if ancestor == self:
                raise WebIDLError("Dictionary %s has itself as an ancestor" %
                                  self.identifier.name,
                                  [self.identifier.location])
            inheritedMembers.extend(ancestor.members)
            ancestor = ancestor.parent
        # Catch name duplication
        for inheritedMember in inheritedMembers:
            for member in self.members:
                if member.identifier.name == inheritedMember.identifier.name:
                    raise WebIDLError("Dictionary %s has two members with name %s" %
                                      (self.identifier.name, member.identifier.name),
                                      [member.location, inheritedMember.location])
    def validate(self):
        """
        Reject members whose type is a nullable dictionary, and any member
        whose type (transitively) contains this dictionary itself.
        """
        def typeContainsDictionary(memberType, dictionary):
            """
            Returns a tuple whose:
                - First element is a Boolean value indicating whether
                  memberType contains dictionary.
                - Second element is:
                    A list of locations that leads from the type that was passed in
                    the memberType argument, to the dictionary being validated,
                    if the boolean value in the first element is True.
                    None, if the boolean value in the first element is False.
            """
            if memberType.nullable() or \
               memberType.isArray() or \
               memberType.isSequence():
                return typeContainsDictionary(memberType.inner, dictionary)
            if memberType.isDictionary():
                if memberType.inner == dictionary:
                    return (True, [memberType.location])
                (contains, locations) = dictionaryContainsDictionary(memberType.inner, \
                                                                     dictionary)
                if contains:
                    return (True, [memberType.location] + locations)
            if memberType.isUnion():
                for member in memberType.flatMemberTypes:
                    (contains, locations) = typeContainsDictionary(member, dictionary)
                    if contains:
                        return (True, locations)
            return (False, None)
        def dictionaryContainsDictionary(dictMember, dictionary):
            # Whether |dictionary| occurs among dictMember's members or
            # anywhere up its parent chain.
            for member in dictMember.members:
                (contains, locations) = typeContainsDictionary(member.type, dictionary)
                if contains:
                    return (True, [member.location] + locations)
            if dictMember.parent:
                if dictMember.parent == dictionary:
                    return (True, [dictMember.location])
                else:
                    (contains, locations) = dictionaryContainsDictionary(dictMember.parent, dictionary)
                    if contains:
                        return (True, [dictMember.location] + locations)
            return (False, None)
        for member in self.members:
            if member.type.isDictionary() and member.type.nullable():
                raise WebIDLError("Dictionary %s has member with nullable "
                                  "dictionary type" % self.identifier.name,
                                  [member.location])
            (contains, locations) = typeContainsDictionary(member.type, self)
            if contains:
                raise WebIDLError("Dictionary %s has member with itself as type." %
                                  self.identifier.name,
                                  [member.location] + locations)
    def addExtendedAttributes(self, attrs):
        # Dictionaries take no extended attributes.
        assert len(attrs) == 0
    def _getDependentObjects(self):
        deps = set(self.members)
        if (self.parent):
            deps.add(self.parent)
        return deps
class IDLEnum(IDLObjectWithIdentifier):
    """An IDL enumeration: a named list of distinct string values."""

    def __init__(self, location, parentScope, name, values):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)

        # Reject duplicated value strings.
        seen = set()
        for value in values:
            if value in seen:
                raise WebIDLError("Enum %s has multiple identical strings" % name.name,
                                  [location])
            seen.add(value)

        IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
        self._values = values

    def values(self):
        """Return the enum's value strings, in declaration order."""
        return self._values

    def finish(self, scope):
        # Nothing to resolve for an enum.
        pass

    def validate(self):
        # Nothing further to check beyond __init__'s duplicate check.
        pass

    def isEnum(self):
        return True

    def addExtendedAttributes(self, attrs):
        # Enums take no extended attributes.
        assert len(attrs) == 0

    def _getDependentObjects(self):
        return set()
class IDLType(IDLObject):
    """
    Base class for all IDL types.  Provides the is*() predicate surface
    (subclasses override the ones that apply to them) and the Tags
    enumeration used to switch on type kind.
    """
    Tags = enum(
        # The integer types
        'int8',
        'uint8',
        'int16',
        'uint16',
        'int32',
        'uint32',
        'int64',
        'uint64',
        # Additional primitive types
        'bool',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # "double" last primitive type to match IDLBuiltinType
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'object',
        'date',
        'void',
        # Funny stuff
        'interface',
        'dictionary',
        'enum',
        'callback',
        'union',
        'sequence',
        'array'
    )
    def __init__(self, location, name):
        IDLObject.__init__(self, location)
        self.name = name
        self.builtin = False
    def __eq__(self, other):
        return other and self.builtin == other.builtin and self.name == other.name
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return str(self.name)
    def isType(self):
        return True
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isBoolean(self):
        return False
    def isNumeric(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isVoid(self):
        return self.name == "Void"
    def isSequence(self):
        return False
    def isArray(self):
        return False
    def isArrayBuffer(self):
        return False
    def isArrayBufferView(self):
        return False
    def isTypedArray(self):
        return False
    def isCallbackInterface(self):
        return False
    def isNonCallbackInterface(self):
        return False
    def isGeckoInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Gecko. At the moment, this returns
            true for all interface types that are not types from the TypedArray
            spec."""
        return self.isInterface() and not self.isSpiderMonkeyInterface()
    def isSpiderMonkeyInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Spidermonkey.  At the moment, this
            only returns true for the types from the TypedArray spec. """
        return self.isInterface() and (self.isArrayBuffer() or \
                                       self.isArrayBufferView() or \
                                       self.isTypedArray())
    def isDictionary(self):
        return False
    def isInterface(self):
        return False
    def isAny(self):
        return self.tag() == IDLType.Tags.any
    def isDate(self):
        return self.tag() == IDLType.Tags.date
    def isObject(self):
        return self.tag() == IDLType.Tags.object
    def isPromise(self):
        return False
    def isComplete(self):
        return True
    def includesRestrictedFloat(self):
        return False
    def isFloat(self):
        return False
    def isUnrestricted(self):
        # Should only call this on float types.
        # NOTE(review): base implementation only asserts and implicitly
        # returns None; float subclasses are expected to override.
        assert self.isFloat()
    def isSerializable(self):
        return False
    def tag(self):
        assert False # Override me!
    def treatNonCallableAsNull(self):
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner._treatNonCallableAsNull
    def treatNonObjectAsNull(self):
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner._treatNonObjectAsNull
    def addExtendedAttributes(self, attrs):
        # Types take no extended attributes by default.
        assert len(attrs) == 0
    def resolveType(self, parentScope):
        pass
    def unroll(self):
        # Strip wrappers (nullable/sequence/array); the base type is itself.
        return self
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether a generic type is or is not "
                        "distinguishable from other things")
class IDLUnresolvedType(IDLType):
    """
    Unresolved types are interface types
    """
    def __init__(self, location, name):
        IDLType.__init__(self, location, name)
    def isComplete(self):
        return False
    def complete(self, scope):
        """
        Look our name up in |scope| and return the concrete type: either
        the (completed) type object itself, or an IDLWrapperType around a
        non-type object (interface/dictionary/enum).
        """
        obj = None
        try:
            obj = scope._lookupIdentifier(self.name)
        except:
            # NOTE(review): bare except converts any lookup failure into a
            # WebIDLError; presumably _lookupIdentifier raises on unknown
            # names -- confirm which exception types are expected here.
            raise WebIDLError("Unresolved type '%s'." % self.name,
                              [self.location])
        assert obj
        if obj.isType():
            # obj itself might not be complete; deal with that.
            assert obj != self
            if not obj.isComplete():
                obj = obj.complete(scope)
            return obj
        # NOTE(review): the result of resolve() is never used; the call is
        # presumably kept for its side effect of registering the name in
        # |scope| -- confirm before removing.
        name = self.name.resolve(scope, None)
        return IDLWrapperType(self.location, obj)
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether an unresolved type is or is not "
                        "distinguishable from other things")
class IDLNullableType(IDLType):
    """
    A nullable wrapper (T?) around an inner type.  Almost every predicate
    delegates to the inner type; nullable() is the exception.
    """
    def __init__(self, location, innerType):
        assert not innerType.isVoid()
        assert not innerType == BuiltinTypes[IDLBuiltinType.Types.any]
        IDLType.__init__(self, location, innerType.name)
        self.inner = innerType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLNullableType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "OrNull"
    def nullable(self):
        return True
    def isCallback(self):
        return self.inner.isCallback()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isFloat(self):
        return self.inner.isFloat()
    def isUnrestricted(self):
        return self.inner.isUnrestricted()
    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()
    def isInteger(self):
        return self.inner.isInteger()
    def isVoid(self):
        return False
    def isSequence(self):
        return self.inner.isSequence()
    def isArray(self):
        return self.inner.isArray()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isInterface(self):
        return self.inner.isInterface()
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isEnum(self):
        return self.inner.isEnum()
    def isUnion(self):
        return self.inner.isUnion()
    def isSerializable(self):
        return self.inner.isSerializable()
    def tag(self):
        return self.inner.tag()
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        """
        Complete the inner type and reject doubly-nullable types and
        nullable unions that already contain a nullable member.
        """
        self.inner = self.inner.complete(scope)
        if self.inner.nullable():
            raise WebIDLError("The inner type of a nullable type must not be "
                              "a nullable type",
                              [self.location, self.inner.location])
        if self.inner.isUnion():
            if self.inner.hasNullableType:
                raise WebIDLError("The inner type of a nullable type must not "
                                  "be a union type that itself has a nullable "
                                  "type as a member type", [self.location])
        self.name = self.inner.name
        return self
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        if (other.nullable() or (other.isUnion() and other.hasNullableType) or
            other.isDictionary()):
            # Can't tell which type null should become
            return False
        return self.inner.isDistinguishableFrom(other)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLSequenceType(IDLType):
    """IDL sequence&lt;T&gt;: a pass-by-value list of |inner| elements."""

    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        IDLType.__init__(self, location, parameterType.name)
        self.inner = parameterType
        self.builtin = False

    def __eq__(self, other):
        if not isinstance(other, IDLSequenceType):
            return False
        return self.inner == other.inner

    def __str__(self):
        return "%sSequence" % self.inner

    def nullable(self):
        return False

    def isPrimitive(self):
        return False

    def isString(self):
        return False

    def isByteString(self):
        return False

    def isDOMString(self):
        return False

    def isVoid(self):
        return False

    def isSequence(self):
        return True

    def isArray(self):
        return False

    def isDictionary(self):
        return False

    def isInterface(self):
        return False

    def isEnum(self):
        return False

    def isSerializable(self):
        # A sequence is serializable iff its element type is.
        return self.inner.isSerializable()

    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()

    def tag(self):
        return IDLType.Tags.sequence

    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)

    def isComplete(self):
        return self.inner.isComplete()

    def complete(self, scope):
        """Complete the element type and take over its name."""
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name
        return self

    def unroll(self):
        return self.inner.unroll()

    def isDistinguishableFrom(self, other):
        if other.isUnion():
            # Let the union type do the pairwise comparison.
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isNonCallbackInterface())

    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLUnionType(IDLType):
    """
    An IDL union type.  memberTypes holds the declared members;
    flatMemberTypes (built in complete()) has nested unions flattened and
    the nullable wrapper stripped, with hasNullableType/hasDictionaryType
    recording what was seen.
    """
    def __init__(self, location, memberTypes):
        IDLType.__init__(self, location, "")
        self.memberTypes = memberTypes
        self.hasNullableType = False
        self.hasDictionaryType = False
        self.flatMemberTypes = None
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLUnionType) and self.memberTypes == other.memberTypes
    def isVoid(self):
        return False
    def isUnion(self):
        return True
    def isSerializable(self):
        return all(m.isSerializable() for m in self.memberTypes)
    def includesRestrictedFloat(self):
        return any(t.includesRestrictedFloat() for t in self.memberTypes)
    def tag(self):
        return IDLType.Tags.union
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        for t in self.memberTypes:
            t.resolveType(parentScope)
    def isComplete(self):
        return self.flatMemberTypes is not None
    def complete(self, scope):
        """
        Complete all member types, compute the union's "AOrB..." name,
        flatten nested unions/nullables into flatMemberTypes, and verify
        the flat member types are pairwise distinguishable.
        """
        def typeName(type):
            if isinstance(type, IDLNullableType):
                return typeName(type.inner) + "OrNull"
            if isinstance(type, IDLWrapperType):
                return typeName(type._identifier.object())
            if isinstance(type, IDLObjectWithIdentifier):
                return typeName(type.identifier)
            if isinstance(type, IDLType) and (type.isArray() or type.isSequence()):
                return str(type)
            return type.name
        for (i, type) in enumerate(self.memberTypes):
            if not type.isComplete():
                self.memberTypes[i] = type.complete(scope)
        self.name = "Or".join(typeName(type) for type in self.memberTypes)
        # The loop below mutates flatMemberTypes in place while indexing it:
        # a nullable member is replaced by its inner type (without advancing
        # i, so the replacement is reinspected), and a nested union is
        # spliced open.  nullableType/dictionaryType are only read on error
        # paths, which can only run after the corresponding has*Type flag
        # (and therefore the local) has been set.
        self.flatMemberTypes = list(self.memberTypes)
        i = 0
        while i < len(self.flatMemberTypes):
            if self.flatMemberTypes[i].nullable():
                if self.hasNullableType:
                    raise WebIDLError("Can't have more than one nullable types in a union",
                                      [nullableType.location, self.flatMemberTypes[i].location])
                if self.hasDictionaryType:
                    raise WebIDLError("Can't have a nullable type and a "
                                      "dictionary type in a union",
                                      [dictionaryType.location,
                                       self.flatMemberTypes[i].location])
                self.hasNullableType = True
                nullableType = self.flatMemberTypes[i]
                self.flatMemberTypes[i] = self.flatMemberTypes[i].inner
                continue
            if self.flatMemberTypes[i].isDictionary():
                if self.hasNullableType:
                    raise WebIDLError("Can't have a nullable type and a "
                                      "dictionary type in a union",
                                      [nullableType.location,
                                       self.flatMemberTypes[i].location])
                self.hasDictionaryType = True
                dictionaryType = self.flatMemberTypes[i]
            elif self.flatMemberTypes[i].isUnion():
                self.flatMemberTypes[i:i + 1] = self.flatMemberTypes[i].memberTypes
                continue
            i += 1
        for (i, t) in enumerate(self.flatMemberTypes[:-1]):
            for u in self.flatMemberTypes[i + 1:]:
                if not t.isDistinguishableFrom(u):
                    raise WebIDLError("Flat member types of a union should be "
                                      "distinguishable, " + str(t) + " is not "
                                      "distinguishable from " + str(u),
                                      [self.location, t.location, u.location])
        return self
    def isDistinguishableFrom(self, other):
        if self.hasNullableType and other.nullable():
            # Can't tell which type null should become
            return False
        if other.isUnion():
            otherTypes = other.unroll().memberTypes
        else:
            otherTypes = [other]
        # For every type in otherTypes, check that it's distinguishable from
        # every type in our types
        for u in otherTypes:
            if any(not t.isDistinguishableFrom(u) for t in self.memberTypes):
                return False
        return True
    def _getDependentObjects(self):
        return set(self.memberTypes)
class IDLArrayType(IDLType):
    """
    An IDL T[] array type.  May not be parameterized over sequence or
    dictionary types.
    """
    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        if parameterType.isSequence():
            raise WebIDLError("Array type cannot parameterize over a sequence type",
                              [location])
        if parameterType.isDictionary():
            raise WebIDLError("Array type cannot parameterize over a dictionary type",
                              [location])
        IDLType.__init__(self, location, parameterType.name)
        self.inner = parameterType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLArrayType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "Array"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        assert not self.inner.isSequence()
        return False
    def isArray(self):
        return True
    def isDictionary(self):
        assert not self.inner.isDictionary()
        return False
    def isInterface(self):
        return False
    def isEnum(self):
        return False
    def tag(self):
        return IDLType.Tags.array
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        """
        Complete the element type; re-check the dictionary restriction in
        case completion revealed one.
        """
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name
        if self.inner.isDictionary():
            raise WebIDLError("Array type must not contain "
                              "dictionary as element type.",
                              [self.inner.location])
        assert not self.inner.isSequence()
        return self
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isNonCallbackInterface())
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLTypedefType(IDLType, IDLObjectWithIdentifier):
    """
    An IDL typedef: a named alias for another type.  Never complete;
    complete() returns the (completed) aliased type, so typedefs vanish
    once types are resolved.
    """
    def __init__(self, location, innerType, name):
        IDLType.__init__(self, location, innerType.name)
        identifier = IDLUnresolvedIdentifier(location, name)
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        self.inner = innerType
        self.name = name
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLTypedefType) and self.inner == other.inner
    def __str__(self):
        return self.identifier.name
    def nullable(self):
        return self.inner.nullable()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isVoid(self):
        return self.inner.isVoid()
    def isSequence(self):
        return self.inner.isSequence()
    def isArray(self):
        return self.inner.isArray()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isInterface(self):
        return self.inner.isInterface()
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isComplete(self):
        return False
    def complete(self, parentScope):
        # Returns the aliased type, not self: typedefs are erased here.
        if not self.inner.isComplete():
            self.inner = self.inner.complete(parentScope)
        assert self.inner.isComplete()
        return self.inner
    def finish(self, parentScope):
        # Maybe the IDLObjectWithIdentifier for the typedef should be
        # a separate thing from the type? If that happens, we can
        # remove some hackery around avoiding isInterface() in
        # Configuration.py.
        self.complete(parentScope)
    def validate(self):
        pass
    # Do we need a resolveType impl?  I don't think it's particularly useful....
    def tag(self):
        return self.inner.tag()
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        return self.inner.isDistinguishableFrom(other)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLWrapperType(IDLType):
    """
    A type wrapping a non-type IDL object: an interface, dictionary or
    enum.  (Class continues beyond this chunk.)
    """
    def __init__(self, location, inner):
        IDLType.__init__(self, location, inner.identifier.name)
        self.inner = inner
        self._identifier = inner.identifier
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLWrapperType) and \
               self._identifier == other._identifier and \
               self.builtin == other.builtin
    def __str__(self):
        return str(self.name) + " (Wrapper)"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        return False
    def isArray(self):
        return False
    def isDictionary(self):
        return isinstance(self.inner, IDLDictionary)
    def isInterface(self):
        return isinstance(self.inner, IDLInterface) or \
               isinstance(self.inner, IDLExternalInterface)
    def isCallbackInterface(self):
        return self.isInterface() and self.inner.isCallback()
    def isNonCallbackInterface(self):
        return self.isInterface() and not self.inner.isCallback()
    def isEnum(self):
        return isinstance(self.inner, IDLEnum)
    def isPromise(self):
        return isinstance(self.inner, IDLInterface) and \
               self.inner.identifier.name == "Promise"
    def isSerializable(self):
        # Interfaces are serializable when they have a jsonifier;
        # enums always are; dictionaries when all members are.
        if self.isInterface():
            if self.inner.isExternal():
                return False
            return any(m.isMethod() and m.isJsonifier() for m in self.inner.members)
        elif self.isEnum():
            return True
        elif self.isDictionary():
            return all(m.type.isSerializable() for m in self.inner.members)
        else:
            raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
                              "is serializable" % type(self.inner), [self.location])
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolve(parentScope)
    def isComplete(self):
        return True
    def tag(self):
        if self.isInterface():
            return IDLType.Tags.interface
        elif self.isEnum():
            return IDLType.Tags.enum
        elif self.isDictionary():
            return IDLType.Tags.dictionary
        else:
            assert False
    def isDistinguishableFrom(self, other):
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        assert self.isInterface() or self.isEnum() or self.isDictionary()
        if self.isEnum():
            return (other.isPrimitive() or other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isArray() or
                    other.isDate())
        if self.isDictionary() and other.nullable():
            return False
        if other.isPrimitive() or other.isString() or other.isEnum() or other.isDate():
            return True
        if self.isDictionary():
            return other.isNonCallbackInterface()
        assert self.isInterface()
        if other.isInterface():
            if other.isSpiderMonkeyInterface():
                # Just let |other| handle things
                return other.isDistinguishableFrom(self)
            assert self.isGeckoInterface() and other.isGeckoInterface()
            if self.inner.isExternal() or other.unroll().inner.isExternal():
                return self != other
            # Distinguishable when no interface is based on both, and at
            # least one side is a non-callback interface.
            return (len(self.inner.interfacesBasedOnSelf &
                        other.unroll().inner.interfacesBasedOnSelf) == 0 and
                    (self.isNonCallbackInterface() or
                     other.isNonCallbackInterface()))
        if (other.isDictionary() or other.isCallback() or
            other.isSequence() or other.isArray()):
            return self.isNonCallbackInterface()
        # Not much else |other| can be
        assert other.isObject()
        return False
def _getDependentObjects(self):
# NB: The codegen for an interface type depends on
# a) That the identifier is in fact an interface (as opposed to
# a dictionary or something else).
# b) The native type of the interface.
# If we depend on the interface object we will also depend on
# anything the interface depends on which is undesirable. We
# considered implementing a dependency just on the interface type
# file, but then every modification to an interface would cause this
# to be regenerated which is still undesirable. We decided not to
# depend on anything, reasoning that:
# 1) Changing the concrete type of the interface requires modifying
# Bindings.conf, which is still a global dependency.
# 2) Changing an interface to a dictionary (or vice versa) with the
# same identifier should be incredibly rare.
return set()
class IDLBuiltinType(IDLType):
    """
    A builtin WebIDL type (primitives, strings, any/object/date/void, and
    the typed-array family).  Instances are shared singletons stored in the
    module-level BuiltinTypes dict.

    NOTE: several predicates below rely on the *ordering* of the Types enum
    (e.g. everything up to and including 'double' is primitive, and the
    typed arrays form a contiguous run) -- do not reorder it.
    """
    Types = enum(
        # The integer types
        'byte',
        'octet',
        'short',
        'unsigned_short',
        'long',
        'unsigned_long',
        'long_long',
        'unsigned_long_long',
        # Additional primitive types
        'boolean',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # IMPORTANT: "double" must be the last primitive type listed
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'object',
        'date',
        'void',
        # Funny stuff
        'ArrayBuffer',
        'ArrayBufferView',
        'Int8Array',
        'Uint8Array',
        'Uint8ClampedArray',
        'Int16Array',
        'Uint16Array',
        'Int32Array',
        'Uint32Array',
        'Float32Array',
        'Float64Array'
        )
    # Map from each builtin type to its IDLType.Tags value; all typed-array
    # types tag as 'interface'.
    TagLookup = {
            Types.byte: IDLType.Tags.int8,
            Types.octet: IDLType.Tags.uint8,
            Types.short: IDLType.Tags.int16,
            Types.unsigned_short: IDLType.Tags.uint16,
            Types.long: IDLType.Tags.int32,
            Types.unsigned_long: IDLType.Tags.uint32,
            Types.long_long: IDLType.Tags.int64,
            Types.unsigned_long_long: IDLType.Tags.uint64,
            Types.boolean: IDLType.Tags.bool,
            Types.unrestricted_float: IDLType.Tags.unrestricted_float,
            Types.float: IDLType.Tags.float,
            Types.unrestricted_double: IDLType.Tags.unrestricted_double,
            Types.double: IDLType.Tags.double,
            Types.any: IDLType.Tags.any,
            Types.domstring: IDLType.Tags.domstring,
            Types.bytestring: IDLType.Tags.bytestring,
            Types.object: IDLType.Tags.object,
            Types.date: IDLType.Tags.date,
            Types.void: IDLType.Tags.void,
            Types.ArrayBuffer: IDLType.Tags.interface,
            Types.ArrayBufferView: IDLType.Tags.interface,
            Types.Int8Array: IDLType.Tags.interface,
            Types.Uint8Array: IDLType.Tags.interface,
            Types.Uint8ClampedArray: IDLType.Tags.interface,
            Types.Int16Array: IDLType.Tags.interface,
            Types.Uint16Array: IDLType.Tags.interface,
            Types.Int32Array: IDLType.Tags.interface,
            Types.Uint32Array: IDLType.Tags.interface,
            Types.Float32Array: IDLType.Tags.interface,
            Types.Float64Array: IDLType.Tags.interface
    }
    def __init__(self, location, name, type):
        IDLType.__init__(self, location, name)
        self.builtin = True
        self._typeTag = type
    def isPrimitive(self):
        # Depends on 'double' being the last primitive in the Types enum.
        return self._typeTag <= IDLBuiltinType.Types.double
    def isBoolean(self):
        return self._typeTag == IDLBuiltinType.Types.boolean
    def isNumeric(self):
        return self.isPrimitive() and not self.isBoolean()
    def isString(self):
        return self._typeTag == IDLBuiltinType.Types.domstring or \
               self._typeTag == IDLBuiltinType.Types.bytestring
    def isByteString(self):
        return self._typeTag == IDLBuiltinType.Types.bytestring
    def isDOMString(self):
        return self._typeTag == IDLBuiltinType.Types.domstring
    def isInteger(self):
        # Depends on the integer types leading the Types enum.
        return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long
    def isArrayBuffer(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBuffer
    def isArrayBufferView(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBufferView
    def isTypedArray(self):
        # Depends on the typed arrays being contiguous in the Types enum.
        return self._typeTag >= IDLBuiltinType.Types.Int8Array and \
               self._typeTag <= IDLBuiltinType.Types.Float64Array
    def isInterface(self):
        # TypedArray things are interface types per the TypedArray spec,
        # but we handle them as builtins because SpiderMonkey implements
        # all of it internally.
        return self.isArrayBuffer() or \
               self.isArrayBufferView() or \
               self.isTypedArray()
    def isNonCallbackInterface(self):
        # All the interfaces we can be are non-callback
        return self.isInterface()
    def isFloat(self):
        return self._typeTag == IDLBuiltinType.Types.float or \
               self._typeTag == IDLBuiltinType.Types.double or \
               self._typeTag == IDLBuiltinType.Types.unrestricted_float or \
               self._typeTag == IDLBuiltinType.Types.unrestricted_double
    def isUnrestricted(self):
        assert self.isFloat()
        return self._typeTag == IDLBuiltinType.Types.unrestricted_float or \
               self._typeTag == IDLBuiltinType.Types.unrestricted_double
    def isSerializable(self):
        return self.isPrimitive() or self.isDOMString() or self.isDate()
    def includesRestrictedFloat(self):
        return self.isFloat() and not self.isUnrestricted()
    def tag(self):
        return IDLBuiltinType.TagLookup[self._typeTag]
    def isDistinguishableFrom(self, other):
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        if self.isBoolean():
            return (other.isNumeric() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isArray() or
                    other.isDate())
        if self.isNumeric():
            return (other.isBoolean() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isArray() or
                    other.isDate())
        if self.isString():
            return (other.isPrimitive() or other.isInterface() or
                    other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isArray() or
                    other.isDate())
        if self.isAny():
            # Can't tell "any" apart from anything
            return False
        if self.isObject():
            return other.isPrimitive() or other.isString() or other.isEnum()
        if self.isDate():
            return (other.isPrimitive() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isCallback() or
                    other.isDictionary() or other.isSequence() or
                    other.isArray())
        if self.isVoid():
            return not other.isVoid()
        # Not much else we could be!
        assert self.isSpiderMonkeyInterface()
        # Like interfaces, but we know we're not a callback
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isCallback() or other.isDictionary() or
                other.isSequence() or other.isArray() or other.isDate() or
                (other.isInterface() and (
                 # ArrayBuffer is distinguishable from everything
                 # that's not an ArrayBuffer or a callback interface
                 (self.isArrayBuffer() and not other.isArrayBuffer()) or
                 # ArrayBufferView is distinguishable from everything
                 # that's not an ArrayBufferView or typed array.
                 (self.isArrayBufferView() and not other.isArrayBufferView() and
                  not other.isTypedArray()) or
                 # Typed arrays are distinguishable from everything
                 # except ArrayBufferView and the same type of typed
                 # array
                 (self.isTypedArray() and not other.isArrayBufferView() and not
                  (other.isTypedArray() and other.name == self.name)))))
    def _getDependentObjects(self):
        return set()
# Singleton IDLBuiltinType instances, keyed by IDLBuiltinType.Types tag.
# Each one carries the synthetic BuiltinLocation("<builtin type>") location.
BuiltinTypes = {
      IDLBuiltinType.Types.byte:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
                         IDLBuiltinType.Types.byte),
      IDLBuiltinType.Types.octet:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
                         IDLBuiltinType.Types.octet),
      IDLBuiltinType.Types.short:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
                         IDLBuiltinType.Types.short),
      IDLBuiltinType.Types.unsigned_short:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
                         IDLBuiltinType.Types.unsigned_short),
      IDLBuiltinType.Types.long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
                         IDLBuiltinType.Types.long),
      IDLBuiltinType.Types.unsigned_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
                         IDLBuiltinType.Types.unsigned_long),
      IDLBuiltinType.Types.long_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
                         IDLBuiltinType.Types.long_long),
      IDLBuiltinType.Types.unsigned_long_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
                         IDLBuiltinType.Types.unsigned_long_long),
      IDLBuiltinType.Types.boolean:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
                         IDLBuiltinType.Types.boolean),
      IDLBuiltinType.Types.float:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
                         IDLBuiltinType.Types.float),
      IDLBuiltinType.Types.unrestricted_float:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedFloat",
                         IDLBuiltinType.Types.unrestricted_float),
      IDLBuiltinType.Types.double:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
                         IDLBuiltinType.Types.double),
      IDLBuiltinType.Types.unrestricted_double:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedDouble",
                         IDLBuiltinType.Types.unrestricted_double),
      IDLBuiltinType.Types.any:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
                         IDLBuiltinType.Types.any),
      IDLBuiltinType.Types.domstring:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
                         IDLBuiltinType.Types.domstring),
      IDLBuiltinType.Types.bytestring:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ByteString",
                         IDLBuiltinType.Types.bytestring),
      IDLBuiltinType.Types.object:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
                         IDLBuiltinType.Types.object),
      IDLBuiltinType.Types.date:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Date",
                         IDLBuiltinType.Types.date),
      IDLBuiltinType.Types.void:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
                         IDLBuiltinType.Types.void),
      IDLBuiltinType.Types.ArrayBuffer:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
                         IDLBuiltinType.Types.ArrayBuffer),
      IDLBuiltinType.Types.ArrayBufferView:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBufferView",
                         IDLBuiltinType.Types.ArrayBufferView),
      IDLBuiltinType.Types.Int8Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int8Array",
                         IDLBuiltinType.Types.Int8Array),
      IDLBuiltinType.Types.Uint8Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8Array",
                         IDLBuiltinType.Types.Uint8Array),
      IDLBuiltinType.Types.Uint8ClampedArray:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8ClampedArray",
                         IDLBuiltinType.Types.Uint8ClampedArray),
      IDLBuiltinType.Types.Int16Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int16Array",
                         IDLBuiltinType.Types.Int16Array),
      IDLBuiltinType.Types.Uint16Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint16Array",
                         IDLBuiltinType.Types.Uint16Array),
      IDLBuiltinType.Types.Int32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int32Array",
                         IDLBuiltinType.Types.Int32Array),
      IDLBuiltinType.Types.Uint32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint32Array",
                         IDLBuiltinType.Types.Uint32Array),
      IDLBuiltinType.Types.Float32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float32Array",
                         IDLBuiltinType.Types.Float32Array),
      IDLBuiltinType.Types.Float64Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float64Array",
                         IDLBuiltinType.Types.Float64Array)
    }
# Inclusive (min, max) value ranges for each WebIDL integer type; used by
# matchIntegerValueToType and IDLValue.coerceToType for range checks.
integerTypeSizes = {
        IDLBuiltinType.Types.byte: (-128, 127),
        IDLBuiltinType.Types.octet: (0, 255),
        IDLBuiltinType.Types.short: (-32768, 32767),
        IDLBuiltinType.Types.unsigned_short: (0, 65535),
        IDLBuiltinType.Types.long: (-2147483648, 2147483647),
        IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
        IDLBuiltinType.Types.long_long: (-9223372036854775808,
                                         9223372036854775807),
        IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
}
def matchIntegerValueToType(value):
    """
    Return the BuiltinTypes entry for an integer type whose range contains
    |value|, or None if no WebIDL integer type can represent it.

    NOTE(review): when several types fit, the one returned is whichever
    comes first in integerTypeSizes' dict iteration order -- the choice is
    unspecified, matching the original behavior.
    """
    # Avoid shadowing the builtins `type`, `min`, and `max` (the original
    # rebound all three as locals).
    for typeTag, (minValue, maxValue) in integerTypeSizes.items():
        if minValue <= value <= maxValue:
            return BuiltinTypes[typeTag]
    return None
class IDLValue(IDLObject):
    """
    A typed constant/default value (integer, float, or string literal).
    coerceToType() implements the conversions the parser allows between a
    literal's natural type and the declared type it is assigned to.
    """
    def __init__(self, location, type, value):
        IDLObject.__init__(self, location)
        self.type = type
        assert isinstance(type, IDLType)
        self.value = value
    def coerceToType(self, type, location):
        # Returns an IDLValue of the target (or flat member) type, or raises
        # WebIDLError if no coercion is possible.
        if type == self.type:
            return self # Nothing to do
        # We first check for unions to ensure that even if the union is nullable
        # we end up with the right flat member type, not the union's type.
        if type.isUnion():
            # We use the flat member types here, because if we have a nullable
            # member type, or a nested union, we want the type the value
            # actually coerces to, not the nullable or nested union type.
            for subtype in type.unroll().flatMemberTypes:
                try:
                    coercedValue = self.coerceToType(subtype, location)
                    # Create a new IDLValue to make sure that we have the
                    # correct float/double type. This is necessary because we
                    # use the value's type when it is a default value of a
                    # union, and the union cares about the exact float type.
                    return IDLValue(self.location, subtype, coercedValue.value)
                except:
                    # Deliberate bare except: a failed member coercion just
                    # means we try the next member; if none matches we fall
                    # through to the final "Cannot coerce" error below.
                    pass
        # If the type allows null, rerun this matching on the inner type, except
        # nullable enums. We handle those specially, because we want our
        # default string values to stay strings even when assigned to a nullable
        # enum.
        elif type.nullable() and not type.isEnum():
            innerValue = self.coerceToType(type.inner, location)
            return IDLValue(self.location, type, innerValue.value)
        elif self.type.isInteger() and type.isInteger():
            # We're both integer types. See if we fit.
            (min, max) = integerTypeSizes[type._typeTag]
            if self.value <= max and self.value >= min:
                # Promote
                return IDLValue(self.location, type, self.value)
            else:
                raise WebIDLError("Value %s is out of range for type %s." %
                                  (self.value, type), [location])
        elif self.type.isInteger() and type.isFloat():
            # Convert an integer literal into float
            # (2**24 is the largest integer exactly representable in float32).
            if -2**24 <= self.value <= 2**24:
                floatType = BuiltinTypes[IDLBuiltinType.Types.float]
                return IDLValue(self.location, floatType, float(self.value))
            else:
                raise WebIDLError("Converting value %s to %s will lose precision." %
                                  (self.value, type), [location])
        elif self.type.isString() and type.isEnum():
            # Just keep our string, but make sure it's a valid value for this enum
            enum = type.unroll().inner
            if self.value not in enum.values():
                raise WebIDLError("'%s' is not a valid default value for enum %s"
                                  % (self.value, enum.identifier.name),
                                  [location, enum.location])
            return self
        elif self.type.isFloat() and type.isFloat():
            # Reject inf/-inf/NaN literals for restricted float targets.
            if (not type.isUnrestricted() and
                (self.value == float("inf") or self.value == float("-inf") or
                 math.isnan(self.value))):
                raise WebIDLError("Trying to convert unrestricted value %s to non-unrestricted"
                                  % self.value, [location]);
            return self
        raise WebIDLError("Cannot coerce type %s to type %s." %
                          (self.type, type), [location])
    def _getDependentObjects(self):
        return set()
class IDLNullValue(IDLObject):
    """Represents the IDL literal 'null' used as a default value."""
    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None
    def coerceToType(self, type, location):
        # null is acceptable for nullable types, dictionaries, 'any', and
        # unions that contain a nullable or dictionary member.
        acceptsNull = (isinstance(type, IDLNullableType) or
                       (type.isUnion() and
                        (type.hasNullableType or type.hasDictionaryType)) or
                       type.isDictionary() or
                       type.isAny())
        if not acceptsNull:
            raise WebIDLError("Cannot coerce null value to type %s." % type,
                              [location])
        coerced = IDLNullValue(self.location)
        coerced.type = type
        if type.isUnion() and not type.nullable() and type.hasDictionaryType:
            # We're actually a default value for the union's dictionary
            # member; use that member's type instead of the union's.
            for memberType in type.flatMemberTypes:
                if memberType.isDictionary():
                    coerced.type = memberType
                    break
        return coerced
    def _getDependentObjects(self):
        return set()
class IDLUndefinedValue(IDLObject):
    """Represents the implicit 'undefined' default for optional 'any' values."""
    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None
    def coerceToType(self, type, location):
        # 'undefined' is only a legal value for the 'any' type.
        if not type.isAny():
            raise WebIDLError("Cannot coerce undefined value to type %s." % type,
                              [location])
        coerced = IDLUndefinedValue(self.location)
        coerced.type = type
        return coerced
    def _getDependentObjects(self):
        return set()
class IDLInterfaceMember(IDLObjectWithIdentifier):
    """
    Common base for the three kinds of interface members (consts,
    attributes, methods), holding the member tag and the parsed extended
    attributes.
    """
    Tags = enum('Const', 'Attr', 'Method')
    Special = enum('Static', 'Stringifier')
    def __init__(self, location, identifier, tag):
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        self.tag = tag
        self._extendedAttrDict = {}
    def isMethod(self):
        return self.tag == IDLInterfaceMember.Tags.Method
    def isAttr(self):
        return self.tag == IDLInterfaceMember.Tags.Attr
    def isConst(self):
        return self.tag == IDLInterfaceMember.Tags.Const
    def addExtendedAttributes(self, attrs):
        for attribute in attrs:
            self.handleExtendedAttribute(attribute)
            values = attribute.listValue()
            # A no-argument extended attribute is recorded as the value True.
            self._extendedAttrDict[attribute.identifier()] = values or True
    def handleExtendedAttribute(self, attr):
        # Subclasses override this to process attributes they understand.
        pass
    def getExtendedAttribute(self, name):
        return self._extendedAttrDict.get(name)
class IDLConst(IDLInterfaceMember):
    """
    An IDL 'const' interface member.  finish() completes the type (which
    must end up primitive or string) and coerces the value to it.
    """
    def __init__(self, location, identifier, type, value):
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Const)
        assert isinstance(type, IDLType)
        if type.isDictionary():
            raise WebIDLError("A constant cannot be of a dictionary type",
                              [self.location])
        self.type = type
        self.value = value
        if identifier.name == "prototype":
            raise WebIDLError("The identifier of a constant must not be 'prototype'",
                              [location])
    def __str__(self):
        return "'%s' const '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        if not self.type.isComplete():
            type = self.type.complete(scope)
            if not type.isPrimitive() and not type.isString():
                locations = [self.type.location, type.location]
                try:
                    # Best-effort: include the inner type's location too, if
                    # the completed type has one.
                    locations.append(type.inner.location)
                except:
                    pass
                raise WebIDLError("Incorrect type for constant", locations)
            self.type = type
        # The value might not match the type
        coercedValue = self.value.coerceToType(self.type, self.location)
        assert coercedValue
        self.value = coercedValue
    def validate(self):
        pass
    def _getDependentObjects(self):
        return set([self.type, self.value])
class IDLAttribute(IDLInterfaceMember):
    """
    An IDL attribute member (possibly readonly, static, inherit, or
    stringifier).  finish() completes the attribute's type and rejects
    illegal type combinations; handleExtendedAttribute() processes the
    per-attribute extended attributes and enforces their mutual-exclusion
    rules.
    """
    def __init__(self, location, identifier, type, readonly, inherit=False,
                 static=False, stringifier=False):
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Attr)
        assert isinstance(type, IDLType)
        self.type = type
        self.readonly = readonly
        self.inherit = inherit
        self.static = static
        # The following flags are filled in by handleExtendedAttribute().
        self.lenientThis = False
        self._unforgeable = False
        self.stringifier = stringifier
        self.enforceRange = False
        self.clamp = False
        # Slot index for [Cached]/[StoreInSlot]; assigned elsewhere.
        self.slotIndex = None
        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static attribute must not be 'prototype'",
                              [location])
        if readonly and inherit:
            raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
                              [self.location])
    def isStatic(self):
        return self.static
    def __str__(self):
        return "'%s' attribute '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        # Complete the type, then check type/extended-attribute legality.
        if not self.type.isComplete():
            t = self.type.complete(scope)
            assert not isinstance(t, IDLUnresolvedType)
            assert not isinstance(t, IDLTypedefType)
            assert not isinstance(t.name, IDLUnresolvedIdentifier)
            self.type = t
        # Dictionary- and sequence-typed attributes are only allowed when
        # [Cached] is present.
        if self.type.isDictionary() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("An attribute cannot be of a dictionary type",
                              [self.location])
        if self.type.isSequence() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("A non-cached attribute cannot be of a sequence "
                              "type", [self.location])
        if self.type.isUnion():
            for f in self.type.unroll().flatMemberTypes:
                if f.isDictionary():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a dictionary "
                                      "type", [self.location, f.location])
                if f.isSequence():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a sequence "
                                      "type", [self.location, f.location])
        if not self.type.isInterface() and self.getExtendedAttribute("PutForwards"):
            raise WebIDLError("An attribute with [PutForwards] must have an "
                              "interface type as its type", [self.location])
        if not self.type.isInterface() and self.getExtendedAttribute("SameObject"):
            raise WebIDLError("An attribute with [SameObject] must have an "
                              "interface type as its type", [self.location])
    def validate(self):
        # [Cached]/[StoreInSlot] attributes skip getter calls, so the value
        # must be [Constant] or [Pure].
        if ((self.getExtendedAttribute("Cached") or
             self.getExtendedAttribute("StoreInSlot")) and
            not self.getExtendedAttribute("Constant") and
            not self.getExtendedAttribute("Pure")):
            raise WebIDLError("Cached attributes and attributes stored in "
                              "slots must be constant or pure, since the "
                              "getter won't always be called.",
                              [self.location])
        if self.getExtendedAttribute("Frozen"):
            if not self.type.isSequence() and not self.type.isDictionary():
                raise WebIDLError("[Frozen] is only allowed on sequence-valued "
                                  "and dictionary-valued attributes",
                                  [self.location])
    def handleExtendedAttribute(self, attr):
        identifier = attr.identifier()
        if identifier == "SetterThrows" and self.readonly:
            raise WebIDLError("Readonly attributes must not be flagged as "
                              "[SetterThrows]",
                              [self.location])
        elif (((identifier == "Throws" or identifier == "GetterThrows") and
               self.getExtendedAttribute("StoreInSlot")) or
              (identifier == "StoreInSlot" and
               (self.getExtendedAttribute("Throws") or
                self.getExtendedAttribute("GetterThrows")))):
            raise WebIDLError("Throwing things can't be [Pure] or [Constant] "
                              "or [SameObject] or [StoreInSlot]",
                              [attr.location])
        elif identifier == "LenientThis":
            if not attr.noArguments():
                raise WebIDLError("[LenientThis] must take no arguments",
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[LenientThis] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginReadable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginReadable]",
                                  [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginWritable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginWritable]",
                                  [attr.location, self.location])
            self.lenientThis = True
        elif identifier == "Unforgeable":
            if not self.readonly:
                raise WebIDLError("[Unforgeable] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[Unforgeable] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            self._unforgeable = True
        elif identifier == "SameObject" and not self.readonly:
            raise WebIDLError("[SameObject] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "Constant" and not self.readonly:
            raise WebIDLError("[Constant] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "PutForwards":
            if not self.readonly:
                raise WebIDLError("[PutForwards] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[PutForwards] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("Replaceable") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
            if not attr.hasValue():
                raise WebIDLError("[PutForwards] takes an identifier",
                                  [attr.location, self.location])
        elif identifier == "Replaceable":
            if self.getExtendedAttribute("PutForwards") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "LenientFloat":
            if self.readonly:
                raise WebIDLError("[LenientFloat] used on a readonly attribute",
                                  [attr.location, self.location])
            if not self.type.includesRestrictedFloat():
                raise WebIDLError("[LenientFloat] used on an attribute with a "
                                  "non-restricted-float type",
                                  [attr.location, self.location])
        elif identifier == "EnforceRange":
            if self.readonly:
                raise WebIDLError("[EnforceRange] used on a readonly attribute",
                                  [attr.location, self.location])
            self.enforceRange = True
        elif identifier == "Clamp":
            if self.readonly:
                raise WebIDLError("[Clamp] used on a readonly attribute",
                                  [attr.location, self.location])
            self.clamp = True
        elif identifier == "StoreInSlot":
            if self.getExtendedAttribute("Cached"):
                raise WebIDLError("[StoreInSlot] and [Cached] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "Cached":
            if self.getExtendedAttribute("StoreInSlot"):
                raise WebIDLError("[Cached] and [StoreInSlot] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif (identifier == "CrossOriginReadable" or
              identifier == "CrossOriginWritable"):
            if not attr.noArguments():
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[%s] is only allowed on non-static "
                                  "attributes" % identifier,
                                  [attr.location, self.location])
            if self.getExtendedAttribute("LenientThis"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [%s]" % identifier,
                                  [attr.location, self.location])
        elif (identifier == "Pref" or
              identifier == "SetterThrows" or
              identifier == "Pure" or
              identifier == "Throws" or
              identifier == "GetterThrows" or
              identifier == "ChromeOnly" or
              identifier == "SameObject" or
              identifier == "Constant" or
              identifier == "Func" or
              identifier == "Frozen" or
              identifier == "AvailableIn" or
              identifier == "Const" or
              identifier == "Value" or
              identifier == "BoundsChecked" or
              identifier == "NewObject"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on attribute" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def resolve(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.type.resolveType(parentScope)
        IDLObjectWithIdentifier.resolve(self, parentScope)
    def addExtendedAttributes(self, attrs):
        # String-handling attributes ([Clamp]/[EnforceRange]/etc. on the
        # type) are peeled off before the generic processing.
        attrs = self.checkForStringHandlingExtendedAttributes(attrs)
        IDLInterfaceMember.addExtendedAttributes(self, attrs)
    def hasLenientThis(self):
        return self.lenientThis
    def isUnforgeable(self):
        return self._unforgeable
    def _getDependentObjects(self):
        return set([self.type])
class IDLArgument(IDLObjectWithIdentifier):
    """
    One argument of a method or callback signature; also reused for
    dictionary members (dictionaryMember=True).  complete() finishes the
    type and synthesizes/coerces the default value.
    """
    def __init__(self, location, identifier, type, optional=False, defaultValue=None, variadic=False, dictionaryMember=False):
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        assert isinstance(type, IDLType)
        self.type = type
        self.optional = optional
        self.defaultValue = defaultValue
        self.variadic = variadic
        self.dictionaryMember = dictionaryMember
        self._isComplete = False
        self.enforceRange = False
        self.clamp = False
        self._allowTreatNonCallableAsNull = False
        self._extraAttributes = {}
        # A variadic argument is necessarily optional.
        assert not variadic or optional
    def getExtendedAttribute(self, name):
        return self._extraAttributes.get(name)
    def addExtendedAttributes(self, attrs):
        attrs = self.checkForStringHandlingExtendedAttributes(
            attrs,
            isDictionaryMember=self.dictionaryMember,
            isOptional=self.optional)
        for attribute in attrs:
            identifier = attribute.identifier()
            if identifier == "Clamp":
                if not attribute.noArguments():
                    raise WebIDLError("[Clamp] must take no arguments",
                                      [attribute.location])
                if self.enforceRange:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location]);
                self.clamp = True
            elif identifier == "EnforceRange":
                if not attribute.noArguments():
                    raise WebIDLError("[EnforceRange] must take no arguments",
                                      [attribute.location])
                if self.clamp:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location]);
                self.enforceRange = True
            elif identifier == "TreatNonCallableAsNull":
                self._allowTreatNonCallableAsNull = True
            elif identifier in ['Ref', 'Const']:
                # ok in emscripten
                self._extraAttributes[identifier] = True
            else:
                raise WebIDLError("Unhandled extended attribute on an argument",
                                  [attribute.location])
    def isComplete(self):
        return self._isComplete
    def complete(self, scope):
        if self._isComplete:
            return
        self._isComplete = True
        if not self.type.isComplete():
            type = self.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self.type = type
        if ((self.type.isDictionary() or
             self.type.isUnion() and self.type.unroll().hasDictionaryType) and
            self.optional and not self.defaultValue):
            # Default optional dictionaries to null, for simplicity,
            # so the codegen doesn't have to special-case this.
            self.defaultValue = IDLNullValue(self.location)
        elif self.type.isAny():
            assert (self.defaultValue is None or
                    isinstance(self.defaultValue, IDLNullValue))
            # optional 'any' values always have a default value
            if self.optional and not self.defaultValue and not self.variadic:
                # Set the default value to undefined, for simplicity, so the
                # codegen doesn't have to special-case this.
                self.defaultValue = IDLUndefinedValue(self.location)
        # Now do the coercing thing; this needs to happen after the
        # above creation of a default value.
        if self.defaultValue:
            self.defaultValue = self.defaultValue.coerceToType(self.type,
                                                               self.location)
            assert self.defaultValue
    def allowTreatNonCallableAsNull(self):
        return self._allowTreatNonCallableAsNull
    def _getDependentObjects(self):
        deps = set([self.type])
        if self.defaultValue:
            deps.add(self.defaultValue)
        return deps
class IDLCallbackType(IDLType, IDLObjectWithScope):
    """
    A WebIDL callback function type.  It is both a type and a scope (its
    arguments resolve inside it), and has exactly one signature.
    """
    def __init__(self, location, parentScope, identifier, returnType, arguments):
        assert isinstance(returnType, IDLType)
        IDLType.__init__(self, location, identifier.name)
        self._returnType = returnType
        # Clone the list
        self._arguments = list(arguments)
        IDLObjectWithScope.__init__(self, location, parentScope, identifier)
        # Resolve each argument within this callback's own scope.
        for (returnType, arguments) in self.signatures():
            for argument in arguments:
                argument.resolve(self)
        self._treatNonCallableAsNull = False
        self._treatNonObjectAsNull = False
    def isCallback(self):
        return True
    def signatures(self):
        # A callback has exactly one (returnType, arguments) signature.
        return [(self._returnType, self._arguments)]
    def tag(self):
        return IDLType.Tags.callback
    def finish(self, scope):
        # Complete the return type and every argument type.
        if not self._returnType.isComplete():
            type = self._returnType.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self._returnType = type
        for argument in self._arguments:
            if argument.type.isComplete():
                continue
            type = argument.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            argument.type = type
    def validate(self):
        pass
    def isDistinguishableFrom(self, other):
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isNonCallbackInterface() or other.isDate())
    def addExtendedAttributes(self, attrs):
        # Handle the two callback-specific attributes here; everything else
        # is passed through to IDLType.
        unhandledAttrs = []
        for attr in attrs:
            if attr.identifier() == "TreatNonCallableAsNull":
                self._treatNonCallableAsNull = True
            elif attr.identifier() == "TreatNonObjectAsNull":
                self._treatNonObjectAsNull = True
            else:
                unhandledAttrs.append(attr)
        if self._treatNonCallableAsNull and self._treatNonObjectAsNull:
            raise WebIDLError("Cannot specify both [TreatNonCallableAsNull] "
                              "and [TreatNonObjectAsNull]", [self.location])
        if len(unhandledAttrs) != 0:
            IDLType.addExtendedAttributes(self, unhandledAttrs)
    def _getDependentObjects(self):
        return set([self._returnType] + self._arguments)
class IDLMethodOverload:
    """
    One overload of a WebIDL method.  Note that this is not quite an
    element of the spec's "effective overload set": overloads are not
    split up here based on optional-argument counts.  Each same-named
    method contributes a single IDLMethodOverload, and the owning
    IDLMethod holds the whole collection.
    """
    def __init__(self, returnType, arguments, location):
        self.returnType = returnType
        # Defensive copy, so later mutation of the caller's list cannot
        # affect this overload.
        self.arguments = arguments[:]
        self.location = location
    def _getDependentObjects(self):
        """All objects this overload depends on: its arguments plus its return type."""
        return set(self.arguments) | set([self.returnType])
class IDLMethod(IDLInterfaceMember, IDLScope):
    """
    A WebIDL operation (regular, static, or special) together with all of
    its overloads.  Methods sharing an identifier are merged into a single
    IDLMethod via addOverload().  Also acts as a scope for its arguments.
    """
    Special = enum(
        'Getter',
        'Setter',
        'Creator',
        'Deleter',
        'LegacyCaller',
        base=IDLInterfaceMember.Special
    )
    TypeSuffixModifier = enum(
        'None',
        'QMark',
        'Brackets'
    )
    NamedOrIndexed = enum(
        'Neither',
        'Named',
        'Indexed'
    )
    def __init__(self, location, identifier, returnType, arguments,
                 static=False, getter=False, setter=False, creator=False,
                 deleter=False, specialType=NamedOrIndexed.Neither,
                 legacycaller=False, stringifier=False, jsonifier=False):
        # REVIEW: specialType is NamedOrIndexed -- wow, this is messed up.
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Method)
        self._hasOverloads = False
        assert isinstance(returnType, IDLType)
        # self._overloads is a list of IDLMethodOverloads
        self._overloads = [IDLMethodOverload(returnType, arguments, location)]
        assert isinstance(static, bool)
        self._static = static
        assert isinstance(getter, bool)
        self._getter = getter
        assert isinstance(setter, bool)
        self._setter = setter
        assert isinstance(creator, bool)
        self._creator = creator
        assert isinstance(deleter, bool)
        self._deleter = deleter
        assert isinstance(legacycaller, bool)
        self._legacycaller = legacycaller
        assert isinstance(stringifier, bool)
        self._stringifier = stringifier
        assert isinstance(jsonifier, bool)
        self._jsonifier = jsonifier
        self._specialType = specialType
        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static operation must not be 'prototype'",
                              [location])
        self.assertSignatureConstraints()
    def __str__(self):
        return "Method '%s'" % self.identifier
    def assertSignatureConstraints(self):
        # Sanity-check the invariants the parser is supposed to enforce on
        # special operations (getter/setter/creator/deleter/stringifier/
        # jsonifier); see p_Operation.
        if self._getter or self._deleter:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            arguments = overload.arguments
            assert len(arguments) == 1
            assert arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or \
                   arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]
            assert not arguments[0].optional and not arguments[0].variadic
            assert not self._getter or not overload.returnType.isVoid()
        if self._setter or self._creator:
            assert len(self._overloads) == 1
            arguments = self._overloads[0].arguments
            assert len(arguments) == 2
            assert arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or \
                   arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]
            assert not arguments[0].optional and not arguments[0].variadic
            assert not arguments[1].optional and not arguments[1].variadic
        if self._stringifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.domstring]
        if self._jsonifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.object]
    def isStatic(self):
        return self._static
    def isGetter(self):
        return self._getter
    def isSetter(self):
        return self._setter
    def isCreator(self):
        return self._creator
    def isDeleter(self):
        return self._deleter
    def isNamed(self):
        assert self._specialType == IDLMethod.NamedOrIndexed.Named or \
               self._specialType == IDLMethod.NamedOrIndexed.Indexed
        return self._specialType == IDLMethod.NamedOrIndexed.Named
    def isIndexed(self):
        assert self._specialType == IDLMethod.NamedOrIndexed.Named or \
               self._specialType == IDLMethod.NamedOrIndexed.Indexed
        return self._specialType == IDLMethod.NamedOrIndexed.Indexed
    def isLegacycaller(self):
        return self._legacycaller
    def isStringifier(self):
        return self._stringifier
    def isJsonifier(self):
        return self._jsonifier
    def hasOverloads(self):
        return self._hasOverloads
    def isIdentifierLess(self):
        # Auto-generated "__"-prefixed names (see p_Operation) count as
        # identifier-less, except the magic __noSuchMethod__ name.
        return self.identifier.name[:2] == "__" and self.identifier.name != "__noSuchMethod__"
    def resolve(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        IDLObjectWithIdentifier.resolve(self, parentScope)
        IDLScope.__init__(self, self.location, parentScope, self.identifier)
        for (returnType, arguments) in self.signatures():
            for argument in arguments:
                argument.resolve(self)
    def addOverload(self, method):
        """Merge another method with the same identifier into this one."""
        assert len(method._overloads) == 1
        if self._extendedAttrDict != method._extendedAttrDict:
            raise WebIDLError("Extended attributes differ on different "
                              "overloads of %s" % method.identifier,
                              [self.location, method.location])
        self._overloads.extend(method._overloads)
        self._hasOverloads = True
        if self.isStatic() != method.isStatic():
            raise WebIDLError("Overloaded identifier %s appears with different values of the 'static' attribute" % method.identifier,
                              [method.location])
        if self.isLegacycaller() != method.isLegacycaller():
            raise WebIDLError("Overloaded identifier %s appears with different values of the 'legacycaller' attribute" % method.identifier,
                              [method.location])
        # Can't overload special things!
        assert not self.isGetter()
        assert not method.isGetter()
        assert not self.isSetter()
        assert not method.isSetter()
        assert not self.isCreator()
        assert not method.isCreator()
        assert not self.isDeleter()
        assert not method.isDeleter()
        assert not self.isStringifier()
        assert not method.isStringifier()
        assert not self.isJsonifier()
        assert not method.isJsonifier()
        return self
    def signatures(self):
        return [(overload.returnType, overload.arguments) for overload in
                self._overloads]
    def finish(self, scope):
        overloadWithPromiseReturnType = None
        overloadWithoutPromiseReturnType = None
        for overload in self._overloads:
            variadicArgument = None
            arguments = overload.arguments
            for (idx, argument) in enumerate(arguments):
                if not argument.isComplete():
                    argument.complete(scope)
                assert argument.type.isComplete()
                if (argument.type.isDictionary() or
                    (argument.type.isUnion() and
                     argument.type.unroll().hasDictionaryType)):
                    # Dictionaries and unions containing dictionaries at the
                    # end of the list or followed by optional arguments must be
                    # optional.
                    if (not argument.optional and
                        all(arg.optional for arg in arguments[idx+1:])):
                        raise WebIDLError("Dictionary argument or union "
                                          "argument containing a dictionary "
                                          "not followed by a required argument "
                                          "must be optional",
                                          [argument.location])
                    # An argument cannot be a Nullable Dictionary
                    if argument.type.nullable():
                        raise WebIDLError("An argument cannot be a nullable "
                                          "dictionary or nullable union "
                                          "containing a dictionary",
                                          [argument.location])
                # Only the last argument can be variadic
                if variadicArgument:
                    raise WebIDLError("Variadic argument is not last argument",
                                      [variadicArgument.location])
                if argument.variadic:
                    variadicArgument = argument
            returnType = overload.returnType
            if not returnType.isComplete():
                returnType = returnType.complete(scope)
                assert not isinstance(returnType, IDLUnresolvedType)
                assert not isinstance(returnType, IDLTypedefType)
                assert not isinstance(returnType.name, IDLUnresolvedIdentifier)
                overload.returnType = returnType
            if returnType.isPromise():
                overloadWithPromiseReturnType = overload
            else:
                overloadWithoutPromiseReturnType = overload
        # Make sure either all our overloads return Promises or none do
        if overloadWithPromiseReturnType and overloadWithoutPromiseReturnType:
            raise WebIDLError("We have overloads with both Promise and "
                              "non-Promise return types",
                              [overloadWithPromiseReturnType.location,
                               overloadWithoutPromiseReturnType.location])
        if overloadWithPromiseReturnType and self._legacycaller:
            raise WebIDLError("May not have a Promise return type for a "
                              "legacycaller.",
                              [overloadWithPromiseReturnType.location])
        # Now compute various information that will be used by the
        # WebIDL overload resolution algorithm.
        self.maxArgCount = max(len(s[1]) for s in self.signatures())
        self.allowedArgCounts = [ i for i in range(self.maxArgCount+1)
                                  if len(self.signaturesForArgCount(i)) != 0 ]
    def validate(self):
        # Make sure our overloads are properly distinguishable and don't have
        # different argument types before the distinguishing args.
        for argCount in self.allowedArgCounts:
            possibleOverloads = self.overloadsForArgCount(argCount)
            if len(possibleOverloads) == 1:
                continue
            distinguishingIndex = self.distinguishingIndexForArgCount(argCount)
            for idx in range(distinguishingIndex):
                firstSigType = possibleOverloads[0].arguments[idx].type
                for overload in possibleOverloads[1:]:
                    if overload.arguments[idx].type != firstSigType:
                        raise WebIDLError(
                            "Signatures for method '%s' with %d arguments have "
                            "different types of arguments at index %d, which "
                            "is before distinguishing index %d" %
                            (self.identifier.name, argCount, idx,
                             distinguishingIndex),
                            [self.location, overload.location])
    def overloadsForArgCount(self, argc):
        # An overload is callable with argc args when it has exactly argc
        # arguments, when the extra trailing arguments are all optional, or
        # when its final argument is variadic.
        return [overload for overload in self._overloads if
                len(overload.arguments) == argc or
                (len(overload.arguments) > argc and
                 all(arg.optional for arg in overload.arguments[argc:])) or
                (len(overload.arguments) < argc and
                 len(overload.arguments) > 0 and
                 overload.arguments[-1].variadic)]
    def signaturesForArgCount(self, argc):
        return [(overload.returnType, overload.arguments) for overload
                in self.overloadsForArgCount(argc)]
    def locationsForArgCount(self, argc):
        return [overload.location for overload in self.overloadsForArgCount(argc)]
    def distinguishingIndexForArgCount(self, argc):
        def isValidDistinguishingIndex(idx, signatures):
            for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
                for (secondRetval, secondArgs) in signatures[firstSigIndex+1:]:
                    if idx < len(firstArgs):
                        firstType = firstArgs[idx].type
                    else:
                        assert(firstArgs[-1].variadic)
                        firstType = firstArgs[-1].type
                    if idx < len(secondArgs):
                        secondType = secondArgs[idx].type
                    else:
                        assert(secondArgs[-1].variadic)
                        secondType = secondArgs[-1].type
                    if not firstType.isDistinguishableFrom(secondType):
                        return False
            return True
        signatures = self.signaturesForArgCount(argc)
        for idx in range(argc):
            if isValidDistinguishingIndex(idx, signatures):
                return idx
        # No valid distinguishing index.  Time to throw
        locations = self.locationsForArgCount(argc)
        raise WebIDLError("Signatures with %d arguments for method '%s' are not "
                          "distinguishable" % (argc, self.identifier.name),
                          locations)
    def handleExtendedAttribute(self, attr):
        identifier = attr.identifier()
        if identifier == "GetterThrows":
            raise WebIDLError("Methods must not be flagged as "
                              "[GetterThrows]",
                              [attr.location, self.location])
        elif identifier == "SetterThrows":
            raise WebIDLError("Methods must not be flagged as "
                              "[SetterThrows]",
                              [attr.location, self.location])
        elif identifier == "Unforgeable":
            raise WebIDLError("Methods must not be flagged as "
                              "[Unforgeable]",
                              [attr.location, self.location])
        elif identifier == "SameObject":
            raise WebIDLError("Methods must not be flagged as [SameObject]",
                              [attr.location, self.location]);
        elif identifier == "Constant":
            raise WebIDLError("Methods must not be flagged as [Constant]",
                              [attr.location, self.location]);
        elif identifier == "PutForwards":
            raise WebIDLError("Only attributes support [PutForwards]",
                              [attr.location, self.location])
        elif identifier == "LenientFloat":
            # This is called before we've done overload resolution
            assert len(self.signatures()) == 1
            sig = self.signatures()[0]
            if not sig[0].isVoid():
                raise WebIDLError("[LenientFloat] used on a non-void method",
                                  [attr.location, self.location])
            if not any(arg.type.includesRestrictedFloat() for arg in sig[1]):
                raise WebIDLError("[LenientFloat] used on an operation with no "
                                  "restricted float type arguments",
                                  [attr.location, self.location])
        elif (identifier == "Throws" or
              identifier == "NewObject" or
              identifier == "ChromeOnly" or
              identifier == "Pref" or
              identifier == "Func" or
              identifier == "AvailableIn" or
              identifier == "Pure" or
              identifier == "CrossOriginCallable" or
              identifier == "Ref" or
              identifier == "Value" or
              identifier == "Operator" or
              identifier == "Const" or
              identifier == "WebGLHandlesContextLoss"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on method" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def returnsPromise(self):
        # finish() guarantees all overloads agree on Promise-ness, so
        # checking the first one is sufficient.
        return self._overloads[0].returnType.isPromise()
    def _getDependentObjects(self):
        deps = set()
        for overload in self._overloads:
            # BUG FIX: this previously called deps.union(...), whose return
            # value (a new set) was discarded, so the method always returned
            # an empty set.  update() mutates deps in place.
            deps.update(overload._getDependentObjects())
        return deps
class IDLImplementsStatement(IDLObject):
    """Represents an "A implements B;" statement."""
    def __init__(self, location, implementor, implementee):
        IDLObject.__init__(self, location)
        self.implementor = implementor
        self.implementee = implementee
    def finish(self, scope):
        assert(isinstance(self.implementor, IDLIdentifierPlaceholder))
        assert(isinstance(self.implementee, IDLIdentifierPlaceholder))
        lhs = self.implementor.finish(scope)
        rhs = self.implementee.finish(scope)
        # NOTE: we deliberately leave self.implementor/self.implementee as
        # the placeholders so the original source locations are preserved
        # for error reporting.
        if not isinstance(lhs, IDLInterface):
            raise WebIDLError("Left-hand side of 'implements' is not an "
                              "interface",
                              [self.implementor.location])
        if lhs.isCallback():
            raise WebIDLError("Left-hand side of 'implements' is a callback "
                              "interface",
                              [self.implementor.location])
        if not isinstance(rhs, IDLInterface):
            raise WebIDLError("Right-hand side of 'implements' is not an "
                              "interface",
                              [self.implementee.location])
        if rhs.isCallback():
            raise WebIDLError("Right-hand side of 'implements' is a callback "
                              "interface",
                              [self.implementee.location])
        lhs.addImplementedInterface(rhs)
    def validate(self):
        pass
    def addExtendedAttributes(self, attrs):
        # Extended attributes are not allowed on implements statements.
        assert len(attrs) == 0
class IDLExtendedAttribute(IDLObject):
    """
    A class to represent IDL extended attributes so we can give them locations
    """
    def __init__(self, location, tuple):
        IDLObject.__init__(self, location)
        self._tuple = tuple
    def identifier(self):
        """The attribute name, always the first tuple element."""
        return self._tuple[0]
    def noArguments(self):
        """True for a bare attribute of the form [Foo]."""
        return len(self._tuple) == 1
    def hasValue(self):
        """True for [Foo=bar]-style attributes carrying a string value."""
        if len(self._tuple) < 2:
            return False
        return isinstance(self._tuple[1], str)
    def value(self):
        assert(self.hasValue())
        return self._tuple[1]
    def hasArgs(self):
        """True for [Foo(...)] and [Foo=bar(...)] forms."""
        if len(self._tuple) == 3:
            return True
        return len(self._tuple) == 2 and isinstance(self._tuple[1], list)
    def args(self):
        assert(self.hasArgs())
        # Whatever the form, the argument list is the last tuple element.
        return self._tuple[-1]
    def listValue(self):
        """
        Backdoor for storing random data in _extendedAttrDict
        """
        return list(self._tuple)[1:]
# Parser
class Tokenizer(object):
    """
    PLY lexer for WebIDL.  Each t_* rule's regular expression is taken by
    PLY from the method's docstring, so those docstrings are load-bearing
    and must not be edited.
    """
    tokens = [
        "INTEGER",
        "FLOATLITERAL",
        "IDENTIFIER",
        "STRING",
        "WHITESPACE",
        "OTHER"
        ]
    def t_FLOATLITERAL(self, t):
        r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
        t.value = float(t.value)
        return t
    def t_INTEGER(self, t):
        r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
        try:
            # Can't use int(), because that doesn't handle octal properly.
            t.value = parseInt(t.value)
        except:
            # NOTE(review): bare except kept as-is; parseInt presumably
            # raises ValueError on bad input, but narrowing this is not
            # safe without confirming its contract.
            raise WebIDLError("Invalid integer literal",
                              [Location(lexer=self.lexer,
                                        lineno=self.lexer.lineno,
                                        lexpos=self.lexer.lexpos,
                                        filename=self._filename)])
        return t
    def t_IDENTIFIER(self, t):
        r'[A-Z_a-z][0-9A-Z_a-z]*'
        # Reserved words lex as IDENTIFIER first, then get retyped here.
        t.type = self.keywords.get(t.value, 'IDENTIFIER')
        return t
    def t_STRING(self, t):
        r'"[^"]*"'
        # Strip the surrounding quotes.
        t.value = t.value[1:-1]
        return t
    def t_WHITESPACE(self, t):
        r'[\t\n\r ]+|[\t\n\r ]*((//[^\n]*|/\*.*?\*/)[\t\n\r ]*)+'
        # Whitespace and comments are discarded.
        pass
    def t_ELLIPSIS(self, t):
        r'\.\.\.'
        t.type = self.keywords.get(t.value)
        return t
    def t_OTHER(self, t):
        r'[^\t\n\r 0-9A-Z_a-z]'
        t.type = self.keywords.get(t.value, 'OTHER')
        return t
    keywords = {
        "module": "MODULE",
        "interface": "INTERFACE",
        "partial": "PARTIAL",
        "dictionary": "DICTIONARY",
        "exception": "EXCEPTION",
        "enum": "ENUM",
        "callback": "CALLBACK",
        "typedef": "TYPEDEF",
        "implements": "IMPLEMENTS",
        "const": "CONST",
        "null": "NULL",
        "true": "TRUE",
        "false": "FALSE",
        "serializer": "SERIALIZER",
        "stringifier": "STRINGIFIER",
        "jsonifier": "JSONIFIER",
        "unrestricted": "UNRESTRICTED",
        "attribute": "ATTRIBUTE",
        "readonly": "READONLY",
        "inherit": "INHERIT",
        "static": "STATIC",
        "getter": "GETTER",
        "setter": "SETTER",
        "creator": "CREATOR",
        "deleter": "DELETER",
        "legacycaller": "LEGACYCALLER",
        "optional": "OPTIONAL",
        "...": "ELLIPSIS",
        "::": "SCOPE",
        "Date": "DATE",
        "DOMString": "DOMSTRING",
        "ByteString": "BYTESTRING",
        "any": "ANY",
        "boolean": "BOOLEAN",
        "byte": "BYTE",
        "double": "DOUBLE",
        "float": "FLOAT",
        "long": "LONG",
        "object": "OBJECT",
        "octet": "OCTET",
        # FIX: a second, duplicate "optional" entry used to appear here;
        # it was redundant (identical key and value) and has been removed.
        "sequence": "SEQUENCE",
        "short": "SHORT",
        "unsigned": "UNSIGNED",
        "void": "VOID",
        ":": "COLON",
        ";": "SEMICOLON",
        "{": "LBRACE",
        "}": "RBRACE",
        "(": "LPAREN",
        ")": "RPAREN",
        "[": "LBRACKET",
        "]": "RBRACKET",
        "?": "QUESTIONMARK",
        ",": "COMMA",
        "=": "EQUALS",
        "<": "LT",
        ">": "GT",
        "ArrayBuffer": "ARRAYBUFFER",
        "or": "OR"
        }
    tokens.extend(keywords.values())
    def t_error(self, t):
        # BUG FIX: this used self.filename, which is never assigned anywhere
        # in this class (everything else uses self._filename), so reaching
        # this path raised AttributeError instead of the intended WebIDLError.
        raise WebIDLError("Unrecognized Input",
                          [Location(lexer=self.lexer,
                                    lineno=self.lexer.lineno,
                                    lexpos=self.lexer.lexpos,
                                    filename=self._filename)])
    def __init__(self, outputdir, lexer=None):
        if lexer:
            self.lexer = lexer
        else:
            self.lexer = lex.lex(object=self,
                                 outputdir=outputdir,
                                 lextab='webidllex',
                                 reflags=re.DOTALL)
class Parser(Tokenizer):
    def getLocation(self, p, i):
        """Build a Location for grammar symbol *i* of production *p*."""
        return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)
    def globalScope(self):
        """The scope that all top-level definitions are resolved in."""
        return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
    def p_Definitions(self, p):
        """
        Definitions : ExtendedAttributeList Definition Definitions
        """
        # NOTE: PLY takes the grammar production from the docstring above.
        # An extended-attribute list is only legal when it precedes an
        # actual definition.
        if p[2]:
            p[0] = [p[2]]
            p[2].addExtendedAttributes(p[1])
        else:
            assert not p[1]
            p[0] = []
        p[0].extend(p[3])
    def p_DefinitionsEmpty(self, p):
        """
        Definitions :
        """
        # End of the definition list.
        p[0] = []
    def p_Definition(self, p):
        """
        Definition : CallbackOrInterface
                   | PartialInterface
                   | Dictionary
                   | Exception
                   | Enum
                   | Typedef
                   | ImplementsStatement
        """
        p[0] = p[1]
        assert p[1] # We might not have implemented something ...
    def p_CallbackOrInterfaceCallback(self, p):
        """
        CallbackOrInterface : CALLBACK CallbackRestOrInterface
        """
        # "callback interface Foo" comes through here: the parsed interface
        # is flagged as a callback interface.  A plain callback function
        # (IDLCallbackType) is passed through unchanged.
        if p[2].isInterface():
            assert isinstance(p[2], IDLInterface)
            p[2].setCallback(True)
        p[0] = p[2]
    def p_CallbackOrInterfaceInterface(self, p):
        """
        CallbackOrInterface : Interface
        """
        p[0] = p[1]
    def p_CallbackRestOrInterface(self, p):
        """
        CallbackRestOrInterface : CallbackRest
                                | Interface
        """
        assert p[1]
        p[0] = p[1]
    def p_Interface(self, p):
        """
        Interface : INTERFACE IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        parent = p[3]
        try:
            # If this name is already known (from an earlier partial
            # interface or forward declaration), upgrade that object to a
            # full, non-partial interface instead of creating a new one.
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLInterface):
                    raise WebIDLError("Partial interface has the same name as "
                                      "non-interface object",
                                      [location, p[0].location])
                p[0].setNonPartial(location, parent, members)
                return
        except Exception, ex:
            # Re-raise real WebIDL errors; anything else (a failed lookup)
            # just falls through to creating a fresh interface.
            if isinstance(ex, WebIDLError):
                raise ex
            pass
        p[0] = IDLInterface(location, self.globalScope(), identifier, parent,
                            members, isPartial=False)
    def p_InterfaceForwardDecl(self, p):
        """
        Interface : INTERFACE IDENTIFIER SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        try:
            # A repeated forward declaration just re-uses the existing
            # external-interface object.
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLExternalInterface):
                    raise WebIDLError("Name collision between external "
                                      "interface declaration for identifier "
                                      "%s and %s" % (identifier.name, p[0]),
                                      [location, p[0].location])
                return
        except Exception, ex:
            # Re-raise real WebIDL errors; a failed lookup falls through.
            if isinstance(ex, WebIDLError):
                raise ex
            pass
        p[0] = IDLExternalInterface(location, self.globalScope(), identifier)
    def p_PartialInterface(self, p):
        """
        PartialInterface : PARTIAL INTERFACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 2)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        members = p[5]
        try:
            if self.globalScope()._lookupIdentifier(identifier):
                p[0] = self.globalScope()._lookupIdentifier(identifier)
                if not isinstance(p[0], IDLInterface):
                    raise WebIDLError("Partial interface has the same name as "
                                      "non-interface object",
                                      [location, p[0].location])
                # Just throw our members into the existing IDLInterface.  If we
                # have extended attributes, those will get added to it
                # automatically.
                p[0].members.extend(members)
                return
        except Exception, ex:
            # Re-raise real WebIDL errors; a failed lookup falls through to
            # creating a new partial interface.
            if isinstance(ex, WebIDLError):
                raise ex
            pass
        p[0] = IDLInterface(location, self.globalScope(), identifier, None,
                            members, isPartial=True)
        pass
    def p_Inheritance(self, p):
        """
        Inheritance : COLON ScopedName
        """
        # The parent is recorded as a placeholder; it is resolved during
        # the interface's finish() pass.
        p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
    def p_InheritanceEmpty(self, p):
        """
        Inheritance :
        """
        # No parent: p[0] stays None.
        pass
    def p_InterfaceMembers(self, p):
        """
        InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
        """
        p[0] = [p[2]] if p[2] else []
        # Extended attributes may only appear before a real member.
        assert not p[1] or p[2]
        # NOTE(review): addExtendedAttributes is called unconditionally, so
        # this presumes p[2] is always truthy here — confirm no member
        # production leaves p[0] unset.
        p[2].addExtendedAttributes(p[1])
        p[0].extend(p[3])
    def p_InterfaceMembersEmpty(self, p):
        """
        InterfaceMembers :
        """
        # End of the member list.
        p[0] = []
    def p_InterfaceMember(self, p):
        """
        InterfaceMember : Const
                        | AttributeOrOperation
        """
        p[0] = p[1]
    def p_Dictionary(self, p):
        """
        Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        # p[3] is the parent placeholder (or None).
        p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)
    def p_DictionaryMembers(self, p):
        """
        DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
                          |
        """
        if len(p) == 1:
            # We're at the end of the list
            p[0] = []
            return
        # Add our extended attributes
        p[2].addExtendedAttributes(p[1])
        p[0] = [p[2]]
        p[0].extend(p[3])
    def p_DictionaryMember(self, p):
        """
        DictionaryMember : Type IDENTIFIER DefaultValue SEMICOLON
        """
        # These quack a lot like optional arguments, so just treat them that way.
        t = p[1]
        assert isinstance(t, IDLType)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        defaultValue = p[3]
        p[0] = IDLArgument(self.getLocation(p, 2), identifier, t, optional=True,
                           defaultValue=defaultValue, variadic=False,
                           dictionaryMember=True)
    def p_DefaultValue(self, p):
        """
        DefaultValue : EQUALS ConstValue
                     |
        """
        # "= value" is optional; absence yields None.
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = None
    def p_Exception(self, p):
        """
        Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
        """
        # Exceptions are recognized but not modeled; p[0] is left unset.
        # NOTE(review): p_Definition asserts p[1], so an actual exception in
        # the input would trip that assert — presumably exceptions never
        # occur in practice; confirm.
        pass
    def p_Enum(self, p):
        """
        Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        values = p[4]
        # The grammar requires at least one enum value.
        assert values
        p[0] = IDLEnum(location, self.globalScope(), identifier, values)
    def p_EnumValueList(self, p):
        """
        EnumValueList : STRING EnumValueListComma
        """
        p[0] = [p[1]]
        p[0].extend(p[2])
    def p_EnumValueListComma(self, p):
        """
        EnumValueListComma : COMMA EnumValueListString
        """
        p[0] = p[2]
    def p_EnumValueListCommaEmpty(self, p):
        """
        EnumValueListComma :
        """
        p[0] = []
    def p_EnumValueListString(self, p):
        """
        EnumValueListString : STRING EnumValueListComma
        """
        p[0] = [p[1]]
        p[0].extend(p[2])
    def p_EnumValueListStringEmpty(self, p):
        """
        EnumValueListString :
        """
        # Empty alternative permits a trailing comma in the enum value list.
        p[0] = []
    def p_CallbackRest(self, p):
        """
        CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
        """
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
        p[0] = IDLCallbackType(self.getLocation(p, 1), self.globalScope(),
                               identifier, p[3], p[5])
    def p_ExceptionMembers(self, p):
        """
        ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
                         |
        """
        # Exception members are parsed but deliberately ignored.
        pass
    def p_Typedef(self, p):
        """
        Typedef : TYPEDEF Type IDENTIFIER SEMICOLON
        """
        typedef = IDLTypedefType(self.getLocation(p, 1), p[2], p[3])
        # Side effect: registers the typedef name in the global scope.
        typedef.resolve(self.globalScope())
        p[0] = typedef
    def p_ImplementsStatement(self, p):
        """
        ImplementsStatement : ScopedName IMPLEMENTS ScopedName SEMICOLON
        """
        assert(p[2] == "implements")
        # Both sides are placeholders resolved later in
        # IDLImplementsStatement.finish().
        implementor = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
        implementee = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
        p[0] = IDLImplementsStatement(self.getLocation(p, 1), implementor,
                                      implementee)
    def p_Const(self, p):
        """
        Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
        """
        location = self.getLocation(p, 1)
        type = p[2]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        value = p[5]
        p[0] = IDLConst(location, identifier, type, value)
    def p_ConstValueBoolean(self, p):
        """
        ConstValue : BooleanLiteral
        """
        location = self.getLocation(p, 1)
        booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
        p[0] = IDLValue(location, booleanType, p[1])
    def p_ConstValueInteger(self, p):
        """
        ConstValue : INTEGER
        """
        location = self.getLocation(p, 1)
        # We don't know ahead of time what type the integer literal is.
        # Determine the smallest type it could possibly fit in and use that.
        integerType = matchIntegerValueToType(p[1])
        if integerType == None:
            raise WebIDLError("Integer literal out of range", [location])
        p[0] = IDLValue(location, integerType, p[1])
    def p_ConstValueFloat(self, p):
        """
        ConstValue : FLOATLITERAL
        """
        location = self.getLocation(p, 1)
        # Use unrestricted float so Infinity/NaN literals are representable.
        p[0] = IDLValue(location, BuiltinTypes[IDLBuiltinType.Types.unrestricted_float], p[1])
    def p_ConstValueString(self, p):
        """
        ConstValue : STRING
        """
        location = self.getLocation(p, 1)
        stringType = BuiltinTypes[IDLBuiltinType.Types.domstring]
        p[0] = IDLValue(location, stringType, p[1])
    def p_ConstValueNull(self, p):
        """
        ConstValue : NULL
        """
        p[0] = IDLNullValue(self.getLocation(p, 1))
    def p_BooleanLiteralTrue(self, p):
        """
        BooleanLiteral : TRUE
        """
        p[0] = True
    def p_BooleanLiteralFalse(self, p):
        """
        BooleanLiteral : FALSE
        """
        p[0] = False
    def p_AttributeOrOperation(self, p):
        """
        AttributeOrOperation : Attribute
                             | Operation
        """
        p[0] = p[1]
    def p_AttributeWithQualifier(self, p):
        """
        Attribute : Qualifier AttributeRest
        """
        # Qualifier yields a one-element list containing Static or
        # Stringifier (see p_QualifierStatic / p_QualifierStringifier).
        static = IDLInterfaceMember.Special.Static in p[1]
        stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
        (location, identifier, type, readonly) = p[2]
        p[0] = IDLAttribute(location, identifier, type, readonly, static=static,
                            stringifier=stringifier)
    def p_Attribute(self, p):
        """
        Attribute : Inherit AttributeRest
        """
        (location, identifier, type, readonly) = p[2]
        p[0] = IDLAttribute(location, identifier, type, readonly, inherit=p[1])
    def p_AttributeRest(self, p):
        """
        AttributeRest : ReadOnly ATTRIBUTE Type IDENTIFIER SEMICOLON
        """
        location = self.getLocation(p, 2)
        readonly = p[1]
        t = p[3]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 4), p[4])
        # Returns a tuple; the IDLAttribute is constructed by the callers.
        p[0] = (location, identifier, t, readonly)
    def p_ReadOnly(self, p):
        """
        ReadOnly : READONLY
        """
        p[0] = True
    def p_ReadOnlyEmpty(self, p):
        """
        ReadOnly :
        """
        p[0] = False
    def p_Inherit(self, p):
        """
        Inherit : INHERIT
        """
        p[0] = True
    def p_InheritEmpty(self, p):
        """
        Inherit :
        """
        p[0] = False
    def p_Operation(self, p):
        """
        Operation : Qualifiers OperationRest
        """
        qualifiers = p[1]
        # Disallow duplicates in the qualifier set
        if not len(set(qualifiers)) == len(qualifiers):
            raise WebIDLError("Duplicate qualifiers are not allowed",
                              [self.getLocation(p, 1)])
        static = IDLInterfaceMember.Special.Static in p[1]
        # If static is there that's all that's allowed.  This is disallowed
        # by the parser, so we can assert here.
        assert not static or len(qualifiers) == 1
        stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
        # If stringifier is there that's all that's allowed.  This is disallowed
        # by the parser, so we can assert here.
        assert not stringifier or len(qualifiers) == 1
        getter = True if IDLMethod.Special.Getter in p[1] else False
        setter = True if IDLMethod.Special.Setter in p[1] else False
        creator = True if IDLMethod.Special.Creator in p[1] else False
        deleter = True if IDLMethod.Special.Deleter in p[1] else False
        legacycaller = True if IDLMethod.Special.LegacyCaller in p[1] else False
        if getter or deleter:
            if setter or creator:
                raise WebIDLError("getter and deleter are incompatible with setter and creator",
                                  [self.getLocation(p, 1)])
        (returnType, identifier, arguments) = p[2]
        assert isinstance(returnType, IDLType)
        specialType = IDLMethod.NamedOrIndexed.Neither
        # Validate getter/deleter signatures: exactly one required,
        # non-variadic argument of type DOMString (named) or unsigned long
        # (indexed).
        if getter or deleter:
            if len(arguments) != 1:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("getter" if getter else "deleter"),
                                  [self.getLocation(p, 2)])
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("getter" if getter else "deleter"),
                                  [arguments[0].location])
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("getter" if getter else "deleter",
                                   "optional" if arguments[0].optional else "variadic"),
                                   [arguments[0].location])
        if getter:
            if returnType.isVoid():
                raise WebIDLError("getter cannot have void return type",
                                  [self.getLocation(p, 2)])
        # Validate setter/creator signatures: exactly two required,
        # non-variadic arguments, the first DOMString or unsigned long.
        if setter or creator:
            if len(arguments) != 2:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("setter" if setter else "creator"),
                                  [self.getLocation(p, 2)])
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("setter" if setter else "creator"),
                                  [arguments[0].location])
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[0].optional else "variadic"),
                                  [arguments[0].location])
            if arguments[1].optional or arguments[1].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[1].optional else "variadic"),
                                  [arguments[1].location])
        if stringifier:
            if len(arguments) != 0:
                raise WebIDLError("stringifier has wrong number of arguments",
                                  [self.getLocation(p, 2)])
            if not returnType.isDOMString():
                raise WebIDLError("stringifier must have DOMString return type",
                                  [self.getLocation(p, 2)])
        # identifier might be None.  This is only permitted for special methods.
        if not identifier:
            if not getter and not setter and not creator and \
               not deleter and not legacycaller and not stringifier:
                raise WebIDLError("Identifier required for non-special methods",
                                  [self.getLocation(p, 2)])
            location = BuiltinLocation("<auto-generated-identifier>")
            identifier = IDLUnresolvedIdentifier(location, "__%s%s%s%s%s%s%s" %
                ("named" if specialType == IDLMethod.NamedOrIndexed.Named else \
                 "indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
                 "getter" if getter else "",
                 "setter" if setter else "",
                 "deleter" if deleter else "",
                 "creator" if creator else "",
                 "legacycaller" if legacycaller else "",
                 "stringifier" if stringifier else ""), allowDoubleUnderscore=True)
        method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
                           static=static, getter=getter, setter=setter, creator=creator,
                           deleter=deleter, specialType=specialType,
                           legacycaller=legacycaller, stringifier=stringifier)
        p[0] = method
def p_Stringifier(self, p):
    """
    Operation : STRINGIFIER SEMICOLON
    """
    # NOTE: in PLY the docstring above IS the grammar production; it must not
    # be edited as documentation.
    # A bare "stringifier;" declaration desugars to a special method named
    # __stringifier with DOMString return type and no arguments.
    identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                         "__stringifier",
                                         allowDoubleUnderscore=True)
    method = IDLMethod(self.getLocation(p, 1),
                       identifier,
                       returnType=BuiltinTypes[IDLBuiltinType.Types.domstring],
                       arguments=[],
                       stringifier=True)
    p[0] = method
def p_Jsonifier(self, p):
    """
    Operation : JSONIFIER SEMICOLON
    """
    # (PLY: the docstring above is the grammar production.)
    # A bare "jsonifier;" declaration desugars to a special __jsonifier
    # method with object return type and no arguments.
    identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                         "__jsonifier", allowDoubleUnderscore=True)
    method = IDLMethod(self.getLocation(p, 1),
                       identifier,
                       returnType=BuiltinTypes[IDLBuiltinType.Types.object],
                       arguments=[],
                       jsonifier=True)
    p[0] = method
def p_QualifierStatic(self, p):
"""
Qualifier : STATIC
"""
p[0] = [IDLInterfaceMember.Special.Static]
def p_QualifierStringifier(self, p):
"""
Qualifier : STRINGIFIER
"""
p[0] = [IDLInterfaceMember.Special.Stringifier]
def p_Qualifiers(self, p):
"""
Qualifiers : Qualifier
| Specials
"""
p[0] = p[1]
def p_Specials(self, p):
"""
Specials : Special Specials
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_SpecialsEmpty(self, p):
"""
Specials :
"""
p[0] = []
def p_SpecialGetter(self, p):
"""
Special : GETTER
"""
p[0] = IDLMethod.Special.Getter
def p_SpecialSetter(self, p):
"""
Special : SETTER
"""
p[0] = IDLMethod.Special.Setter
def p_SpecialCreator(self, p):
"""
Special : CREATOR
"""
p[0] = IDLMethod.Special.Creator
def p_SpecialDeleter(self, p):
"""
Special : DELETER
"""
p[0] = IDLMethod.Special.Deleter
def p_SpecialLegacyCaller(self, p):
"""
Special : LEGACYCALLER
"""
p[0] = IDLMethod.Special.LegacyCaller
def p_OperationRest(self, p):
"""
OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = (p[1], p[2], p[4])
def p_OptionalIdentifier(self, p):
"""
OptionalIdentifier : IDENTIFIER
"""
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_OptionalIdentifierEmpty(self, p):
"""
OptionalIdentifier :
"""
pass
def p_ArgumentList(self, p):
"""
ArgumentList : Argument Arguments
"""
p[0] = [p[1]] if p[1] else []
p[0].extend(p[2])
def p_ArgumentListEmpty(self, p):
"""
ArgumentList :
"""
p[0] = []
def p_Arguments(self, p):
"""
Arguments : COMMA Argument Arguments
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ArgumentsEmpty(self, p):
"""
Arguments :
"""
p[0] = []
def p_Argument(self, p):
    """
    Argument : ExtendedAttributeList Optional Type Ellipsis ArgumentName DefaultValue
    """
    # (PLY: the docstring above is the grammar production.)
    # Builds an IDLArgument from the production pieces and validates the
    # optional/variadic/default-value combinations.
    t = p[3]
    assert isinstance(t, IDLType)

    identifier = IDLUnresolvedIdentifier(self.getLocation(p, 5), p[5])

    optional = p[2]
    variadic = p[4]
    defaultValue = p[6]

    # Only "optional" arguments may carry a default value.
    if not optional and defaultValue:
        raise WebIDLError("Mandatory arguments can't have a default value.",
                          [self.getLocation(p, 6)])

    # We can't test t.isAny() here and give it a default value as needed,
    # since at this point t is not a fully resolved type yet (e.g. it might
    # be a typedef). We'll handle the 'any' case in IDLArgument.complete.

    if variadic:
        if optional:
            raise WebIDLError("Variadic arguments should not be marked optional.",
                              [self.getLocation(p, 2)])
        # A variadic argument may be omitted entirely, so it is implicitly
        # treated as optional from here on.
        optional = variadic

    p[0] = IDLArgument(self.getLocation(p, 5), identifier, t, optional, defaultValue, variadic)
    p[0].addExtendedAttributes(p[1])
def p_ArgumentName(self, p):
"""
ArgumentName : IDENTIFIER
| ATTRIBUTE
| CALLBACK
| CONST
| CREATOR
| DELETER
| DICTIONARY
| ENUM
| EXCEPTION
| GETTER
| IMPLEMENTS
| INHERIT
| INTERFACE
| LEGACYCALLER
| PARTIAL
| SERIALIZER
| SETTER
| STATIC
| STRINGIFIER
| JSONIFIER
| TYPEDEF
| UNRESTRICTED
"""
p[0] = p[1]
def p_Optional(self, p):
"""
Optional : OPTIONAL
"""
p[0] = True
def p_OptionalEmpty(self, p):
"""
Optional :
"""
p[0] = False
def p_Ellipsis(self, p):
"""
Ellipsis : ELLIPSIS
"""
p[0] = True
def p_EllipsisEmpty(self, p):
"""
Ellipsis :
"""
p[0] = False
def p_ExceptionMember(self, p):
"""
ExceptionMember : Const
| ExceptionField
"""
pass
def p_ExceptionField(self, p):
"""
ExceptionField : Type IDENTIFIER SEMICOLON
"""
pass
def p_ExtendedAttributeList(self, p):
"""
ExtendedAttributeList : LBRACKET ExtendedAttribute ExtendedAttributes RBRACKET
"""
p[0] = [p[2]]
if p[3]:
p[0].extend(p[3])
def p_ExtendedAttributeListEmpty(self, p):
"""
ExtendedAttributeList :
"""
p[0] = []
def p_ExtendedAttribute(self, p):
"""
ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeNamedArgList
"""
p[0] = IDLExtendedAttribute(self.getLocation(p, 1), p[1])
def p_ExtendedAttributeEmpty(self, p):
"""
ExtendedAttribute :
"""
pass
def p_ExtendedAttributes(self, p):
"""
ExtendedAttributes : COMMA ExtendedAttribute ExtendedAttributes
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ExtendedAttributesEmpty(self, p):
"""
ExtendedAttributes :
"""
p[0] = []
def p_Other(self, p):
"""
Other : INTEGER
| FLOATLITERAL
| IDENTIFIER
| STRING
| OTHER
| ELLIPSIS
| COLON
| SCOPE
| SEMICOLON
| LT
| EQUALS
| GT
| QUESTIONMARK
| DATE
| DOMSTRING
| BYTESTRING
| ANY
| ATTRIBUTE
| BOOLEAN
| BYTE
| LEGACYCALLER
| CONST
| CREATOR
| DELETER
| DOUBLE
| EXCEPTION
| FALSE
| FLOAT
| GETTER
| IMPLEMENTS
| INHERIT
| INTERFACE
| LONG
| MODULE
| NULL
| OBJECT
| OCTET
| OPTIONAL
| SEQUENCE
| SETTER
| SHORT
| STATIC
| STRINGIFIER
| JSONIFIER
| TRUE
| TYPEDEF
| UNSIGNED
| VOID
"""
pass
def p_OtherOrComma(self, p):
"""
OtherOrComma : Other
| COMMA
"""
pass
def p_TypeSingleType(self, p):
"""
Type : SingleType
"""
p[0] = p[1]
def p_TypeUnionType(self, p):
"""
Type : UnionType TypeSuffix
"""
p[0] = self.handleModifiers(p[1], p[2])
def p_SingleTypeNonAnyType(self, p):
"""
SingleType : NonAnyType
"""
p[0] = p[1]
def p_SingleTypeAnyType(self, p):
"""
SingleType : ANY TypeSuffixStartingWithArray
"""
p[0] = self.handleModifiers(BuiltinTypes[IDLBuiltinType.Types.any], p[2])
def p_UnionType(self, p):
"""
UnionType : LPAREN UnionMemberType OR UnionMemberType UnionMemberTypes RPAREN
"""
types = [p[2], p[4]]
types.extend(p[5])
p[0] = IDLUnionType(self.getLocation(p, 1), types)
def p_UnionMemberTypeNonAnyType(self, p):
"""
UnionMemberType : NonAnyType
"""
p[0] = p[1]
def p_UnionMemberTypeArrayOfAny(self, p):
"""
UnionMemberTypeArrayOfAny : ANY LBRACKET RBRACKET
"""
p[0] = IDLArrayType(self.getLocation(p, 2),
BuiltinTypes[IDLBuiltinType.Types.any])
def p_UnionMemberType(self, p):
"""
UnionMemberType : UnionType TypeSuffix
| UnionMemberTypeArrayOfAny TypeSuffix
"""
p[0] = self.handleModifiers(p[1], p[2])
def p_UnionMemberTypes(self, p):
"""
UnionMemberTypes : OR UnionMemberType UnionMemberTypes
"""
p[0] = [p[2]]
p[0].extend(p[3])
def p_UnionMemberTypesEmpty(self, p):
"""
UnionMemberTypes :
"""
p[0] = []
def p_NonAnyType(self, p):
"""
NonAnyType : PrimitiveOrStringType TypeSuffix
| ARRAYBUFFER TypeSuffix
| OBJECT TypeSuffix
"""
if p[1] == "object":
type = BuiltinTypes[IDLBuiltinType.Types.object]
elif p[1] == "ArrayBuffer":
type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
else:
type = BuiltinTypes[p[1]]
p[0] = self.handleModifiers(type, p[2])
def p_NonAnyTypeSequenceType(self, p):
"""
NonAnyType : SEQUENCE LT Type GT Null
"""
innerType = p[3]
type = IDLSequenceType(self.getLocation(p, 1), innerType)
if p[5]:
type = IDLNullableType(self.getLocation(p, 5), type)
p[0] = type
def p_NonAnyTypeScopedName(self, p):
    """
    NonAnyType : ScopedName TypeSuffix
    """
    # (PLY: the docstring above is the grammar production.)
    # Resolve the name against the global scope if possible; otherwise record
    # it as an IDLUnresolvedType to be fixed up later in finish().
    assert isinstance(p[1], IDLUnresolvedIdentifier)

    type = None

    try:
        if self.globalScope()._lookupIdentifier(p[1]):
            obj = self.globalScope()._lookupIdentifier(p[1])
            if obj.isType():
                type = obj
            else:
                # Known name that is not itself a type (e.g. an interface):
                # wrap it so it behaves as a type.
                type = IDLWrapperType(self.getLocation(p, 1), p[1])
            p[0] = self.handleModifiers(type, p[2])
            return
    except Exception:
        # Lookup failed (e.g. a forward reference not yet declared); fall
        # through and record the name as unresolved.
        # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed to Exception with no change for the lookup
        # failures this guards against.
        pass

    type = IDLUnresolvedType(self.getLocation(p, 1), p[1])
    p[0] = self.handleModifiers(type, p[2])
def p_NonAnyTypeDate(self, p):
"""
NonAnyType : DATE TypeSuffix
"""
p[0] = self.handleModifiers(BuiltinTypes[IDLBuiltinType.Types.date],
p[2])
def p_ConstType(self, p):
"""
ConstType : PrimitiveOrStringType Null
"""
type = BuiltinTypes[p[1]]
if p[2]:
type = IDLNullableType(self.getLocation(p, 1), type)
p[0] = type
def p_ConstTypeIdentifier(self, p):
"""
ConstType : IDENTIFIER Null
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
type = IDLUnresolvedType(self.getLocation(p, 1), identifier)
if p[2]:
type = IDLNullableType(self.getLocation(p, 1), type)
p[0] = type
def p_PrimitiveOrStringTypeUint(self, p):
"""
PrimitiveOrStringType : UnsignedIntegerType
"""
p[0] = p[1]
def p_PrimitiveOrStringTypeBoolean(self, p):
"""
PrimitiveOrStringType : BOOLEAN
"""
p[0] = IDLBuiltinType.Types.boolean
def p_PrimitiveOrStringTypeByte(self, p):
"""
PrimitiveOrStringType : BYTE
"""
p[0] = IDLBuiltinType.Types.byte
def p_PrimitiveOrStringTypeOctet(self, p):
"""
PrimitiveOrStringType : OCTET
"""
p[0] = IDLBuiltinType.Types.octet
def p_PrimitiveOrStringTypeFloat(self, p):
"""
PrimitiveOrStringType : FLOAT
"""
p[0] = IDLBuiltinType.Types.float
def p_PrimitiveOrStringTypeUnrestictedFloat(self, p):
"""
PrimitiveOrStringType : UNRESTRICTED FLOAT
"""
p[0] = IDLBuiltinType.Types.unrestricted_float
def p_PrimitiveOrStringTypeDouble(self, p):
"""
PrimitiveOrStringType : DOUBLE
"""
p[0] = IDLBuiltinType.Types.double
def p_PrimitiveOrStringTypeUnrestictedDouble(self, p):
"""
PrimitiveOrStringType : UNRESTRICTED DOUBLE
"""
p[0] = IDLBuiltinType.Types.unrestricted_double
def p_PrimitiveOrStringTypeDOMString(self, p):
"""
PrimitiveOrStringType : DOMSTRING
"""
p[0] = IDLBuiltinType.Types.domstring
def p_PrimitiveOrStringTypeBytestring(self, p):
"""
PrimitiveOrStringType : BYTESTRING
"""
p[0] = IDLBuiltinType.Types.bytestring
def p_UnsignedIntegerTypeUnsigned(self, p):
"""
UnsignedIntegerType : UNSIGNED IntegerType
"""
p[0] = p[2] + 1 # Adding one to a given signed integer type
# gets you the unsigned type.
def p_UnsignedIntegerType(self, p):
"""
UnsignedIntegerType : IntegerType
"""
p[0] = p[1]
def p_IntegerTypeShort(self, p):
"""
IntegerType : SHORT
"""
p[0] = IDLBuiltinType.Types.short
def p_IntegerTypeLong(self, p):
    """
    IntegerType : LONG OptionalLong
    """
    # "long long" when the optional second LONG token was present,
    # plain "long" otherwise.
    has_second_long = p[2]
    p[0] = (IDLBuiltinType.Types.long_long if has_second_long
            else IDLBuiltinType.Types.long)
def p_OptionalLong(self, p):
"""
OptionalLong : LONG
"""
p[0] = True
def p_OptionalLongEmpty(self, p):
"""
OptionalLong :
"""
p[0] = False
def p_TypeSuffixBrackets(self, p):
"""
TypeSuffix : LBRACKET RBRACKET TypeSuffix
"""
p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
p[0].extend(p[3])
def p_TypeSuffixQMark(self, p):
"""
TypeSuffix : QUESTIONMARK TypeSuffixStartingWithArray
"""
p[0] = [(IDLMethod.TypeSuffixModifier.QMark, self.getLocation(p, 1))]
p[0].extend(p[2])
def p_TypeSuffixEmpty(self, p):
"""
TypeSuffix :
"""
p[0] = []
def p_TypeSuffixStartingWithArray(self, p):
"""
TypeSuffixStartingWithArray : LBRACKET RBRACKET TypeSuffix
"""
p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
p[0].extend(p[3])
def p_TypeSuffixStartingWithArrayEmpty(self, p):
"""
TypeSuffixStartingWithArray :
"""
p[0] = []
def p_Null(self, p):
    """
    Null : QUESTIONMARK
         |
    """
    # True iff the optional QUESTIONMARK token was matched; PLY gives the
    # production one extra slot per matched symbol.
    p[0] = len(p) > 1
def p_ReturnTypeType(self, p):
"""
ReturnType : Type
"""
p[0] = p[1]
def p_ReturnTypeVoid(self, p):
"""
ReturnType : VOID
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.void]
def p_ScopedName(self, p):
"""
ScopedName : AbsoluteScopedName
| RelativeScopedName
"""
p[0] = p[1]
def p_AbsoluteScopedName(self, p):
"""
AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_RelativeScopedName(self, p):
"""
RelativeScopedName : IDENTIFIER ScopedNameParts
"""
assert not p[2] # Not implemented!
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_ScopedNameParts(self, p):
"""
ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_ScopedNamePartsEmpty(self, p):
"""
ScopedNameParts :
"""
p[0] = None
def p_ExtendedAttributeNoArgs(self, p):
"""
ExtendedAttributeNoArgs : IDENTIFIER
"""
p[0] = (p[1],)
def p_ExtendedAttributeArgList(self, p):
"""
ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeIdent(self, p):
"""
ExtendedAttributeIdent : IDENTIFIER EQUALS STRING
| IDENTIFIER EQUALS IDENTIFIER
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeNamedArgList(self, p):
"""
ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3], p[5])
def p_error(self, p):
    """Report a syntax error; `p` is None at unexpected end of input."""
    if p:
        raise WebIDLError("invalid syntax",
                          [Location(self.lexer, p.lineno, p.lexpos, self._filename)])
    raise WebIDLError(
        "Syntax Error at end of file. Possibly due to missing semicolon(;), braces(}) or both",
        [self._filename])
def __init__(self, outputdir='', lexer=None):
    """Build the WebIDL parser.

    Constructs the yacc parse tables (cached under `outputdir` as
    WebIDLGrammar.pkl), creates the global scope, installs builtin typedefs,
    then parses the `_builtins` IDL snippet so its definitions are in scope
    before any user input.
    """
    Tokenizer.__init__(self, outputdir, lexer)
    self.parser = yacc.yacc(module=self,
                            outputdir=outputdir,
                            tabmodule='webidlyacc',
                            errorlog=yacc.NullLogger(),
                            picklefile='WebIDLGrammar.pkl')
    self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
    self._installBuiltins(self._globalScope)
    self._productions = []

    # Parse the builtin IDL under a synthetic filename.  Note the builtins'
    # productions are intentionally not recorded in self._productions.
    self._filename = "<builtin>"
    self.lexer.input(Parser._builtins)
    self._filename = None
    self.parser.parse(lexer=self.lexer,tracking=True)
def _installBuiltins(self, scope):
    """Register typedefs for the ArrayBuffer/typed-array builtin types in `scope`."""
    assert isinstance(scope, IDLScope)

    # xrange omits the last value.
    for x in xrange(IDLBuiltinType.Types.ArrayBuffer, IDLBuiltinType.Types.Float64Array + 1):
        builtin = BuiltinTypes[x]
        name = builtin.name
        typedef = IDLTypedefType(BuiltinLocation("<builtin type>"), builtin, name)
        typedef.resolve(scope)
@ staticmethod
def handleModifiers(type, modifiers):
for (modifier, modifierLocation) in modifiers:
assert modifier == IDLMethod.TypeSuffixModifier.QMark or \
modifier == IDLMethod.TypeSuffixModifier.Brackets
if modifier == IDLMethod.TypeSuffixModifier.QMark:
type = IDLNullableType(modifierLocation, type)
elif modifier == IDLMethod.TypeSuffixModifier.Brackets:
type = IDLArrayType(modifierLocation, type)
return type
def parse(self, t, filename=None):
    """Parse WebIDL text `t`, accumulating its productions on this parser.

    :param t: IDL source text
    :param filename: used for error locations; None for anonymous input
    """
    self.lexer.input(t)

    #for tok in iter(self.lexer.token, None):
    #    print tok

    self._filename = filename
    self._productions.extend(self.parser.parse(lexer=self.lexer,tracking=True))
    self._filename = None
def finish(self):
    """Resolve and validate all parsed productions; return them de-duplicated.

    IDLImplementsStatements are finished before everything else so interfaces
    see their consequential members before they themselves are finished.
    """
    # First, finish all the IDLImplementsStatements.  In particular, we
    # have to make sure we do those before we do the IDLInterfaces.
    # XXX khuey hates this bit and wants to nuke it from orbit.
    implementsStatements = [ p for p in self._productions if
                             isinstance(p, IDLImplementsStatement)]
    otherStatements = [ p for p in self._productions if
                        not isinstance(p, IDLImplementsStatement)]
    for production in implementsStatements:
        production.finish(self.globalScope())
    for production in otherStatements:
        production.finish(self.globalScope())

    # Do any post-finish validation we need to do
    for production in self._productions:
        production.validate()

    # De-duplicate self._productions, without modifying its order.
    seen = set()
    result = []
    for p in self._productions:
        if p not in seen:
            seen.add(p)
            result.append(p)
    return result
def reset(self):
    """Return a fresh Parser that reuses this parser's lexer."""
    return Parser(lexer=self.lexer)
# Builtin IDL defined by WebIDL
_builtins = """
typedef unsigned long long DOMTimeStamp;
"""
def main():
    """Command-line entry point: parse each given WebIDL file, then finish().

    Note: this module is Python 2 (print statements, `except X, e` syntax).
    """
    # Parse arguments.
    from optparse import OptionParser
    usageString = "usage: %prog [options] files"
    o = OptionParser(usage=usageString)
    o.add_option("--cachedir", dest='cachedir', default=None,
                 help="Directory in which to cache lex/parse tables.")
    o.add_option("--verbose-errors", action='store_true', default=False,
                 help="When an error happens, display the Python traceback.")
    (options, args) = o.parse_args()

    if len(args) < 1:
        o.error(usageString)

    fileList = args
    baseDir = os.getcwd()

    # Parse the WebIDL.
    parser = Parser(options.cachedir)
    try:
        for filename in fileList:
            fullPath = os.path.normpath(os.path.join(baseDir, filename))
            f = open(fullPath, 'rb')
            lines = f.readlines()
            f.close()
            print fullPath
            parser.parse(''.join(lines), fullPath)
        # finish() resolves cross-file references, so it runs once after all
        # files are parsed.
        parser.finish()
    except WebIDLError, e:
        if options.verbose_errors:
            traceback.print_exc()
        else:
            print e
if __name__ == '__main__':
main()
|
{
"content_hash": "832bc4229f5e20b64dc139d92b54848a",
"timestamp": "",
"source": "github",
"line_count": 5027,
"max_line_length": 139,
"avg_line_length": 36.481400437636765,
"alnum_prop": 0.5595827517012738,
"repo_name": "hyundukkim/WATT",
"id": "f9113004071b4fa2ce8f26bafa95a1b4b01c2826",
"size": "183695",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tools/WebIDLBinder/third_party/WebIDL.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "79596"
},
{
"name": "HTML",
"bytes": "91303"
},
{
"name": "JavaScript",
"bytes": "815569"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "20167"
},
{
"name": "Shell",
"bytes": "821"
}
],
"symlink_target": ""
}
|
"""Utilities of network.
Message format:
Compressed JSON string
{
"type": "text",
# Other information ...
}
"""
import json
from ..utils.error import GameError
__author__ = 'fyabc'
# Exceptions.
class NetworkError(GameError):
    """Base class for network-layer errors."""
class ServerFull(GameError):
    """Raised when a client tries to join a server that is at capacity."""

    def __init__(self, server):
        message = 'Server(address={}) already full!'.format(server.server_address)
        super().__init__(message)
        self.server = server
class UserAlreadyExists(GameError):
    """Raised when registering a user that is already on the server."""

    def __init__(self, server, user):
        message = '{} already exists in Server(address={})!'.format(user, server)
        super().__init__(message)
        self.user = user
        self.server = server
class UserNotExists(GameError):
    """Raised when referencing a user that is not on the server."""

    def __init__(self, server, user):
        message = '{} does not exist in Server(address={})!'.format(user, server)
        super().__init__(message)
        self.user = user
        self.server = server
class MasterAlreadyExists(GameError):
    """Raised when a second master tries to claim a server."""

    def __init__(self, server, user):
        message = 'Master {} already exists in Server(address={})!'.format(user, server)
        super().__init__(message)
        self.user = user
        self.server = server
# Message utilities.
def send_dict(fd, d):
    """Serialize `d` as compact JSON, append a newline, and write bytes to `fd`."""
    payload = json.dumps(d, separators=(',', ':')) + '\n'
    fd.write(payload.encode())
def recv_dict(fd):
    """Read one newline-terminated JSON message from `fd`.

    Returns the decoded object, or None for a blank line / EOF.
    """
    line = fd.readline().strip().decode()
    return json.loads(line) if line else None
class MsgTypes:
    # Message type tags carried in the "type" field of each JSON message.
    Text = 'text'
    OK = 'ok'
    Error = 'error'

    # Fallback used by recv_msg when a message has no "type" field.
    Default = Text
def send_msg(fd, msg_type, **kwargs):
    """Send a message of `msg_type`; extra keyword args become message fields."""
    payload = dict(kwargs)
    payload['type'] = msg_type
    send_dict(fd, payload)
def recv_msg(fd):
    """Receive one message from `fd`.

    :param fd: readable binary stream
    :return: (msg_type, msg_dict) tuple; (None, None) when nothing was
        received, which usually means the connection closed.
    :rtype: tuple
    """
    msg = recv_dict(fd)
    return (None, None) if msg is None else (msg.get('type', MsgTypes.Default), msg)
class UserState:
    # Connection lifecycle states for a user session.
    Invalid = -1
    WaitUserData = 0
    CloseConnection = 10
__all__ = [
'NetworkError',
'ServerFull',
'UserAlreadyExists',
'UserNotExists',
'MasterAlreadyExists',
'MsgTypes',
'send_dict', 'recv_dict',
'send_msg', 'recv_msg',
'UserState',
]
|
{
"content_hash": "f1629649e870cffc8ff878f1d9dd9aa6",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 96,
"avg_line_length": 19.706422018348626,
"alnum_prop": 0.5972998137802608,
"repo_name": "fyabc/MiniGames",
"id": "86674b22b13dc713cc15a9d88d66f12f1568afca",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HearthStone2/MyHearthStone/network/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "821180"
}
],
"symlink_target": ""
}
|
import json
import uuid
from typing import List
from dateutil.parser import parse
from cerebralcortex.kernel.DataStoreEngine.Metadata.Metadata import Metadata
from cerebralcortex.kernel.datatypes.datastream import DataStream, DataPoint
class StoreData:
    """Persist DataStream metadata and datapoints.

    Stream info goes through the Metadata helper; datapoints are written to
    Cassandra via a Spark DataFrame.  Assumes the mixing class provides
    CC_obj, datapointTable, keyspaceName, hostIP, dbUser and dbPassword
    (defined elsewhere in the project -- TODO confirm).
    """

    def store_stream(self, datastream: DataStream, type):
        """Store one stream: its info row plus all of its datapoints.

        :param datastream: a DataStream object, or a formatted JSON object
            when ``type == "json"``
        :param type: supported types are "json" and "datastream"
        :raises ValueError: for any other ``type``
        """
        if type == "json":
            datastream = self.json_to_datastream(datastream)
        elif type != "datastream":
            # BUGFIX: the original condition was
            # (type != "json" or type != "datastream"), which is true for
            # EVERY value of `type` -- so even valid "datastream" input was
            # always rejected with ValueError.
            raise ValueError(type + " is not supported data type")

        ownerID = datastream.owner
        name = datastream.name
        data_descriptor = datastream.data_descriptor
        execution_context = datastream.execution_context
        annotations = datastream.annotations
        stream_type = datastream.datastream_type
        data = datastream.data

        if data:
            # Derive the stream's time span from the data unless the stream
            # already carries explicit start/end times.
            if isinstance(data, list):
                total_dp = len(data) - 1
                new_start_time = datastream._start_time if datastream._start_time else data[0].start_time
                new_end_time = datastream._end_time if datastream._end_time else data[total_dp].start_time
            else:
                new_start_time = datastream._start_time if datastream._start_time else data.start_time
                new_end_time = datastream._end_time if datastream._end_time else data.start_time
        else:
            # ROBUSTNESS: the original left new_start_time/new_end_time
            # unbound when `data` was empty, causing a NameError below.
            new_start_time = datastream._start_time
            new_end_time = datastream._end_time

        stream_identifier = datastream.identifier
        result = Metadata(self.CC_obj).is_id_created(stream_identifier)
        Metadata(self.CC_obj).store_stream_info(stream_identifier, ownerID, name,
                                                data_descriptor, execution_context,
                                                annotations,
                                                stream_type, new_start_time, new_end_time,
                                                result["status"])
        dataframe = self.map_datapoint_to_dataframe(stream_identifier, data)
        self.store_data(dataframe, self.datapointTable)

    def store_data(self, dataframe_data: object, table_name: str):
        """Append a pyspark DataFrame to a Cassandra table.

        :param dataframe_data: pyspark DataFrame
        :param table_name: Cassandra table name
        """
        if table_name == "":
            raise Exception("Table name cannot be null.")
        elif dataframe_data == "":
            raise Exception("Data cannot be null.")

        dataframe_data.write.format("org.apache.spark.sql.cassandra") \
            .mode('append') \
            .options(table=table_name, keyspace=self.keyspaceName) \
            .option("spark.cassandra.connection.host", self.hostIP) \
            .option("spark.cassandra.auth.username", self.dbUser) \
            .option("spark.cassandra.auth.password", self.dbPassword) \
            .save()

    def map_datapoint_to_dataframe(self, stream_id: uuid, datapoints: DataPoint) -> List:
        """Convert datapoints into a Spark DataFrame of Cassandra rows.

        Rows are (identifier, day, start_time[, end_time], sample); the
        4-column schema is used when datapoints carry no end_time.

        NOTE(review): a batch mixing datapoints with and without end_time
        yields rows of different arity under a single schema -- assumed
        homogeneous per batch; verify against callers.

        :param stream_id: stream UUID
        :param datapoints: iterable of DataPoint
        :return: pyspark DataFrame
        """
        rows = []
        no_end_time = 0
        for point in datapoints:
            day = point.start_time.strftime("%Y%m%d")

            # Samples are stored as strings; non-strings are JSON-encoded.
            if isinstance(point.sample, str):
                sample = point.sample
            else:
                sample = json.dumps(point.sample)

            if point.end_time:
                row = str(stream_id), day, point.start_time, point.end_time, sample
            else:
                row = str(stream_id), day, point.start_time, sample
                # Remember that this batch lacks end times so the 4-column
                # schema is chosen below.
                no_end_time = 1
            rows.append(row)

        temp_RDD = self.CC_obj.getOrCreateSC(type="sparkContext").parallelize(rows)
        if no_end_time == 1:
            df = self.CC_obj.getOrCreateSC(type="sqlContext").createDataFrame(
                temp_RDD,
                schema=["identifier", "day", "start_time", "sample"]).coalesce(400)
        else:
            df = self.CC_obj.getOrCreateSC(type="sqlContext").createDataFrame(
                temp_RDD,
                schema=["identifier", "day", "start_time", "end_time", "sample"]).coalesce(400)
        return df

    #################################################################
    ## json to CC objects and dataframe conversion
    #################################################################

    def json_to_datapoints(self, json_obj):
        """Build a DataPoint from one JSON datapoint record.

        NOTE(review): "endtime" is passed through unparsed, unlike
        "starttime" which goes through dateutil.parse -- verify whether
        callers expect a datetime there.
        """
        if isinstance(json_obj["value"], str):
            sample = json_obj["value"]
        else:
            sample = json.dumps(json_obj["value"])
        start_time = parse(json_obj["starttime"])
        if "endtime" in json_obj:
            return DataPoint(start_time=start_time, end_time=json_obj["endtime"], sample=sample)
        else:
            return DataPoint(start_time=start_time, sample=sample)

    def json_to_datastream(self, json_obj):
        """Build a DataStream (with parsed DataPoints) from a JSON object."""
        data = json_obj["data"]
        metadata = json_obj["metadata"]
        identifier = metadata["identifier"]
        owner = metadata["owner"]
        name = metadata["name"]
        data_descriptor = metadata["data_descriptor"]
        execution_context = metadata["execution_context"]
        annotations = metadata["annotations"]
        stream_type = "ds"  # TODO: it must be defined in json object
        start_time = parse(data[0]["starttime"])
        end_time = parse(data[len(data) - 1]["starttime"])
        datapoints = list(map(self.json_to_datapoints, data))

        return DataStream(identifier,
                          owner,
                          name,
                          data_descriptor,
                          execution_context,
                          annotations,
                          stream_type,
                          start_time,
                          end_time,
                          datapoints)
|
{
"content_hash": "de83d88783be278d4fb29c3f352772f7",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 120,
"avg_line_length": 41.37579617834395,
"alnum_prop": 0.500307881773399,
"repo_name": "aungkonazim/CerebralCortex",
"id": "1db4ef0c3f9f29bf36a271c864e986a253f72349",
"size": "7841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cerebralcortex/kernel/DataStoreEngine/Data/StoreData.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "420723"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
import os
import time
import unittest
import numpy as np
from collections import namedtuple, defaultdict
import cereal.messaging as messaging
from cereal import log
from system.hardware import TICI, HARDWARE
from selfdrive.manager.process_config import managed_processes
BMX = {
('bmx055', 'acceleration'),
('bmx055', 'gyroUncalibrated'),
('bmx055', 'magneticUncalibrated'),
('bmx055', 'temperature'),
}
LSM = {
('lsm6ds3', 'acceleration'),
('lsm6ds3', 'gyroUncalibrated'),
('lsm6ds3', 'temperature'),
}
LSM_C = {(x[0]+'trc', x[1]) for x in LSM}
MMC = {
('mmc5603nj', 'magneticUncalibrated'),
}
RPR = {
('rpr0521', 'light'),
}
SENSOR_CONFIGURATIONS = (
(BMX | LSM | RPR),
(MMC | LSM | RPR),
(BMX | LSM_C | RPR),
(MMC| LSM_C | RPR),
)
Sensor = log.SensorEventData.SensorSource
SensorConfig = namedtuple('SensorConfig', ['type', 'sanity_min', 'sanity_max'])
ALL_SENSORS = {
Sensor.rpr0521: {
SensorConfig("light", 0, 1023),
},
Sensor.lsm6ds3: {
SensorConfig("acceleration", 5, 15),
SensorConfig("gyroUncalibrated", 0, .2),
SensorConfig("temperature", 0, 60),
},
Sensor.lsm6ds3trc: {
SensorConfig("acceleration", 5, 15),
SensorConfig("gyroUncalibrated", 0, .2),
SensorConfig("temperature", 0, 60),
},
Sensor.bmx055: {
SensorConfig("acceleration", 5, 15),
SensorConfig("gyroUncalibrated", 0, .2),
SensorConfig("magneticUncalibrated", 0, 300),
SensorConfig("temperature", 0, 60),
},
Sensor.mmc5603nj: {
SensorConfig("magneticUncalibrated", 0, 300),
}
}
LSM_IRQ = 336
def get_irq_count(irq: int):
    """Total interrupt count for `irq`, summed over all CPUs (from sysfs)."""
    with open(f"/sys/kernel/irq/{irq}/per_cpu_count") as f:
        counts = f.read().split(",")
    return sum(int(c) for c in counts)
def read_sensor_events(duration_sec):
    """Subscribe to every sensor service and drain events for `duration_sec` seconds.

    Returns a dict mapping service name -> list of received messages; fails
    if nothing at all was received.
    """
    sensor_types = ['accelerometer', 'gyroscope', 'magnetometer', 'accelerometer2',
                    'gyroscope2', 'lightSensor', 'temperatureSensor']

    socks = {stype: messaging.sub_sock(stype, timeout=0.1) for stype in sensor_types}
    events = defaultdict(list)

    deadline = time.monotonic() + duration_sec
    while time.monotonic() < deadline:
        for name, sock in socks.items():
            events[name] += messaging.drain_sock(sock)
        time.sleep(0.1)

    assert sum(len(msgs) for msgs in events.values()) != 0, "No sensor events collected!"
    return events
class TestSensord(unittest.TestCase):
    """Hardware-in-the-loop checks for sensord on TICI devices.

    setUpClass runs sensord once and samples its events; the shared
    cls.events dict feeds all test cases.  Tests that need a live sensord
    restart it themselves.
    """

    @classmethod
    def setUpClass(cls):
        # Only meaningful on comma three hardware.
        if not TICI:
            raise unittest.SkipTest

        # make sure gpiochip0 is readable
        HARDWARE.initialize_hardware()

        # enable LSM self test
        os.environ["LSM_SELF_TEST"] = "1"

        # read initial sensor values every test case can use
        os.system("pkill -f ./_sensord")
        try:
            managed_processes["sensord"].start()
            time.sleep(3)
            cls.sample_secs = 10
            cls.events = read_sensor_events(cls.sample_secs)
        finally:
            # teardown won't run if this doesn't succeed
            managed_processes["sensord"].stop()

    @classmethod
    def tearDownClass(cls):
        managed_processes["sensord"].stop()
        if "LSM_SELF_TEST" in os.environ:
            del os.environ['LSM_SELF_TEST']

    def tearDown(self):
        managed_processes["sensord"].stop()

    def test_sensors_present(self):
        """The (source, event-kind) pairs seen must match a known config."""
        # verify correct sensors configuration
        seen = set()
        for etype in self.events:
            for measurement in self.events[etype]:
                m = getattr(measurement, measurement.which())
                seen.add((str(m.source), m.which()))
        self.assertIn(seen, SENSOR_CONFIGURATIONS)

    def test_lsm6ds3_timing(self):
        """LSM6DS3 accel/gyro must be sampled and published near 104Hz."""
        # verify measurements are sampled and published at 104Hz

        sensor_t = {
            1: [],  # accel
            5: [],  # gyro
        }

        for measurement in self.events['accelerometer']:
            m = getattr(measurement, measurement.which())
            sensor_t[m.sensor].append(m.timestamp)

        for measurement in self.events['gyroscope']:
            m = getattr(measurement, measurement.which())
            sensor_t[m.sensor].append(m.timestamp)

        for s, vals in sensor_t.items():
            with self.subTest(sensor=s):
                assert len(vals) > 0
                tdiffs = np.diff(vals) / 1e6  # millis

                # 20ms corresponds to roughly two missed 104Hz samples.
                high_delay_diffs = list(filter(lambda d: d >= 20., tdiffs))
                assert len(high_delay_diffs) < 15, f"Too many large diffs: {high_delay_diffs}"

                avg_diff = sum(tdiffs)/len(tdiffs)
                avg_freq = 1. / (avg_diff * 1e-3)
                assert 92. < avg_freq < 114., f"avg freq {avg_freq}Hz wrong, expected 104Hz"

                stddev = np.std(tdiffs)
                assert stddev < 2.0, f"Standard-dev to big {stddev}"

    def test_events_check(self):
        """Every sensor event type must produce a healthy number of events."""
        # verify if all sensors produce events
        sensor_events = dict()
        for etype in self.events:
            for measurement in self.events[etype]:
                m = getattr(measurement, measurement.which())
                if m.type in sensor_events:
                    sensor_events[m.type] += 1
                else:
                    sensor_events[m.type] = 1

        for s in sensor_events:
            err_msg = f"Sensor {s}: 200 < {sensor_events[s]}"
            assert sensor_events[s] > 200, err_msg

    def test_logmonottime_timestamp_diff(self):
        """Publish latency (logMonoTime - sample timestamp) must stay small."""
        # ensure diff between the message logMonotime and sample timestamp is small
        tdiffs = list()
        for etype in self.events:
            for measurement in self.events[etype]:
                m = getattr(measurement, measurement.which())

                # check if gyro and accel timestamps are before logMonoTime
                if str(m.source).startswith("lsm6ds3") and m.which() != 'temperature':
                    err_msg = f"Timestamp after logMonoTime: {m.timestamp} > {measurement.logMonoTime}"
                    assert m.timestamp < measurement.logMonoTime, err_msg

                # negative values might occur, as non interrupt packages created
                # before the sensor is read
                tdiffs.append(abs(measurement.logMonoTime - m.timestamp) / 1e6)

        high_delay_diffs = set(filter(lambda d: d >= 15., tdiffs))
        assert len(high_delay_diffs) < 20, f"Too many measurements published : {high_delay_diffs}"

        avg_diff = round(sum(tdiffs)/len(tdiffs), 4)
        assert avg_diff < 4, f"Avg packet diff: {avg_diff:.1f}ms"

        stddev = np.std(tdiffs)
        assert stddev < 2, f"Timing diffs have too high stddev: {stddev}"

    def test_sensor_values_sanity_check(self):
        """Event counts and mean value norms must fall in per-sensor bounds."""
        sensor_values = dict()
        for etype in self.events:
            for measurement in self.events[etype]:
                m = getattr(measurement, measurement.which())
                key = (m.source.raw, m.which())
                values = getattr(m, m.which())

                if hasattr(values, 'v'):
                    values = values.v
                values = np.atleast_1d(values)

                if key in sensor_values:
                    sensor_values[key].append(values)
                else:
                    sensor_values[key] = [values]

        # Sanity check sensor values and counts
        for sensor, stype in sensor_values:
            for s in ALL_SENSORS[sensor]:
                if s.type != stype:
                    continue

                key = (sensor, s.type)
                val_cnt = len(sensor_values[key])
                min_samples = self.sample_secs * 100  # Hz
                err_msg = f"Sensor {sensor} {s.type} got {val_cnt} measurements, expected {min_samples}"
                assert min_samples*0.9 < val_cnt < min_samples*1.1, err_msg

                mean_norm = np.mean(np.linalg.norm(sensor_values[key], axis=1))
                err_msg = f"Sensor '{sensor} {s.type}' failed sanity checks {mean_norm} is not between {s.sanity_min} and {s.sanity_max}"
                assert s.sanity_min <= mean_norm <= s.sanity_max, err_msg

    def test_sensor_verify_no_interrupts_after_stop(self):
        """IRQ counts must rise while sensord runs and freeze once it stops."""
        managed_processes["sensord"].start()
        time.sleep(3)

        # read /proc/interrupts to verify interrupts are received
        state_one = get_irq_count(LSM_IRQ)
        time.sleep(1)
        state_two = get_irq_count(LSM_IRQ)

        error_msg = f"no interrupts received after sensord start!\n{state_one} {state_two}"
        assert state_one != state_two, error_msg

        managed_processes["sensord"].stop()
        time.sleep(1)

        # read /proc/interrupts to verify no more interrupts are received
        state_one = get_irq_count(LSM_IRQ)
        time.sleep(1)
        state_two = get_irq_count(LSM_IRQ)
        assert state_one == state_two, "Interrupts received after sensord stop!"
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "42fc9dee53e707d20207d235e8c57fb3",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 129,
"avg_line_length": 30.16296296296296,
"alnum_prop": 0.6423133595284872,
"repo_name": "commaai/openpilot",
"id": "c6fe33129ad3ff1275f78c7490c800cb561c31b0",
"size": "8167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfdrive/sensord/tests/test_sensord.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "604924"
},
{
"name": "C++",
"bytes": "1125553"
},
{
"name": "Cython",
"bytes": "50503"
},
{
"name": "Dockerfile",
"bytes": "1239"
},
{
"name": "Emacs Lisp",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "11493"
},
{
"name": "Kaitai Struct",
"bytes": "8093"
},
{
"name": "MATLAB",
"bytes": "35190"
},
{
"name": "Makefile",
"bytes": "14018"
},
{
"name": "Python",
"bytes": "2386885"
},
{
"name": "QML",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "32876"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from systemds.context import SystemDSContext
class TestSource_01(unittest.TestCase):
    """Tests for sourcing DML functions through the SystemDS python API.

    Each test imports ``./tests/source/source_01.dml`` and checks both the
    computed result and — via :meth:`imports` — that the generated script
    contains exactly one ``source`` statement, regardless of how many times
    the sourced function is called.
    """

    # Shared SystemDS context, created once for the whole class.
    sds: SystemDSContext = None

    @classmethod
    def setUpClass(cls):
        cls.sds = SystemDSContext()

    @classmethod
    def tearDownClass(cls):
        cls.sds.close()

    def test_01_single_call(self):
        """A single call yields a 1x1 matrix of 1 and exactly one import."""
        c = self.sds.source("./tests/source/source_01.dml",
                            "test").test_01()
        res = c.compute()
        self.assertEqual(1, self.imports(c.script_str))
        self.assertTrue(np.allclose(np.array([[1]]), res))

    def test_01_multi_call_01(self):
        """Two calls to the sourced function still emit a single import."""
        s = self.sds.source("./tests/source/source_01.dml",
                            "test")
        a = s.test_01()
        b = s.test_01()
        c = a + b
        res = c.compute()
        self.assertEqual(1, self.imports(c.script_str))
        self.assertTrue(np.allclose(np.array([[2]]), res))

    def test_01_multi_call_02(self):
        """Reusing one call's result several times emits a single import."""
        s = self.sds.source("./tests/source/source_01.dml",
                            "test")
        a = s.test_01()
        b = s.test_01()
        c = a + b + a
        res = c.compute()
        self.assertEqual(1, self.imports(c.script_str))
        self.assertTrue(np.allclose(np.array([[3]]), res))

    def test_01_invalid_function(self):
        """Calling a function that does not exist in the script raises."""
        s = self.sds.source("./tests/source/source_01.dml",
                            "test")
        # No need to capture the context manager or the return value;
        # the call itself is expected to raise.
        with self.assertRaises(AttributeError):
            s.test_01_NOT_A_REAL_FUNCTION()

    def test_01_invalid_arguments(self):
        """Passing an argument to a zero-argument sourced function raises."""
        s = self.sds.source("./tests/source/source_01.dml",
                            "test")
        m = self.sds.full((1, 1), 2)
        with self.assertRaises(TypeError):
            s.test_01(m)

    def test_01_sum(self):
        """Operations chained onto the sourced call keep a single import."""
        c = self.sds.source("./tests/source/source_01.dml",
                            "test").test_01().sum()
        res = c.compute()
        self.assertEqual(1, self.imports(c.script_str))
        self.assertTrue(np.allclose(np.array([[1]]), res))

    def imports(self, script: str) -> int:
        """Return how many lines of *script* source source_01.dml."""
        return script.split("\n").count('source("./tests/source/source_01.dml") as test')
# Run the tests when executed directly; exit=False keeps the interpreter
# alive after the run instead of calling sys.exit (useful in REPL/notebooks).
if __name__ == "__main__":
    unittest.main(exit=False)
|
{
"content_hash": "bd7b638ed6205c5886e50a2e228bcca8",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 89,
"avg_line_length": 32.27142857142857,
"alnum_prop": 0.5427180168216025,
"repo_name": "apache/incubator-systemml",
"id": "00cb14b827695bfebfb497e26fd4d876aee06702",
"size": "3177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/python/tests/source/test_source_01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "31285"
},
{
"name": "Batchfile",
"bytes": "22265"
},
{
"name": "C",
"bytes": "8676"
},
{
"name": "C++",
"bytes": "30804"
},
{
"name": "CMake",
"bytes": "10312"
},
{
"name": "Cuda",
"bytes": "30575"
},
{
"name": "Java",
"bytes": "12990600"
},
{
"name": "Jupyter Notebook",
"bytes": "36387"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Protocol Buffer",
"bytes": "66399"
},
{
"name": "Python",
"bytes": "195969"
},
{
"name": "R",
"bytes": "672462"
},
{
"name": "Scala",
"bytes": "185698"
},
{
"name": "Shell",
"bytes": "152940"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.